From 13142fbaef3ea2d52f606f53ce08b47cb45476e7 Mon Sep 17 00:00:00 2001 From: harshaljanjani Date: Tue, 16 Sep 2025 21:24:09 +0400 Subject: [PATCH 01/10] init: Added some stuff, tons to go --- keras_hub/api/models/__init__.py | 3 + keras_hub/src/models/gemma3n/__init__.py | 0 .../src/models/gemma3n/gemma3n_attention.py | 605 ++++++++++++ .../models/gemma3n/gemma3n_audio_encoder.py | 511 +++++++++++ .../models/gemma3n/gemma3n_audio_layers.py | 526 +++++++++++ .../src/models/gemma3n/gemma3n_backbone.py | 865 ++++++++++++++++++ .../models/gemma3n/gemma3n_backbone_test.py | 185 ++++ .../models/gemma3n/gemma3n_text_decoder.py | 274 ++++++ .../src/models/gemma3n/gemma3n_text_layers.py | 426 +++++++++ .../src/models/gemma3n/gemma3n_text_model.py | 384 ++++++++ keras_hub/src/models/gemma3n/gemma3n_utils.py | 122 +++ .../src/models/gemma3n/rms_normalization.py | 67 ++ 12 files changed, 3968 insertions(+) create mode 100644 keras_hub/src/models/gemma3n/__init__.py create mode 100644 keras_hub/src/models/gemma3n/gemma3n_attention.py create mode 100644 keras_hub/src/models/gemma3n/gemma3n_audio_encoder.py create mode 100644 keras_hub/src/models/gemma3n/gemma3n_audio_layers.py create mode 100644 keras_hub/src/models/gemma3n/gemma3n_backbone.py create mode 100644 keras_hub/src/models/gemma3n/gemma3n_backbone_test.py create mode 100644 keras_hub/src/models/gemma3n/gemma3n_text_decoder.py create mode 100644 keras_hub/src/models/gemma3n/gemma3n_text_layers.py create mode 100644 keras_hub/src/models/gemma3n/gemma3n_text_model.py create mode 100644 keras_hub/src/models/gemma3n/gemma3n_utils.py create mode 100644 keras_hub/src/models/gemma3n/rms_normalization.py diff --git a/keras_hub/api/models/__init__.py b/keras_hub/api/models/__init__.py index fe220e2d43..28f4e071fc 100644 --- a/keras_hub/api/models/__init__.py +++ b/keras_hub/api/models/__init__.py @@ -297,6 +297,9 @@ from keras_hub.src.models.gemma3.gemma3_vision_encoder import ( Gemma3VisionEncoder as Gemma3VisionEncoder, ) +from keras_hub.src.models.gemma3n.gemma3n_backbone import ( + Gemma3nBackbone as Gemma3nBackbone, +) from keras_hub.src.models.gpt2.gpt2_backbone import GPT2Backbone as GPT2Backbone from keras_hub.src.models.gpt2.gpt2_causal_lm import ( GPT2CausalLM as GPT2CausalLM, diff --git a/keras_hub/src/models/gemma3n/__init__.py b/keras_hub/src/models/gemma3n/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/keras_hub/src/models/gemma3n/gemma3n_attention.py b/keras_hub/src/models/gemma3n/gemma3n_attention.py new file mode 100644 index 0000000000..dc1adaadff --- /dev/null +++ b/keras_hub/src/models/gemma3n/gemma3n_attention.py @@ -0,0 +1,605 @@ +import math + +import keras +import numpy as np + +from keras_hub.src.models.gemma3n.gemma3n_utils import apply_rotary_pos_emb +from keras_hub.src.models.gemma3n.gemma3n_utils import eager_attention_forward +from keras_hub.src.models.gemma3n.rms_normalization import Gemma3nRMSNorm + + +class Gemma3nAudioRelativePositionEmbedding(keras.layers.Layer): + """A layer for learning relative position embeddings for audio sequences. + + This layer implements the relative position embedding mechanism used in the + audio tower of the Gemma3n model. It computes position-aware attention + scores by generating a timing signal based on relative positions between + queries and keys, which is then projected and added to the content-based + attention logits. + + Args: + hidden_size: int. The size of the hidden state. + conf_num_attention_heads: int. The number of attention heads. 
+ conf_attention_context_left: int. The number of steps to attend to in + the past, including the current step. + conf_attention_context_right: int. The number of steps to attend to in + the future. + """ + + def __init__( + self, + hidden_size, + conf_num_attention_heads, + conf_attention_context_left, + conf_attention_context_right, + dtype=None, + **kwargs, + ): + super().__init__(dtype=dtype, **kwargs) + self.hidden_size = hidden_size + self.conf_num_attention_heads = conf_num_attention_heads + self.conf_attention_context_left = conf_attention_context_left + self.conf_attention_context_right = conf_attention_context_right + self.num_heads = conf_num_attention_heads + self.channels = hidden_size + self.head_dim = self.channels // self.num_heads + self.max_backward = max(0, conf_attention_context_left - 1) + self.max_forward = conf_attention_context_right + self.pos_proj = keras.layers.Dense( + self.num_heads * self.head_dim, + use_bias=False, + name="pos_proj", + dtype=self.dtype_policy, + ) + min_timescale = 1.0 + max_timescale = 1.0e4 + num_timescales = self.channels // 2 + log_timescale_increment = math.log( + float(max_timescale) / float(min_timescale) + ) / max(num_timescales - 1, 1) + inv_timescales = min_timescale * np.exp( + np.arange(num_timescales, dtype="float32") + * -log_timescale_increment + ) + self.inv_timescales = keras.ops.expand_dims( + keras.ops.expand_dims( + keras.ops.convert_to_tensor(inv_timescales), 0 + ), + 0, + ) + + def build(self, input_shape): + self.pos_proj.build((None, self.channels)) + super().build(input_shape) + + def _get_timing_signal_1d_pos(self, position, dtype): + position = keras.ops.cast( + keras.ops.expand_dims(position, axis=-1), "float32" + ) + scaled_time = position * keras.ops.cast(self.inv_timescales, "float32") + timing_signal = keras.ops.concatenate( + [keras.ops.sin(scaled_time), keras.ops.cos(scaled_time)], axis=-1 + ) + return keras.ops.cast(timing_signal, dtype) + + def _relative_shift( + self, + term_bd_before_shift, + batch_size, + num_heads, + num_query_blocks, + query_block_size, + key_context_size, + max_span_plus_1, + ): + pad_amount_last_dim = (key_context_size + 1) - max_span_plus_1 + padding_tuple = [[0, 0]] * (len(term_bd_before_shift.shape) - 1) + [ + [0, pad_amount_last_dim] + ] + term_bd_padded = keras.ops.pad(term_bd_before_shift, padding_tuple) + term_bd_reshaped = keras.ops.reshape( + term_bd_padded, + ( + batch_size, + num_heads, + -1, + ), + )[:, :, : query_block_size * key_context_size] + term_bd_shifted = keras.ops.reshape( + term_bd_reshaped, + ( + batch_size, + num_heads, + -1, + query_block_size, + key_context_size, + ), + ) + return term_bd_shifted + + def _int8_call(self, queries, keys): + original_dtype = queries.dtype + queries_calc = keras.ops.cast(queries, "float32") + keys_calc = keras.ops.cast(keys, "float32") + result_calc = self.call(queries_calc, keys_calc) + return keras.ops.cast(result_calc, original_dtype) + + def call(self, queries, keys): + batch_size = keras.ops.shape(queries)[0] + ( + _, + num_query_blocks, + query_block_size, + num_heads, + head_dim, + ) = queries.shape + _, _, key_context_size, _, _ = keys.shape + pos_indices = keras.ops.expand_dims( + keras.ops.arange( + self.max_backward, -self.max_forward - 1, -1, dtype="float32" + ), + 0, + ) + max_span_plus_1 = pos_indices.shape[1] + sin_emb_timing_signal = self._get_timing_signal_1d_pos( + pos_indices, dtype=queries.dtype + ) + projected_sin_emb = self.pos_proj(sin_emb_timing_signal) + sin_emb = keras.ops.squeeze( + keras.ops.reshape( 
+ projected_sin_emb, + (1, max_span_plus_1, self.num_heads, self.head_dim), + ), + axis=0, + ) + queries_p = keras.ops.transpose(queries, (0, 3, 1, 2, 4)) + keys_p_t = keras.ops.transpose(keys, (0, 3, 1, 4, 2)) + term_ac = keras.ops.matmul(queries_p, keys_p_t) + q_permuted = keras.ops.transpose(queries, (0, 3, 1, 2, 4)) + s_permuted = keras.ops.transpose(sin_emb, (1, 2, 0)) + + q_reshaped_dim = -1 + if num_query_blocks is not None: + q_reshaped_dim = num_query_blocks * query_block_size + + q_reshaped = keras.ops.reshape( + q_permuted, + ( + batch_size * num_heads, + q_reshaped_dim, + head_dim, + ), + ) + term_bd_unshifed_matmul = keras.ops.matmul(q_reshaped, s_permuted) + term_bd_unshifed = keras.ops.reshape( + term_bd_unshifed_matmul, + ( + batch_size, + num_heads, + -1, + query_block_size, + max_span_plus_1, + ), + ) + term_bd_shifted = self._relative_shift( + term_bd_unshifed, + batch_size, + num_heads, + num_query_blocks, + query_block_size, + key_context_size, + max_span_plus_1, + ) + return term_ac + term_bd_shifted + + def get_config(self): + config = super().get_config() + config.update( + { + "hidden_size": self.hidden_size, + "conf_num_attention_heads": self.conf_num_attention_heads, + "conf_attention_context_left": self.conf_attention_context_left, + "conf_attention_context_right": self.conf_attention_context_right, # noqa: E501 + } + ) + return config + + +class Gemma3nTextAttention(keras.layers.Layer): + """A multi-head attention layer for text sequences. + + This layer implements the text attention mechanism for the Gemma3n model, + which is a standard multi-head attention architecture. It includes features + such as Grouped-Query Attention (GQA), RMS Normalization for query and key + states, and Rotary Position Embeddings (RoPE) to incorporate positional + information. + + Args: + hidden_size: int. The size of the hidden state. + num_attention_heads: int. The number of query attention heads. + num_key_value_heads: int. The number of key and value attention heads. + If `num_key_value_heads` is not equal to `num_attention_heads`, this + layer implements Grouped-Query Attention. + head_dim: int. The dimension of each attention head. + attention_dropout: float. Dropout probability for the attention scores. + attention_bias: bool. If `True`, dense layers for query, key, value, + and output projections will use a bias term. + rms_norm_eps: float. The epsilon value for RMS Normalization layers. + sliding_window: int, optional. The size of the sliding window for + local attention. If `None`, global attention is used. Defaults to + `None`. 
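+
+    Example:
+
+    A minimal construction sketch. The argument values below are
+    illustrative assumptions rather than settings from a released
+    Gemma3n checkpoint. The layer is called with hidden states, a
+    `(cos, sin)` rotary embedding pair, and an attention mask, per the
+    `call` signature.
+
+    ```python
+    attention = Gemma3nTextAttention(
+        hidden_size=256,
+        num_attention_heads=8,
+        num_key_value_heads=2,
+        head_dim=32,
+        attention_dropout=0.0,
+        attention_bias=False,
+        rms_norm_eps=1e-6,
+        sliding_window=512,
+    )
+    ```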
+ """ + + def __init__( + self, + hidden_size, + num_attention_heads, + num_key_value_heads, + head_dim, + attention_dropout, + attention_bias, + rms_norm_eps, + sliding_window=None, + dtype=None, + **kwargs, + ): + super().__init__(dtype=dtype, **kwargs) + self.hidden_size = hidden_size + self.num_attention_heads = num_attention_heads + self.num_key_value_heads = num_key_value_heads + self.head_dim = head_dim + self.attention_dropout = attention_dropout + self.attention_bias = attention_bias + self.rms_norm_eps = rms_norm_eps + self.sliding_window = sliding_window + self.num_key_value_groups = ( + self.num_attention_heads // self.num_key_value_heads + ) + self.q_proj = keras.layers.Dense( + self.num_attention_heads * self.head_dim, + use_bias=self.attention_bias, + name="q_proj", + dtype=self.dtype_policy, + ) + self.k_proj = keras.layers.Dense( + self.num_key_value_heads * self.head_dim, + use_bias=self.attention_bias, + name="k_proj", + dtype=self.dtype_policy, + ) + self.v_proj = keras.layers.Dense( + self.num_key_value_heads * self.head_dim, + use_bias=self.attention_bias, + name="v_proj", + dtype=self.dtype_policy, + ) + self.o_proj = keras.layers.Dense( + self.hidden_size, + use_bias=self.attention_bias, + name="o_proj", + dtype=self.dtype_policy, + ) + self.q_norm = Gemma3nRMSNorm( + dim=self.head_dim, + eps=self.rms_norm_eps, + name="q_norm", + dtype=self.dtype_policy, + ) + self.k_norm = Gemma3nRMSNorm( + dim=self.head_dim, + eps=self.rms_norm_eps, + name="k_norm", + dtype=self.dtype_policy, + ) + self.v_norm = Gemma3nRMSNorm( + dim=self.head_dim, + eps=self.rms_norm_eps, + with_scale=False, + name="v_norm", + dtype=self.dtype_policy, + ) + + def build(self, input_shape): + self.q_proj.build(input_shape) + self.k_proj.build(input_shape) + self.v_proj.build(input_shape) + self.o_proj.build( + input_shape[:-1] + (self.num_attention_heads * self.head_dim,) + ) + norm_shape = input_shape[:-1] + ( + self.num_attention_heads, + self.head_dim, + ) + self.q_norm.build(norm_shape) + self.k_norm.build(norm_shape) + self.v_norm.build(norm_shape) + super().build(input_shape) + + def call( + self, hidden_states, position_embeddings, attention_mask, training=False + ): + input_shape = keras.ops.shape(hidden_states)[:-1] + cos, sin = position_embeddings + + query_states = self.q_proj(hidden_states) + query_states = keras.ops.reshape( + query_states, + input_shape + (self.num_attention_heads, self.head_dim), + ) + query_states = self.q_norm(query_states) + query_states = apply_rotary_pos_emb( + query_states, cos, sin, unsqueeze_dim=2 + ) + query_states = keras.ops.transpose(query_states, (0, 2, 1, 3)) + key_states = self.k_proj(hidden_states) + key_states = keras.ops.reshape( + key_states, input_shape + (self.num_key_value_heads, self.head_dim) + ) + key_states = self.k_norm(key_states) + key_states = apply_rotary_pos_emb(key_states, cos, sin, unsqueeze_dim=2) + key_states = keras.ops.transpose(key_states, (0, 2, 1, 3)) + value_states = self.v_proj(hidden_states) + value_states = keras.ops.reshape( + value_states, + input_shape + (self.num_key_value_heads, self.head_dim), + ) + value_states = self.v_norm(value_states) + value_states = keras.ops.transpose(value_states, (0, 2, 1, 3)) + attn_output, attn_weights = eager_attention_forward( + query_states, + key_states, + value_states, + self.num_key_value_groups, + self.head_dim, + attention_mask, + dropout=self.attention_dropout if training else 0.0, + training=training, + ) + attn_output = keras.ops.reshape(attn_output, input_shape + (-1,)) + 
attn_output = self.o_proj(attn_output) + return attn_output, attn_weights + + def get_config(self): + config = super().get_config() + config.update( + { + "hidden_size": self.hidden_size, + "num_attention_heads": self.num_attention_heads, + "num_key_value_heads": self.num_key_value_heads, + "head_dim": self.head_dim, + "attention_dropout": self.attention_dropout, + "attention_bias": self.attention_bias, + "rms_norm_eps": self.rms_norm_eps, + "sliding_window": self.sliding_window, + } + ) + return config + + +class Gemma3nAudioAttention(keras.layers.Layer): + """An attention layer specialized for audio sequences. + + This layer implements the attention mechanism for the audio tower of the + Gemma3n model. It is designed to handle long audio sequences by processing + the input in fixed-size chunks. For each chunk of queries, it attends to a + larger context of keys and values, defined by a left (past) and right + (future) context window. This allows the model to capture local and more + distant dependencies efficiently. + + Args: + hidden_size: int. The size of the hidden state. + conf_num_attention_heads: int. The number of attention heads. + conf_attention_chunk_size: int. The size of each processing chunk. + conf_attention_context_right: int. The number of steps to attend to in + the future. + conf_attention_context_left: int. The number of steps to attend to in + the past, including the current step. + conf_attention_logit_cap: float. The soft cap value to apply to the + attention logits. + """ + + def __init__( + self, + hidden_size, + conf_num_attention_heads, + conf_attention_chunk_size, + conf_attention_context_right, + conf_attention_context_left, + conf_attention_logit_cap, + dtype=None, + **kwargs, + ): + super().__init__(dtype=dtype, **kwargs) + self.hidden_size = hidden_size + self.conf_num_attention_heads = conf_num_attention_heads + self.conf_attention_chunk_size = conf_attention_chunk_size + self.conf_attention_context_right = conf_attention_context_right + self.conf_attention_context_left = conf_attention_context_left + self.conf_attention_logit_cap = conf_attention_logit_cap + self.num_heads = conf_num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.chunk_size = conf_attention_chunk_size + self.max_future_horizon = conf_attention_context_right + self.max_past_horizon = max(0, conf_attention_context_left - 1) + self.attention_logits_soft_cap = conf_attention_logit_cap + self.context_size = ( + self.chunk_size + self.max_past_horizon + self.max_future_horizon + ) + self.relative_position_embedding = ( + Gemma3nAudioRelativePositionEmbedding( + hidden_size, + conf_num_attention_heads, + conf_attention_context_left, + conf_attention_context_right, + name="relative_position_embedding", + dtype=self.dtype_policy, + ) + ) + self.q_proj = keras.layers.Dense( + self.num_heads * self.head_dim, + use_bias=False, + name="q_proj", + dtype=self.dtype_policy, + ) + self.k_proj = keras.layers.Dense( + self.num_heads * self.head_dim, + use_bias=False, + name="k_proj", + dtype=self.dtype_policy, + ) + self.v_proj = keras.layers.Dense( + self.num_heads * self.head_dim, + use_bias=False, + name="v_proj", + dtype=self.dtype_policy, + ) + q_scale = self.head_dim**-0.5 + r_softplus_0 = 1.0 / np.log(1 + np.exp(0.0)) # softplus(0) for numpy + self.q_scale = q_scale * r_softplus_0 + + lower_causal_mask = np.tril( + np.ones((self.context_size, self.chunk_size), dtype=bool), k=0 + ).T + upper_causal_mask = np.tril( + np.ones((self.chunk_size, self.context_size), 
dtype=bool), + k=self.max_past_horizon + self.max_future_horizon, + ) + local_causal_valid_mask = np.ones( + (self.chunk_size, self.context_size), dtype=bool + ) + local_causal_valid_mask = ( + local_causal_valid_mask * lower_causal_mask * upper_causal_mask + ) + self.local_causal_valid_mask = keras.ops.convert_to_tensor( + local_causal_valid_mask + ) + self.softcap = keras.ops.convert_to_tensor( + self.attention_logits_soft_cap, dtype="float32" + ) + + def build(self, input_shape): + self.per_dim_scale = self.add_weight( + shape=(self.head_dim,), + initializer="zeros", + trainable=True, + name="per_dim_scale", + dtype=self.dtype_policy.variable_dtype, + ) + self.q_proj.build(input_shape) + self.k_proj.build(input_shape) + self.v_proj.build(input_shape) + self.relative_position_embedding.build(input_shape) + super().build(input_shape) + + def _pad_dim1(self, x, pad_left, pad_right): + paddings = [[0, 0], [pad_left, pad_right]] + [ + [0, 0] for _ in range(len(x.shape) - 2) + ] + return keras.ops.pad(x, paddings) + + def _convert_to_block(self, hidden_states): + b, t = keras.ops.shape(hidden_states)[:2] + tail_shape_list = list(hidden_states.shape[2:]) + num_blocks = (t + self.chunk_size - 1) // self.chunk_size + padding_len = num_blocks * self.chunk_size - t + hidden_states = self._pad_dim1(hidden_states, 0, padding_len) + permute_dims = [b, num_blocks, self.chunk_size] + tail_shape_list + return keras.ops.reshape(hidden_states, permute_dims) + + def _extract_block_context(self, hidden_states): + pad_left = self.max_past_horizon + pad_right = self.max_future_horizon + self.chunk_size - 1 + hidden_states = self._pad_dim1(hidden_states, pad_left, pad_right) + _, t = keras.ops.shape(hidden_states)[:2] + frame_len = self.context_size + frame_step = self.chunk_size + num_frames = (t - frame_len) // frame_step + 1 + + start_indices = keras.ops.arange(0, num_frames) * frame_step + frame_offsets = keras.ops.arange(0, frame_len) + indices = keras.ops.expand_dims( + start_indices, axis=1 + ) + keras.ops.expand_dims(frame_offsets, axis=0) + return keras.ops.take(hidden_states, indices, axis=1) + + def call(self, hidden_states, mask): + qkv_shape = keras.ops.shape(hidden_states)[:-1] + ( + self.num_heads, + self.head_dim, + ) + query_states = keras.ops.reshape(self.q_proj(hidden_states), qkv_shape) + key_states = keras.ops.reshape(self.k_proj(hidden_states), qkv_shape) + value_states = keras.ops.reshape(self.v_proj(hidden_states), qkv_shape) + per_dim_scale_sp = keras.ops.softplus(self.per_dim_scale) + query_states = query_states * self.q_scale * per_dim_scale_sp + batch_size, q_time = keras.ops.shape(query_states)[:2] + query_blocks = self._convert_to_block(query_states) + key_blocks = self._extract_block_context(key_states) + value_blocks = self._extract_block_context(value_states) + num_query_blocks = keras.ops.shape(query_blocks)[1] + original_valid_mask = keras.ops.logical_not(mask) + extracted_valid_mask_blocks = self._extract_block_context( + original_valid_mask + ) + if ( + len(extracted_valid_mask_blocks.shape) == 4 + and extracted_valid_mask_blocks.shape[2] + * extracted_valid_mask_blocks.shape[3] + == self.context_size + ): + extracted_valid_mask_blocks = keras.ops.reshape( + extracted_valid_mask_blocks, + (batch_size, num_query_blocks, self.context_size), + ) + condition_from_input_validity = keras.ops.expand_dims( + keras.ops.expand_dims(extracted_valid_mask_blocks, 1), -2 + ) + condition_from_causality = keras.ops.expand_dims( + keras.ops.expand_dims( + 
keras.ops.expand_dims(self.local_causal_valid_mask, 0), 0 + ), + 0, + ) + final_condition_for_where = keras.ops.logical_and( + condition_from_input_validity, + keras.ops.cast(condition_from_causality, "bool"), + ) + logits = self.relative_position_embedding(query_blocks, key_blocks) + softcap = keras.ops.cast(self.softcap, dtype=logits.dtype) + logits = logits / softcap + logits = keras.ops.tanh(logits) + logits = logits * softcap + min_val = np.finfo(keras.backend.floatx()).min + logits = keras.ops.where(final_condition_for_where, logits, min_val) + probabilities = keras.ops.softmax( + keras.ops.cast(logits, "float32"), axis=-1 + ) + probabilities = keras.ops.cast(probabilities, value_blocks.dtype) + context_vectors = keras.ops.einsum( + "bnuwc,bucnh->buwnh", probabilities, value_blocks + ) + context_vectors = keras.ops.reshape( + context_vectors, + ( + batch_size, + num_query_blocks * self.chunk_size, + self.num_heads, + self.head_dim, + ), + ) + context_vectors = context_vectors[:, :q_time] + return context_vectors + + def get_config(self): + config = super().get_config() + config.update( + { + "hidden_size": self.hidden_size, + "conf_num_attention_heads": self.conf_num_attention_heads, + "conf_attention_chunk_size": self.conf_attention_chunk_size, + "conf_attention_context_right": self.conf_attention_context_right, # noqa: E501 + "conf_attention_context_left": self.conf_attention_context_left, + "conf_attention_logit_cap": self.conf_attention_logit_cap, + } + ) + return config diff --git a/keras_hub/src/models/gemma3n/gemma3n_audio_encoder.py b/keras_hub/src/models/gemma3n/gemma3n_audio_encoder.py new file mode 100644 index 0000000000..0a4cdc05e6 --- /dev/null +++ b/keras_hub/src/models/gemma3n/gemma3n_audio_encoder.py @@ -0,0 +1,511 @@ +import keras + +from keras_hub.src.models.gemma3n.gemma3n_audio_layers import ( + Gemma3nAudioConformerAttention, +) +from keras_hub.src.models.gemma3n.gemma3n_audio_layers import ( + Gemma3nAudioConformerFeedForward, +) +from keras_hub.src.models.gemma3n.gemma3n_audio_layers import ( + Gemma3nAudioConformerLightConv1d, +) +from keras_hub.src.models.gemma3n.gemma3n_audio_layers import ( + Gemma3nAudioSSCPConvBlock, +) +from keras_hub.src.models.gemma3n.rms_normalization import Gemma3nRMSNorm + + +class Gemma3nAudioSubSampleConvProjection(keras.layers.Layer): + """A convolutional projection layer that subsamples audio features. + + This layer applies two blocks of 2D convolutions to the input audio + spectrogram. Each block subsamples the input along the time and frequency + dimensions. The output is then flattened and projected to the model's + hidden size. + + Args: + input_feat_size: int. The number of frequency bins in the input + spectrogram. + hidden_size: int. The dimensionality of the output embeddings. + sscp_conv_channel_size: list of int. The number of output channels for + each of the two convolutional blocks. + sscp_conv_kernel_size: list of tuple of int. The kernel sizes for each + of the two convolutional blocks. + sscp_conv_stride_size: list of tuple of int. The stride sizes for each + of the two convolutional blocks. + sscp_conv_group_norm_eps: float. Epsilon value for the Group + Normalization layers within the convolutional blocks. 
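+
+    Example:
+
+    A minimal usage sketch; the configuration values and shapes below
+    are illustrative assumptions, not settings from a released Gemma3n
+    checkpoint.
+
+    ```python
+    import keras
+
+    projection = Gemma3nAudioSubSampleConvProjection(
+        input_feat_size=128,
+        hidden_size=256,
+        sscp_conv_channel_size=[128, 32],
+        sscp_conv_kernel_size=[(3, 3), (3, 3)],
+        sscp_conv_stride_size=[(2, 2), (2, 2)],
+        sscp_conv_group_norm_eps=1e-3,
+    )
+    # (batch, time, mel bins).
+    spectrogram = keras.ops.ones((1, 160, 128))
+    # Both conv blocks use stride 2 in time, so 160 frames -> 40.
+    features = projection(spectrogram)  # Shape: (1, 40, 256).
+    ```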
+ """ + + def __init__( + self, + input_feat_size, + hidden_size, + sscp_conv_channel_size, + sscp_conv_kernel_size, + sscp_conv_stride_size, + sscp_conv_group_norm_eps, + dtype=None, + **kwargs, + ): + super().__init__(dtype=dtype, **kwargs) + self.hidden_size = hidden_size + self.input_feat_size = input_feat_size + self.sscp_conv_channel_size = sscp_conv_channel_size + self.sscp_conv_kernel_size = sscp_conv_kernel_size + self.sscp_conv_stride_size = sscp_conv_stride_size + self.sscp_conv_group_norm_eps = sscp_conv_group_norm_eps + current_f_for_block_input = input_feat_size + self.calculated_block_padding = [] + self.calculated_f_out_dims = [] + for i in range(2): + kernel_h, kernel_w = sscp_conv_kernel_size[i] + _, stride_w = sscp_conv_stride_size[i] + pad_t_top, pad_t_bottom, pad_f_left, pad_f_right = ( + 0, + kernel_h - 1, + 1, + 1, + ) + manual_padding_tuple = ( + pad_f_left, + pad_f_right, + pad_t_top, + pad_t_bottom, + ) + self.calculated_block_padding.append(manual_padding_tuple) + f_in_padded = current_f_for_block_input + pad_f_left + pad_f_right + f_out_after_conv = (f_in_padded - kernel_w) // stride_w + 1 + self.calculated_f_out_dims.append(f_out_after_conv) + current_f_for_block_input = f_out_after_conv + self.conv_0 = Gemma3nAudioSSCPConvBlock( + idx=0, + input_freq_dim=input_feat_size, + sscp_conv_channel_size=sscp_conv_channel_size, + sscp_conv_kernel_size=sscp_conv_kernel_size, + sscp_conv_stride_size=sscp_conv_stride_size, + sscp_conv_group_norm_eps=sscp_conv_group_norm_eps, + manual_padding=self.calculated_block_padding[0], + name="conv_0", + dtype=self.dtype_policy, + ) + self.conv_1 = Gemma3nAudioSSCPConvBlock( + idx=1, + name="conv_1", + input_freq_dim=self.calculated_f_out_dims[0], + sscp_conv_channel_size=sscp_conv_channel_size, + sscp_conv_kernel_size=sscp_conv_kernel_size, + sscp_conv_stride_size=sscp_conv_stride_size, + sscp_conv_group_norm_eps=sscp_conv_group_norm_eps, + manual_padding=self.calculated_block_padding[1], + dtype=self.dtype_policy, + ) + self.input_proj_linear = keras.layers.Dense( + hidden_size, + use_bias=False, + name="input_proj_linear", + dtype=self.dtype_policy, + ) + + def build(self, input_shape): + _, t_in, f_in = input_shape + conv0_input_shape = (None, 1, t_in, f_in) + self.conv_0.build(conv0_input_shape) + if t_in is not None: + pad_t_top_0, pad_t_bottom_0 = self.calculated_block_padding[0][2:4] + kernel_h_0, _ = self.sscp_conv_kernel_size[0] + stride_h_0, _ = self.sscp_conv_stride_size[0] + t_padded_0 = t_in + pad_t_top_0 + pad_t_bottom_0 + t_out_0 = (t_padded_0 - kernel_h_0) // stride_h_0 + 1 + else: + t_out_0 = None + c_out_0 = self.sscp_conv_channel_size[0] + f_out_0 = self.calculated_f_out_dims[0] + conv1_input_shape = (None, c_out_0, t_out_0, f_out_0) + self.conv_1.build(conv1_input_shape) + super().build(input_shape) + + def compute_output_shape(self, input_shape): + b, t_in, f_in = input_shape + if t_in is not None: + _, _, pad_t_top_0, pad_t_bottom_0 = self.calculated_block_padding[0] + kernel_h_0, _ = self.sscp_conv_kernel_size[0] + stride_h_0, _ = self.sscp_conv_stride_size[0] + t_padded_0 = t_in + pad_t_top_0 + pad_t_bottom_0 + t_out_0 = (t_padded_0 - kernel_h_0) // stride_h_0 + 1 + _, _, pad_t_top_1, pad_t_bottom_1 = self.calculated_block_padding[1] + kernel_h_1, _ = self.sscp_conv_kernel_size[1] + stride_h_1, _ = self.sscp_conv_stride_size[1] + t_padded_1 = t_out_0 + pad_t_top_1 + pad_t_bottom_1 + t_out_1 = (t_padded_1 - kernel_h_1) // stride_h_1 + 1 + else: + t_out_1 = None + return (b, t_out_1, self.hidden_size) + + def 
call(self, audio_encodings): + audio_encodings_reshaped = keras.ops.expand_dims(audio_encodings, 1) + x = self.conv_0(audio_encodings_reshaped) + x = self.conv_1(x) + b, c_out, t_out, f_out = keras.ops.shape(x) + x_permuted = keras.ops.transpose(x, (0, 2, 3, 1)) + output_flattened = keras.ops.reshape( + x_permuted, (b, t_out, f_out * c_out) + ) + return self.input_proj_linear(output_flattened) + + def get_config(self): + config = super().get_config() + config.update( + { + "input_feat_size": self.input_feat_size, + "hidden_size": self.hidden_size, + "sscp_conv_channel_size": self.sscp_conv_channel_size, + "sscp_conv_kernel_size": self.sscp_conv_kernel_size, + "sscp_conv_stride_size": self.sscp_conv_stride_size, + "sscp_conv_group_norm_eps": self.sscp_conv_group_norm_eps, + } + ) + return config + + +class Gemma3nAudioConformerBlock(keras.layers.Layer): + """A single conformer block for processing audio sequences. + + This layer implements the conformer architecture, which consists of a + sequence of four modules: a feed-forward module, a multi-head + self-attention module, a convolution module, and a final feed-forward + module. The output of each module is added to its input through a residual + connection. + + Args: + hidden_size: int. The dimensionality of the input and output embeddings. + rms_norm_eps: float. Epsilon value for the Gemma 3n RMS normalization + layers. + gradient_clipping: float. The maximum absolute value for the gradient. + conf_residual_weight: float. The weight for the residual connection in + the feed-forward layers. + conf_num_attention_heads: int. The number of attention heads. + conf_attention_chunk_size: int. The size of chunks for local attention. + conf_attention_context_right: int. The right context size for local + attention. + conf_attention_context_left: int. The left context size for local + attention. + conf_attention_logit_cap: float. The maximum value for the attention + logits. + conf_conv_kernel_size: int. The kernel size for the 1D convolution + layer. 
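+
+    Example:
+
+    A minimal usage sketch; the hyperparameter values below are
+    illustrative assumptions, not settings from a released Gemma3n
+    checkpoint. The boolean mask marks padded frames with `True`.
+
+    ```python
+    import keras
+
+    block = Gemma3nAudioConformerBlock(
+        hidden_size=256,
+        rms_norm_eps=1e-6,
+        gradient_clipping=10000.0,
+        conf_residual_weight=0.5,
+        conf_num_attention_heads=8,
+        conf_attention_chunk_size=12,
+        conf_attention_context_right=0,
+        conf_attention_context_left=13,
+        conf_attention_logit_cap=50.0,
+        conf_conv_kernel_size=5,
+    )
+    features = keras.ops.ones((1, 24, 256))
+    mask = keras.ops.zeros((1, 24), dtype="bool")  # All frames valid.
+    outputs = block((features, mask))  # Shape: (1, 24, 256).
+    ```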
+ """ + + def __init__( + self, + hidden_size, + rms_norm_eps, + gradient_clipping, + conf_residual_weight, + conf_num_attention_heads, + conf_attention_chunk_size, + conf_attention_context_right, + conf_attention_context_left, + conf_attention_logit_cap, + conf_conv_kernel_size, + dtype=None, + **kwargs, + ): + super().__init__(dtype=dtype, **kwargs) + self.hidden_size = hidden_size + self.rms_norm_eps = rms_norm_eps + self.gradient_clipping = gradient_clipping + self.conf_residual_weight = conf_residual_weight + self.conf_num_attention_heads = conf_num_attention_heads + self.conf_attention_chunk_size = conf_attention_chunk_size + self.conf_attention_context_right = conf_attention_context_right + self.conf_attention_context_left = conf_attention_context_left + self.conf_attention_logit_cap = conf_attention_logit_cap + self.conf_conv_kernel_size = conf_conv_kernel_size + self.ffw_layer_start = Gemma3nAudioConformerFeedForward( + hidden_size=hidden_size, + gradient_clipping=gradient_clipping, + conf_residual_weight=conf_residual_weight, + rms_norm_eps=rms_norm_eps, + dtype=self.dtype_policy, + name="ffw_layer_start", + ) + self.attention = Gemma3nAudioConformerAttention( + hidden_size=hidden_size, + gradient_clipping=gradient_clipping, + conf_num_attention_heads=conf_num_attention_heads, + conf_attention_chunk_size=conf_attention_chunk_size, + conf_attention_context_right=conf_attention_context_right, + conf_attention_context_left=conf_attention_context_left, + conf_attention_logit_cap=conf_attention_logit_cap, + dtype=self.dtype_policy, + name="attention", + ) + self.lconv1d = Gemma3nAudioConformerLightConv1d( + hidden_size=hidden_size, + rms_norm_eps=rms_norm_eps, + conf_conv_kernel_size=conf_conv_kernel_size, + gradient_clipping=gradient_clipping, + dtype=self.dtype_policy, + name="lconv1d", + ) + self.ffw_layer_end = Gemma3nAudioConformerFeedForward( + hidden_size=hidden_size, + gradient_clipping=gradient_clipping, + conf_residual_weight=conf_residual_weight, + rms_norm_eps=rms_norm_eps, + dtype=self.dtype_policy, + name="ffw_layer_end", + ) + self.norm = Gemma3nRMSNorm( + hidden_size, eps=rms_norm_eps, name="norm", dtype=self.dtype_policy + ) + + def build(self, input_shape): + audio_encodings_shape, _ = input_shape + self.ffw_layer_start.build(audio_encodings_shape) + self.attention.build(audio_encodings_shape) + self.lconv1d.build(audio_encodings_shape) + self.ffw_layer_end.build(audio_encodings_shape) + self.norm.build(audio_encodings_shape) + super().build(input_shape) + + def compute_output_shape(self, input_shape): + audio_encodings_shape, _ = input_shape + return audio_encodings_shape + + def call(self, inputs): + audio_encodings, audio_mel_mask = inputs + audio_encodings = self.ffw_layer_start(audio_encodings) + audio_encodings = self.attention(audio_encodings, audio_mel_mask) + validity_mask_for_lconv = keras.ops.logical_not(audio_mel_mask) + audio_encodings_for_lconv_input = audio_encodings * keras.ops.cast( + keras.ops.expand_dims(validity_mask_for_lconv, -1), + audio_encodings.dtype, + ) + audio_encodings = self.lconv1d(audio_encodings_for_lconv_input) + audio_encodings = self.ffw_layer_end(audio_encodings) + audio_encodings = keras.ops.clip( + audio_encodings, -self.gradient_clipping, self.gradient_clipping + ) + output = self.norm(audio_encodings) + return output + + def get_config(self): + config = super().get_config() + config.update( + { + "hidden_size": self.hidden_size, + "rms_norm_eps": self.rms_norm_eps, + "gradient_clipping": self.gradient_clipping, + 
"conf_residual_weight": self.conf_residual_weight, + "conf_num_attention_heads": self.conf_num_attention_heads, + "conf_attention_chunk_size": self.conf_attention_chunk_size, + "conf_attention_context_right": self.conf_attention_context_right, # noqa: E501 + "conf_attention_context_left": self.conf_attention_context_left, + "conf_attention_logit_cap": self.conf_attention_logit_cap, + "conf_conv_kernel_size": self.conf_conv_kernel_size, + } + ) + return config + + +class Gemma3nAudioEncoder(keras.layers.Layer): + """The main audio encoder for the Gemma3n model. + + This layer combines a subsampling convolutional projection with a stack of + conformer blocks to encode audio spectrograms into a sequence of hidden + states. + + Args: + hidden_size: int. The dimensionality of the embeddings. + input_feat_size: int. The number of frequency bins in the input + spectrogram. + sscp_conv_channel_size: list of int. The number of output channels for + each of the two convolutional blocks in the subsampler. + sscp_conv_kernel_size: list of tuple of int. The kernel sizes for each + of the two convolutional blocks in the subsampler. + sscp_conv_stride_size: list of tuple of int. The stride sizes for each + of the two convolutional blocks in the subsampler. + sscp_conv_group_norm_eps: float. Epsilon value for the Group + Normalization layers in the subsampler. + conf_num_hidden_layers: int. The number of conformer blocks. + rms_norm_eps: float. Epsilon value for the Gemma 3n RMS normalization + layers. + gradient_clipping: float. The maximum absolute value for the gradient. + conf_residual_weight: float. The weight for the residual connection in + the feed-forward layers of the conformer blocks. + conf_num_attention_heads: int. The number of attention heads in the + conformer blocks. + conf_attention_chunk_size: int. The size of chunks for local attention + in the conformer blocks. + conf_attention_context_right: int. The right context size for local + attention in the conformer blocks. + conf_attention_context_left: int. The left context size for local + attention in the conformer blocks. + conf_attention_logit_cap: float. The maximum value for the attention + logits in the conformer blocks. + conf_conv_kernel_size: int. The kernel size for the 1D convolution + layer in the conformer blocks. + conf_reduction_factor: int. The factor by which to reduce the sequence + length of the final output. 
+ """ + + def __init__( + self, + hidden_size, + input_feat_size, + sscp_conv_channel_size, + sscp_conv_kernel_size, + sscp_conv_stride_size, + sscp_conv_group_norm_eps, + conf_num_hidden_layers, + rms_norm_eps, + gradient_clipping, + conf_residual_weight, + conf_num_attention_heads, + conf_attention_chunk_size, + conf_attention_context_right, + conf_attention_context_left, + conf_attention_logit_cap, + conf_conv_kernel_size, + conf_reduction_factor, + dtype=None, + **kwargs, + ): + super().__init__(dtype=dtype, **kwargs) + self.hidden_size = hidden_size + self.input_feat_size = input_feat_size + self.sscp_conv_channel_size = sscp_conv_channel_size + self.sscp_conv_kernel_size = sscp_conv_kernel_size + self.sscp_conv_stride_size = sscp_conv_stride_size + self.sscp_conv_group_norm_eps = sscp_conv_group_norm_eps + self.conf_num_hidden_layers = conf_num_hidden_layers + self.rms_norm_eps = rms_norm_eps + self.gradient_clipping = gradient_clipping + self.conf_residual_weight = conf_residual_weight + self.conf_num_attention_heads = conf_num_attention_heads + self.conf_attention_chunk_size = conf_attention_chunk_size + self.conf_attention_context_right = conf_attention_context_right + self.conf_attention_context_left = conf_attention_context_left + self.conf_attention_logit_cap = conf_attention_logit_cap + self.conf_conv_kernel_size = conf_conv_kernel_size + self.conf_reduction_factor = conf_reduction_factor + self.subsample_conv_projection = Gemma3nAudioSubSampleConvProjection( + input_feat_size, + hidden_size, + sscp_conv_channel_size, + sscp_conv_kernel_size, + sscp_conv_stride_size, + sscp_conv_group_norm_eps, + dtype=self.dtype_policy, + name="subsample_conv_projection", + ) + self.conformer = [ + Gemma3nAudioConformerBlock( + hidden_size, + rms_norm_eps, + gradient_clipping, + conf_residual_weight, + conf_num_attention_heads, + conf_attention_chunk_size, + conf_attention_context_right, + conf_attention_context_left, + conf_attention_logit_cap, + conf_conv_kernel_size, + dtype=self.dtype_policy, + name=f"conformer_block_{i}", + ) + for i in range(conf_num_hidden_layers) + ] + + def build(self, input_shape): + audio_mel_shape, _ = input_shape + self.subsample_conv_projection.build(audio_mel_shape) + encodings_shape = self.subsample_conv_projection.compute_output_shape( + audio_mel_shape + ) + t_sub = encodings_shape[1] + time_stride_product = 1 + for stride_pair in self.sscp_conv_stride_size: + time_stride_product *= stride_pair[0] + batch_size = ( + audio_mel_shape[0] if audio_mel_shape[0] is not None else -1 + ) + current_mask_shape = (batch_size, t_sub) + current_encodings_shape = encodings_shape + for block in self.conformer: + block.build((current_encodings_shape, current_mask_shape)) + current_encodings_shape = block.compute_output_shape( + (current_encodings_shape, current_mask_shape) + ) + super().build(input_shape) + + def compute_output_shape(self, input_shape): + audio_mel_shape, _ = input_shape + encodings_shape = self.subsample_conv_projection.compute_output_shape( + audio_mel_shape + ) + current_encodings_shape = encodings_shape + for block in self.conformer: + current_encodings_shape = block.compute_output_shape( + (current_encodings_shape, None) + ) + if self.conf_reduction_factor > 1: + t_sub = current_encodings_shape[1] + if t_sub is not None: + new_t = t_sub // self.conf_reduction_factor + current_encodings_shape = ( + current_encodings_shape[0], + new_t, + current_encodings_shape[2], + ) + return current_encodings_shape, None + + def call(self, inputs): + audio_mel, 
audio_mel_mask = inputs + audio_encodings = self.subsample_conv_projection(audio_mel) + t_sub = keras.ops.shape(audio_encodings)[1] + time_stride_product = 1 + for stride_pair in self.sscp_conv_stride_size: + time_stride_product *= stride_pair[0] + indices = keras.ops.arange(0, t_sub) * time_stride_product + indices = keras.ops.clip( + indices, 0, keras.ops.shape(audio_mel_mask)[1] - 1 + ) + current_mask = keras.ops.take(audio_mel_mask, indices, axis=1) + for block in self.conformer: + audio_encodings = block((audio_encodings, current_mask)) + + if self.conf_reduction_factor > 1: + audio_encodings = audio_encodings[:, :: self.conf_reduction_factor] + current_mask = current_mask[:, :: self.conf_reduction_factor] + return audio_encodings * keras.ops.cast( + keras.ops.logical_not(keras.ops.expand_dims(current_mask, axis=-1)), + audio_encodings.dtype, + ), current_mask + + def get_config(self): + config = super().get_config() + config.update( + { + "hidden_size": self.hidden_size, + "input_feat_size": self.input_feat_size, + "sscp_conv_channel_size": self.sscp_conv_channel_size, + "sscp_conv_kernel_size": self.sscp_conv_kernel_size, + "sscp_conv_stride_size": self.sscp_conv_stride_size, + "sscp_conv_group_norm_eps": self.sscp_conv_group_norm_eps, + "conf_num_hidden_layers": self.conf_num_hidden_layers, + "rms_norm_eps": self.rms_norm_eps, + "gradient_clipping": self.gradient_clipping, + "conf_residual_weight": self.conf_residual_weight, + "conf_num_attention_heads": self.conf_num_attention_heads, + "conf_attention_chunk_size": self.conf_attention_chunk_size, + "conf_attention_context_right": self.conf_attention_context_right, # noqa: E501 + "conf_attention_context_left": self.conf_attention_context_left, + "conf_attention_logit_cap": self.conf_attention_logit_cap, + "conf_conv_kernel_size": self.conf_conv_kernel_size, + "conf_reduction_factor": self.conf_reduction_factor, + } + ) + return config diff --git a/keras_hub/src/models/gemma3n/gemma3n_audio_layers.py b/keras_hub/src/models/gemma3n/gemma3n_audio_layers.py new file mode 100644 index 0000000000..11d15813b9 --- /dev/null +++ b/keras_hub/src/models/gemma3n/gemma3n_audio_layers.py @@ -0,0 +1,526 @@ +import keras + +from keras_hub.src.models.gemma3n.gemma3n_attention import Gemma3nAudioAttention +from keras_hub.src.models.gemma3n.rms_normalization import Gemma3nRMSNorm + + +class Gemma3nAudioCumulativeGroupNorm(keras.layers.Layer): + """A cumulative group normalization layer for audio features. + + This layer normalizes the input hidden states based on cumulative statistics + calculated over the time dimension. It is designed to process audio + spectrograms or similar sequential data. + + Args: + num_channels: int. The number of channels for normalization. + feature_dims: tuple. The dimensions of the features to be normalized. + eps: float. A small epsilon value to add to the variance to avoid + division by zero. 
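+
+    Example:
+
+    A minimal usage sketch with assumed shapes: the input is laid out
+    as `(batch, time, *feature_dims, num_channels)` and statistics are
+    accumulated along the time axis.
+
+    ```python
+    import keras
+
+    norm = Gemma3nAudioCumulativeGroupNorm(
+        num_channels=32,
+        feature_dims=(64,),
+        eps=1e-3,
+    )
+    # (batch, time, freq, channels).
+    features = keras.ops.ones((1, 10, 64, 32))
+    normalized = norm(features)  # Same shape as the input.
+    ```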
+ """ + + def __init__( + self, + num_channels, + feature_dims, + eps=1e-3, + dtype=None, + **kwargs, + ): + super().__init__(dtype=dtype, **kwargs) + self.num_channels = num_channels + self.feature_dims = tuple(feature_dims) + self.eps = eps + self.reduction_axes = tuple(range(2, 2 + len(self.feature_dims) + 1)) + + def build(self, input_shape): + self.scale = self.add_weight( + shape=(self.num_channels,), + initializer="ones", + trainable=True, + name="scale", + dtype=self.dtype_policy.variable_dtype, + ) + super().build(input_shape) + + def _int8_call(self, hidden_states): + original_dtype = hidden_states.dtype + x_calc = keras.ops.cast(hidden_states, "float32") + result_calc = self.call(x_calc) + return keras.ops.cast(result_calc, original_dtype) + + def call(self, hidden_states): + input_dtype = hidden_states.dtype + x_calc = keras.ops.cast(hidden_states, "float32") + mask_calc = keras.ops.ones_like(x_calc, dtype="float32") + sum_values_at_t = keras.ops.sum( + x_calc, axis=self.reduction_axes, keepdims=True + ) + cum_sum_values = keras.ops.cumsum(sum_values_at_t, axis=1) + elements_in_group_at_t = keras.ops.sum( + mask_calc, axis=self.reduction_axes, keepdims=True + ) + cum_count_elements = keras.ops.cumsum(elements_in_group_at_t, axis=1) + safe_cum_count_elements = keras.ops.maximum(cum_count_elements, 1.0) + cum_mean = cum_sum_values / safe_cum_count_elements + squared_diff_from_mean = keras.ops.square(x_calc - cum_mean) + sum_sq_diff_at_t = keras.ops.sum( + squared_diff_from_mean, axis=self.reduction_axes, keepdims=True + ) + cum_sum_sq_diff = keras.ops.cumsum(sum_sq_diff_at_t, axis=1) + cum_variance = cum_sum_sq_diff / safe_cum_count_elements + normalized_x = (x_calc - cum_mean) * keras.ops.rsqrt( + cum_variance + self.eps + ) + scale_view_shape = [1] * (len(hidden_states.shape) - 1) + [ + self.num_channels + ] + reshaped_scale = keras.ops.reshape(self.scale, scale_view_shape) + normalized_x = normalized_x * keras.ops.cast(reshaped_scale, "float32") + final_output = normalized_x * mask_calc + return keras.ops.cast(final_output, input_dtype) + + def get_config(self): + config = super().get_config() + config.update( + { + "num_channels": self.num_channels, + "feature_dims": self.feature_dims, + "eps": self.eps, + } + ) + return config + + +class Gemma3nAudioSSCPConvBlock(keras.layers.Layer): + """A single SSCP (Spectrogram Sub-sampling Convolutional Preprocessor) + block. + + This block consists of a 2D convolution, a cumulative group normalization + layer, and a ReLU activation. It is used to process and downsample audio + spectrograms. + + Args: + idx: int. The index of the convolutional block. + input_freq_dim: int. The frequency dimension of the input spectrogram. + sscp_conv_channel_size: list or tuple. A sequence containing the number + of output channels for each convolutional block in the SSCP stack. + sscp_conv_kernel_size: list or tuple. A sequence of kernel sizes for + each convolutional block. + sscp_conv_stride_size: list or tuple. A sequence of stride sizes for + each convolutional block. + sscp_conv_group_norm_eps: float. The epsilon value for the cumulative + group normalization layer. + manual_padding: tuple. A tuple of 4 integers specifying the manual + padding to be applied as (pad_w_left, pad_w_right, pad_h_top, + pad_h_bottom). 
+ """ + + def __init__( + self, + idx, + input_freq_dim, + sscp_conv_channel_size, + sscp_conv_kernel_size, + sscp_conv_stride_size, + sscp_conv_group_norm_eps, + manual_padding=(0, 0, 0, 0), + dtype=None, + **kwargs, + ): + super().__init__(dtype=dtype, **kwargs) + self.idx = idx + self.input_freq_dim = input_freq_dim + self.sscp_conv_channel_size = sscp_conv_channel_size + self.sscp_conv_kernel_size = sscp_conv_kernel_size + self.sscp_conv_stride_size = sscp_conv_stride_size + self.sscp_conv_group_norm_eps = sscp_conv_group_norm_eps + self.manual_padding = manual_padding + out_channels = sscp_conv_channel_size[idx] + kernel_h, kernel_w = sscp_conv_kernel_size[idx] + stride_h, stride_w = sscp_conv_stride_size[idx] + self.conv = keras.layers.Conv2D( + filters=out_channels, + kernel_size=(kernel_h, kernel_w), + strides=(stride_h, stride_w), + padding="valid", + use_bias=False, + data_format="channels_last", + name="conv", + dtype=self.dtype_policy, + ) + f_in_padded = ( + input_freq_dim + self.manual_padding[0] + self.manual_padding[1] + ) + f_out_conv = (f_in_padded - kernel_w) // stride_w + 1 + self.norm = Gemma3nAudioCumulativeGroupNorm( + num_channels=out_channels, + feature_dims=(f_out_conv,), + eps=sscp_conv_group_norm_eps, + name="norm", + dtype=self.dtype_policy, + ) + self.activation = keras.layers.ReLU( + name="activation", dtype=self.dtype_policy + ) + + def build(self, input_shape): + _, c_in, h, w = input_shape + if h is not None: + padded_h = h + self.manual_padding[2] + self.manual_padding[3] + else: + padded_h = None + padded_w = w + self.manual_padding[0] + self.manual_padding[1] + conv_input_shape = (None, padded_h, padded_w, c_in) + if not self.conv.built: + self.conv.build(conv_input_shape) + if h is not None: + h_out = (padded_h - self.conv.kernel_size[0]) // self.conv.strides[ + 0 + ] + 1 + else: + h_out = None + w_out = (padded_w - self.conv.kernel_size[1]) // self.conv.strides[ + 1 + ] + 1 + norm_input_shape = (None, h_out, w_out, self.conv.filters) + if not self.norm.built: + self.norm.build(norm_input_shape) + super().build(input_shape) + + def call(self, audio_encodings): + audio_encodings_nhwc = keras.ops.transpose( + audio_encodings, (0, 2, 3, 1) + ) + keras_padding = [ + [0, 0], + [self.manual_padding[2], self.manual_padding[3]], + [self.manual_padding[0], self.manual_padding[1]], + [0, 0], + ] + audio_encodings_padded = keras.ops.pad( + audio_encodings_nhwc, + keras_padding, + mode="constant", + constant_values=0.0, + ) + audio_encodings_conv = self.conv(audio_encodings_padded) + x_normed = self.norm(audio_encodings_conv) + audio_encodings_normed = keras.ops.transpose(x_normed, (0, 3, 1, 2)) + return self.activation(audio_encodings_normed) + + def get_config(self): + config = super().get_config() + config.update( + { + "idx": self.idx, + "input_freq_dim": self.input_freq_dim, + "sscp_conv_channel_size": self.sscp_conv_channel_size, + "sscp_conv_kernel_size": self.sscp_conv_kernel_size, + "sscp_conv_stride_size": self.sscp_conv_stride_size, + "sscp_conv_group_norm_eps": self.sscp_conv_group_norm_eps, + "manual_padding": self.manual_padding, + } + ) + return config + + +class Gemma3nAudioConformerFeedForward(keras.layers.Layer): + """The feed-forward module for the Conformer block. + + This module implements the feed-forward sub-layer of a Conformer block, + which consists of pre-layer normalization, two dense layers with a SiLU + activation function in between, post-layer normalization, and a residual + connection. + + Args: + hidden_size: int. 
The hidden size of the input and output tensors. + gradient_clipping: float. The maximum absolute value for gradient + clipping. + conf_residual_weight: float. The weight applied to the output of the + sub-layer before adding the residual connection. + rms_norm_eps: float. The epsilon value for the RMS normalization layers. + """ + + def __init__( + self, + hidden_size, + gradient_clipping, + conf_residual_weight, + rms_norm_eps, + dtype=None, + **kwargs, + ): + super().__init__(dtype=dtype, **kwargs) + self.hidden_size = hidden_size + self.gradient_clipping = gradient_clipping + self.conf_residual_weight = conf_residual_weight + self.rms_norm_eps = rms_norm_eps + self.pre_layer_norm = Gemma3nRMSNorm( + hidden_size, + eps=rms_norm_eps, + name="pre_layer_norm", + dtype=self.dtype_policy, + ) + self.ffw_layer_1 = keras.layers.Dense( + hidden_size * 4, + use_bias=False, + name="ffw_layer_1", + dtype=self.dtype_policy, + ) + self.ffw_layer_2 = keras.layers.Dense( + hidden_size, + use_bias=False, + name="ffw_layer_2", + dtype=self.dtype_policy, + ) + self.post_layer_norm = Gemma3nRMSNorm( + hidden_size, + eps=rms_norm_eps, + name="post_layer_norm", + dtype=self.dtype_policy, + ) + + def build(self, input_shape): + self.pre_layer_norm.build(input_shape) + self.ffw_layer_1.build(input_shape) + ffw1_output_shape = input_shape[:-1] + (self.hidden_size * 4,) + self.ffw_layer_2.build(ffw1_output_shape) + self.post_layer_norm.build(input_shape) + super().build(input_shape) + + def call(self, audio_encodings): + residual = audio_encodings + audio_encodings = keras.ops.clip( + audio_encodings, -self.gradient_clipping, self.gradient_clipping + ) + audio_encodings = self.pre_layer_norm(audio_encodings) + audio_encodings = self.ffw_layer_1(audio_encodings) + audio_encodings = keras.activations.silu(audio_encodings) + audio_encodings = self.ffw_layer_2(audio_encodings) + audio_encodings = keras.ops.clip( + audio_encodings, -self.gradient_clipping, self.gradient_clipping + ) + audio_encodings = self.post_layer_norm(audio_encodings) + return residual + (audio_encodings * self.conf_residual_weight) + + def get_config(self): + config = super().get_config() + config.update( + { + "hidden_size": self.hidden_size, + "gradient_clipping": self.gradient_clipping, + "conf_residual_weight": self.conf_residual_weight, + "rms_norm_eps": self.rms_norm_eps, + } + ) + return config + + +class Gemma3nAudioConformerLightConv1d(keras.layers.Layer): + """The lightweight 1D convolution module for the Conformer block. + + This module implements the convolution sub-layer of a Conformer block, + which consists of pre-layer normalization, a gated linear unit (GLU), a + lightweight depthwise 1D convolution, and a final projection, followed by a + residual connection. + + Args: + hidden_size: int. The hidden size of the input and output tensors. + rms_norm_eps: float. The epsilon value for the RMS normalization layers. + conf_conv_kernel_size: int. The kernel size for the depthwise 1D + convolution. + gradient_clipping: float. The maximum absolute value for gradient + clipping. 
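+
+    Example:
+
+    A minimal usage sketch; the argument values below are illustrative
+    assumptions, not settings from a released Gemma3n checkpoint.
+
+    ```python
+    import keras
+
+    lconv = Gemma3nAudioConformerLightConv1d(
+        hidden_size=256,
+        rms_norm_eps=1e-6,
+        conf_conv_kernel_size=5,
+        gradient_clipping=10000.0,
+    )
+    features = keras.ops.ones((1, 24, 256))
+    outputs = lconv(features)  # Shape: (1, 24, 256).
+    ```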
+ """ + + def __init__( + self, + hidden_size, + rms_norm_eps, + conf_conv_kernel_size, + gradient_clipping, + dtype=None, + **kwargs, + ): + super().__init__(dtype=dtype, **kwargs) + self.hidden_size = hidden_size + self.rms_norm_eps = rms_norm_eps + self.conf_conv_kernel_size = conf_conv_kernel_size + self.gradient_clipping = gradient_clipping + self.pre_layer_norm = Gemma3nRMSNorm( + hidden_size, + eps=rms_norm_eps, + name="pre_layer_norm", + dtype=self.dtype_policy, + ) + self.linear_start = keras.layers.Dense( + hidden_size * 2, + use_bias=False, + name="linear_start", + dtype=self.dtype_policy, + ) + self.depthwise_conv1d = keras.layers.DepthwiseConv1D( + kernel_size=conf_conv_kernel_size, + strides=1, + padding="valid", + use_bias=False, + data_format="channels_last", + name="depthwise_conv1d", + dtype=self.dtype_policy, + ) + self.conv_norm = Gemma3nRMSNorm( + hidden_size, + eps=rms_norm_eps, + name="conv_norm", + dtype=self.dtype_policy, + ) + self.linear_end = keras.layers.Dense( + hidden_size, + use_bias=False, + name="linear_end", + dtype=self.dtype_policy, + ) + self.causal_padding = conf_conv_kernel_size - 1 + + def build(self, input_shape): + self.pre_layer_norm.build(input_shape) + self.linear_start.build(input_shape) + glu_output_shape = input_shape[:-1] + (self.hidden_size,) + self.depthwise_conv1d.build(glu_output_shape) + self.conv_norm.build(glu_output_shape) + self.linear_end.build(glu_output_shape) + super().build(input_shape) + + def call(self, audio_encodings): + residual = audio_encodings + audio_encodings = self.pre_layer_norm(audio_encodings) + audio_encodings = self.linear_start(audio_encodings) + gated, activated = keras.ops.split(audio_encodings, 2, axis=-1) + audio_encodings = gated * keras.activations.sigmoid(activated) + + padded = keras.ops.pad( + audio_encodings, + [[0, 0], [self.causal_padding, 0], [0, 0]], + ) + audio_encodings = self.depthwise_conv1d(padded) + audio_encodings = keras.ops.clip( + audio_encodings, -self.gradient_clipping, self.gradient_clipping + ) + audio_encodings = self.conv_norm(audio_encodings) + audio_encodings = keras.activations.silu(audio_encodings) + audio_encodings = self.linear_end(audio_encodings) + return audio_encodings + residual + + def get_config(self): + config = super().get_config() + config.update( + { + "hidden_size": self.hidden_size, + "rms_norm_eps": self.rms_norm_eps, + "conf_conv_kernel_size": self.conf_conv_kernel_size, + "gradient_clipping": self.gradient_clipping, + } + ) + return config + + +class Gemma3nAudioConformerAttention(keras.layers.Layer): + """The attention module for the Conformer block. + + This module implements the multi-head self-attention sub-layer of a + Conformer block. It wraps the core attention mechanism with pre and post + layer normalization, a final dense projection, and a residual connection. + + Args: + hidden_size: int. The hidden size of the input and output tensors. + gradient_clipping: float. The maximum absolute value for gradient + clipping. + conf_num_attention_heads: int. The number of attention heads. + conf_attention_chunk_size: int. The chunk size for attention + computation, used for memory efficiency. + conf_attention_context_right: int. The right context size for attention. + conf_attention_context_left: int. The left context size for attention. + conf_attention_logit_cap: float. The value to which attention logits + are capped. 
+ """ + + def __init__( + self, + hidden_size, + gradient_clipping, + conf_num_attention_heads, + conf_attention_chunk_size, + conf_attention_context_right, + conf_attention_context_left, + conf_attention_logit_cap, + dtype=None, + **kwargs, + ): + super().__init__(dtype=dtype, **kwargs) + self.hidden_size = hidden_size + self.gradient_clipping = gradient_clipping + self.conf_num_attention_heads = conf_num_attention_heads + self.conf_attention_chunk_size = conf_attention_chunk_size + self.conf_attention_context_right = conf_attention_context_right + self.conf_attention_context_left = conf_attention_context_left + self.conf_attention_logit_cap = conf_attention_logit_cap + self.pre_attn_norm = Gemma3nRMSNorm( + hidden_size, name="pre_attn_norm", dtype=self.dtype_policy + ) + self.attn = Gemma3nAudioAttention( + hidden_size, + conf_num_attention_heads, + conf_attention_chunk_size, + conf_attention_context_right, + conf_attention_context_left, + conf_attention_logit_cap, + dtype=self.dtype_policy, + name="attn", + ) + self.post = keras.layers.Dense( + hidden_size, use_bias=False, name="post", dtype=self.dtype_policy + ) + self.post_norm = Gemma3nRMSNorm( + hidden_size, name="post_norm", dtype=self.dtype_policy + ) + + def build(self, input_shape): + self.pre_attn_norm.build(input_shape) + self.attn.build(input_shape) + self.post.build(input_shape) + self.post_norm.build(input_shape) + super().build(input_shape) + + def call(self, audio_encodings, audio_mel_mask): + residual = audio_encodings + audio_encodings = keras.ops.clip( + audio_encodings, -self.gradient_clipping, self.gradient_clipping + ) + audio_encodings_norm = self.pre_attn_norm(audio_encodings) + audio_encodings_attn_out = self.attn( + audio_encodings_norm, audio_mel_mask + ) + b, t, num_heads, head_dim = keras.ops.shape(audio_encodings_attn_out) + audio_encodings_reshaped = keras.ops.reshape( + audio_encodings_attn_out, (b, t, num_heads * head_dim) + ) + audio_encodings = self.post(audio_encodings_reshaped) + audio_encodings = keras.ops.clip( + audio_encodings, -self.gradient_clipping, self.gradient_clipping + ) + return residual + self.post_norm(audio_encodings) + + def get_config(self): + config = super().get_config() + config.update( + { + "hidden_size": self.hidden_size, + "gradient_clipping": self.gradient_clipping, + "conf_num_attention_heads": self.conf_num_attention_heads, + "conf_attention_chunk_size": self.conf_attention_chunk_size, + "conf_attention_context_right": self.conf_attention_context_right, # noqa: E501 + "conf_attention_context_left": self.conf_attention_context_left, + "conf_attention_logit_cap": self.conf_attention_logit_cap, + } + ) + return config diff --git a/keras_hub/src/models/gemma3n/gemma3n_backbone.py b/keras_hub/src/models/gemma3n/gemma3n_backbone.py new file mode 100644 index 0000000000..e939297432 --- /dev/null +++ b/keras_hub/src/models/gemma3n/gemma3n_backbone.py @@ -0,0 +1,865 @@ +import keras + +from keras_hub.src.api_export import keras_hub_export +from keras_hub.src.models.backbone import Backbone +from keras_hub.src.models.gemma3n.gemma3n_audio_encoder import ( + Gemma3nAudioEncoder, +) +from keras_hub.src.models.gemma3n.gemma3n_text_model import Gemma3nTextModel +from keras_hub.src.models.gemma3n.rms_normalization import Gemma3nRMSNorm + + +class Gemma3nMultimodalEmbedder(keras.layers.Layer): + """A layer for handling multimodal embeddings. + + This layer manages embeddings for different modalities (here, vision, text, + and audio). 
It can take either token IDs or pre-computed embedding vectors + as input. The embeddings are normalized and projected to match the text + model's hidden size. + + Args: + multimodal_hidden_size: int. The hidden size of the multimodal + embeddings. + text_hidden_size: int. The hidden size of the text model. + rms_norm_eps: float. The epsilon value for the Gemma 3n RMS + normalization layers. + vocab_offset: int. The vocabulary offset for the specific modality. + vocab_size: int. The vocabulary size for the specific modality. + """ + + def __init__( + self, + multimodal_hidden_size, + text_hidden_size, + rms_norm_eps, + vocab_offset, + vocab_size, + dtype=None, + **kwargs, + ): + super().__init__(dtype=dtype, **kwargs) + self.multimodal_hidden_size = multimodal_hidden_size + self.text_hidden_size = text_hidden_size + self.rms_norm_eps = rms_norm_eps + self.vocab_offset = vocab_offset + self.vocab_size = vocab_size + self.embedding = keras.layers.Embedding( + vocab_size, + multimodal_hidden_size, + name="embedding", + dtype=self.dtype_policy, + ) + self.hard_embedding_norm = Gemma3nRMSNorm( + multimodal_hidden_size, + eps=rms_norm_eps, + name="hard_embedding_norm", + dtype=self.dtype_policy, + ) + self.soft_embedding_norm = Gemma3nRMSNorm( + multimodal_hidden_size, + eps=rms_norm_eps, + name="soft_embedding_norm", + dtype=self.dtype_policy, + ) + self.embedding_projection = keras.layers.Dense( + text_hidden_size, + use_bias=False, + name="embedding_projection", + dtype=self.dtype_policy, + ) + self.embedding_post_projection_norm = Gemma3nRMSNorm( + text_hidden_size, + eps=rms_norm_eps, + with_scale=False, + name="embedding_post_projection_norm", + dtype=self.dtype_policy, + ) + + def build(self, input_shape): + embeds_shape = (None, None, self.multimodal_hidden_size) + self.hard_embedding_norm.build(embeds_shape) + self.soft_embedding_norm.build(embeds_shape) + self.embedding_projection.build(embeds_shape) + proj_shape = (None, None, self.text_hidden_size) + self.embedding_post_projection_norm.build(proj_shape) + self.embedding.build((None, None)) + super().build(input_shape) + + def call(self, inputs): + input_ids, inputs_embeds = None, None + if isinstance(inputs, list): + input_ids, inputs_embeds = inputs + elif "int" in str(inputs.dtype): + input_ids = inputs + else: + inputs_embeds = inputs + if (input_ids is None) and (inputs_embeds is None): + raise ValueError( + "You must specify either input_ids or inputs_embeds" + ) + if (input_ids is not None) and (inputs_embeds is not None): + raise ValueError( + "You can only specify one of input_ids or inputs_embeds" + ) + if inputs_embeds is not None: + emb_norm = self.soft_embedding_norm(inputs_embeds) + else: + index_to_lookup = input_ids - self.vocab_offset + hard_emb = self.embedding(index_to_lookup) + emb_norm = self.hard_embedding_norm(hard_emb) + + emb_norm_proj = self.embedding_projection(emb_norm) + return self.embedding_post_projection_norm(emb_norm_proj) + + def get_config(self): + config = super().get_config() + config.update( + { + "multimodal_hidden_size": self.multimodal_hidden_size, + "text_hidden_size": self.text_hidden_size, + "rms_norm_eps": self.rms_norm_eps, + "vocab_offset": self.vocab_offset, + "vocab_size": self.vocab_size, + } + ) + return config + + +class Gemma3nMultimodalEmbeddingProcessor(keras.layers.Layer): + """Processes and interleaves text, vision, and audio embeddings. 
+ + This layer takes raw token IDs and multimodal inputs (pixel values, audio + features) and produces a final sequence of embeddings ready for the + decoder. It handles the embedding lookup for text and special tokens, + and replaces the special tokens with the processed features from the + vision and audio encoders. + + Args: + language_model: `keras_hub.models.gemma3n.Gemma3nTextModel`. The + underlying text model containing embedding layers. + vision_encoder: `keras.Model`. The vision encoder model. + embed_vision: `keras_hub.models.gemma3n.Gemma3nMultimodalEmbedder`. The + embedder for vision. + audio_encoder: `keras_hub.models.gemma3n.Gemma3nAudioEncoder`. The audio + encoder model. + embed_audio: `keras_hub.models.gemma3n.Gemma3nMultimodalEmbedder`. The + embedder for audio. + vision_soft_tokens_per_image: int. Number of tokens to represent an + image. + audio_soft_tokens_per_image: int. Number of tokens to represent an + audio clip. + image_token_id: int. The special token ID for images. + audio_token_id: int. The special token ID for audio. + vocab_size_per_layer_input: int. The vocabulary size for per-layer + inputs. + """ + + def __init__( + self, + language_model, + vision_encoder, + embed_vision, + audio_encoder, + embed_audio, + vision_soft_tokens_per_image, + audio_soft_tokens_per_image, + image_token_id, + audio_token_id, + vocab_size_per_layer_input, + dtype=None, + **kwargs, + ): + super().__init__(dtype=dtype, **kwargs) + self.language_model = language_model + self.vision_encoder = vision_encoder + self.embed_vision = embed_vision + self.audio_encoder = audio_encoder + self.embed_audio = embed_audio + self.vision_soft_tokens_per_image = vision_soft_tokens_per_image + self.audio_soft_tokens_per_image = audio_soft_tokens_per_image + self.image_token_id = image_token_id + self.audio_token_id = audio_token_id + self.vocab_size_per_layer_input = vocab_size_per_layer_input + self.text_hidden_size = language_model.embed_tokens.embedding_dim + + def call(self, inputs): + input_ids = inputs["token_ids"] + pixel_values = inputs.get("pixel_values") + input_features = inputs.get("input_features") + input_features_mask = inputs.get("input_features_mask") + inputs_embeds = self.language_model.embed_tokens(input_ids) + per_layer_inputs_mask = keras.ops.logical_and( + input_ids >= 0, input_ids < self.vocab_size_per_layer_input + ) + per_layer_inputs_tokens = keras.ops.where( + per_layer_inputs_mask, input_ids, keras.ops.zeros_like(input_ids) + ) + per_layer_inputs = self.language_model.get_per_layer_inputs( + per_layer_inputs_tokens + ) + if self.vision_encoder: + vision_mask = keras.ops.logical_and( + input_ids >= self.embed_vision.vocab_offset, + input_ids < self.embed_audio.vocab_offset, + ) + dummy_vision_token_id = ( + self.embed_vision.vocab_offset + + self.embed_vision.embedding.input_dim + - 1 + ) + vision_input_ids = keras.ops.where( + vision_mask, input_ids, dummy_vision_token_id + ) + vision_embeds_from_vocab = self.embed_vision(vision_input_ids) + expanded_vision_mask = keras.ops.expand_dims(vision_mask, axis=-1) + inputs_embeds = keras.ops.where( + expanded_vision_mask, + vision_embeds_from_vocab, + inputs_embeds, + ) + if self.audio_encoder: + audio_mask = input_ids >= self.embed_audio.vocab_offset + dummy_audio_token_id = ( + self.embed_audio.vocab_offset + + self.embed_audio.embedding.input_dim + - 1 + ) + audio_input_ids = keras.ops.where( + audio_mask, input_ids, dummy_audio_token_id + ) + audio_embeds_from_vocab = self.embed_audio(audio_input_ids) + 
expanded_audio_mask = keras.ops.expand_dims(audio_mask, axis=-1) + inputs_embeds = keras.ops.where( + expanded_audio_mask, audio_embeds_from_vocab, inputs_embeds + ) + + if pixel_values is not None and self.vision_encoder: + reshape_target = (-1,) + tuple(self.vision_encoder.image_shape) + pixel_values = keras.ops.reshape(pixel_values, reshape_target) + vision_features = self.vision_encoder(pixel_values) + if self.vision_encoder.data_format == "channels_first": + vision_features = keras.ops.transpose( + vision_features, (0, 2, 3, 1) + ) + shape = keras.ops.shape(vision_features) + vision_features = keras.ops.reshape( + vision_features, (shape[0], shape[1] * shape[2], shape[3]) + ) + vision_features *= keras.ops.sqrt( + keras.ops.cast( + self.vision_encoder.num_features, dtype=inputs_embeds.dtype + ) + ) + vision_embeds = self.embed_vision(vision_features) + image_token_mask = keras.ops.equal(input_ids, self.image_token_id) + + def scatter_vision_features(): + batch_size, seq_len, hidden_size = keras.ops.shape( + inputs_embeds + ) + num_soft_tokens = self.vision_soft_tokens_per_image + start_mask_f32 = keras.ops.cast( + image_token_mask, dtype="float32" + ) + start_mask_f32 = keras.ops.expand_dims(start_mask_f32, axis=-1) + kernel = keras.ops.ones( + (num_soft_tokens, 1, 1), dtype="float32" + ) + padded_mask = keras.ops.pad( + start_mask_f32, + [[0, 0], [num_soft_tokens - 1, 0], [0, 0]], + ) + full_mask_f32 = keras.ops.conv( + padded_mask, kernel, strides=1, padding="valid" + ) + full_mask = keras.ops.cast( + keras.ops.squeeze(full_mask_f32, axis=-1) > 0.5, "bool" + ) + flat_vision_embeds = keras.ops.reshape( + vision_embeds, [-1, hidden_size] + ) + flat_full_mask = keras.ops.reshape(full_mask, [-1]) + gather_indices = ( + keras.ops.cumsum(keras.ops.cast(flat_full_mask, "int32")) + - 1 + ) + gather_indices = keras.ops.where( + flat_full_mask, gather_indices, 0 + ) + replacement_values = keras.ops.take( + flat_vision_embeds, gather_indices, axis=0 + ) + replacement_tensor = keras.ops.reshape( + replacement_values, (batch_size, seq_len, hidden_size) + ) + expanded_full_mask = keras.ops.expand_dims(full_mask, axis=-1) + return keras.ops.where( + expanded_full_mask, replacement_tensor, inputs_embeds + ) + + inputs_embeds = keras.ops.cond( + keras.ops.any(image_token_mask), + scatter_vision_features, + lambda: inputs_embeds, + ) + + if ( + input_features is not None + and input_features_mask is not None + and self.audio_encoder + ): + audio_features, _ = self.audio_encoder( + (input_features, input_features_mask) + ) + audio_embeds = self.embed_audio(audio_features) + shape = keras.ops.shape(audio_embeds) + audio_batch_size, audio_seq_len, hidden_size = ( + shape[0], + shape[1], + shape[2], + ) + target_len = self.audio_soft_tokens_per_image + last_audio_token_id = ( + self.embed_audio.vocab_offset + + self.embed_audio.embedding.input_dim + - 1 + ) + padding_toks = keras.ops.convert_to_tensor( + [[last_audio_token_id]], dtype="int64" + ) + padding_embs = self.embed_audio(padding_toks) + padding_token = keras.ops.squeeze(padding_embs, axis=[0]) + flat_audio_embeds = keras.ops.reshape( + audio_embeds, [-1, hidden_size] + ) + vocab = keras.ops.concatenate( + [flat_audio_embeds, padding_token], axis=0 + ) + pad_token_index = keras.ops.shape(flat_audio_embeds)[0] + indices = keras.ops.arange(target_len) + is_real_token = indices < audio_seq_len + batch_offsets = keras.ops.arange(audio_batch_size) * audio_seq_len + real_indices = keras.ops.expand_dims( + indices, 0 + ) + 
keras.ops.expand_dims(batch_offsets, 1) + final_indices = keras.ops.where( + keras.ops.expand_dims(is_real_token, 0), + real_indices, + pad_token_index, + ) + audio_embeds = keras.ops.take(vocab, final_indices, axis=0) + audio_token_mask = keras.ops.equal(input_ids, self.audio_token_id) + + def scatter_audio_features(): + batch_size, seq_len, hidden_size = keras.ops.shape( + inputs_embeds + ) + num_soft_tokens = self.audio_soft_tokens_per_image + start_mask_f32 = keras.ops.cast( + audio_token_mask, dtype="float32" + ) + start_mask_f32 = keras.ops.expand_dims(start_mask_f32, axis=-1) + kernel = keras.ops.ones( + (num_soft_tokens, 1, 1), dtype="float32" + ) + padded_mask = keras.ops.pad( + start_mask_f32, + [[0, 0], [num_soft_tokens - 1, 0], [0, 0]], + ) + full_mask_f32 = keras.ops.conv( + padded_mask, kernel, strides=1, padding="valid" + ) + full_mask = keras.ops.cast( + keras.ops.squeeze(full_mask_f32, axis=-1) > 0.5, "bool" + ) + flat_audio_embeds = keras.ops.reshape( + audio_embeds, [-1, hidden_size] + ) + flat_full_mask = keras.ops.reshape(full_mask, [-1]) + gather_indices = ( + keras.ops.cumsum(keras.ops.cast(flat_full_mask, "int32")) + - 1 + ) + gather_indices = keras.ops.where( + flat_full_mask, gather_indices, 0 + ) + replacement_values = keras.ops.take( + flat_audio_embeds, gather_indices, axis=0 + ) + replacement_tensor = keras.ops.reshape( + replacement_values, (batch_size, seq_len, hidden_size) + ) + expanded_full_mask = keras.ops.expand_dims(full_mask, axis=-1) + return keras.ops.where( + expanded_full_mask, replacement_tensor, inputs_embeds + ) + + inputs_embeds = keras.ops.cond( + keras.ops.any(audio_token_mask), + scatter_audio_features, + lambda: inputs_embeds, + ) + projected_per_layer_inputs = ( + self.language_model.project_per_layer_inputs( + inputs_embeds, per_layer_inputs + ) + ) + return inputs_embeds, projected_per_layer_inputs + + def get_config(self): + config = super().get_config() + config.update( + { + "language_model": keras.layers.serialize(self.language_model), + "vision_encoder": keras.layers.serialize(self.vision_encoder), + "embed_vision": keras.layers.serialize(self.embed_vision), + "audio_encoder": keras.layers.serialize(self.audio_encoder), + "embed_audio": keras.layers.serialize(self.embed_audio), + "vision_soft_tokens_per_image": self.vision_soft_tokens_per_image, # noqa: E501 + "audio_soft_tokens_per_image": self.audio_soft_tokens_per_image, + "image_token_id": self.image_token_id, + "audio_token_id": self.audio_token_id, + "vocab_size_per_layer_input": self.vocab_size_per_layer_input, + } + ) + return config + + @classmethod + def from_config(cls, config): + config = config.copy() + language_model = keras.layers.deserialize(config.pop("language_model")) + vision_encoder = keras.layers.deserialize(config.pop("vision_encoder")) + embed_vision = keras.layers.deserialize(config.pop("embed_vision")) + audio_encoder = keras.layers.deserialize(config.pop("audio_encoder")) + embed_audio = keras.layers.deserialize(config.pop("embed_audio")) + return cls( + language_model=language_model, + vision_encoder=vision_encoder, + embed_vision=embed_vision, + audio_encoder=audio_encoder, + embed_audio=embed_audio, + **config, + ) + + +@keras_hub_export("keras_hub.models.Gemma3nBackbone") +class Gemma3nBackbone(Backbone): + """The Gemma3n model backbone. + + This model is a multimodal transformer that can process text, image, and + audio inputs. It consists of a text decoder and optional vision and audio + encoders. + + Args: + text_vocab_size: int. 
The size of the text vocabulary. + text_hidden_size: int. The hidden size of the text model. + num_hidden_layers: int. The number of hidden layers in the text model. + pad_token_id: int. The ID of the padding token. + num_attention_heads: int. The number of attention heads in the text + model. + num_key_value_heads: int. The number of key-value heads for GQA. + head_dim: int. The dimension of each attention head. + intermediate_size: list[int]. A list of intermediate sizes for the MLP + layers. + hidden_activation: str. The activation function for the MLP layers. + layer_types: list[str]. A list of layer types ('full_attention' or + 'sliding_attention'). + sliding_window: int. The sliding window size for sliding window + attention. + rope_theta: float. The theta value for RoPE. + max_position_embeddings: int. The maximum sequence length. + vocab_size_per_layer_input: int. The vocab size for per-layer inputs. + hidden_size_per_layer_input: int. The hidden size for per-layer inputs. + altup_num_inputs: int. The number of inputs for the AltUp mechanism. + laurel_rank: int. The rank for the Laurel block. + attention_bias: bool. Whether to use a bias in the attention + projections. + attention_dropout: float. The dropout rate for attention weights. + rope_scaling: float. The scaling factor for RoPE. + rope_local_base_freq: float. The base frequency for local RoPE. + activation_sparsity_pattern: list[float]. The sparsity pattern for MLP + activations. + altup_coef_clip: float. The coefficient clipping value for AltUp. + altup_active_idx: int. The active index for AltUp. + altup_correct_scale: bool. Whether to correct the scale in AltUp. + num_kv_shared_layers: int. The number of shared KV layers. + vision_encoder_config: dict. The config for the vision encoder. + vision_hidden_size: int. The hidden size of the vision embeddings. + vision_vocab_size: int. The vocabulary size for vision tokens. + vision_vocab_offset: int. The vocabulary offset for vision tokens. + vision_soft_tokens_per_image: int. The number of tokens per image. + image_token_id: int. The special token ID for images. + audio_encoder_config: dict. The config for the audio encoder. + audio_hidden_size: int. The hidden size of the audio embeddings. + audio_vocab_size: int. The vocabulary size for audio tokens. + audio_vocab_offset: int. The vocabulary offset for audio tokens. + audio_soft_tokens_per_image: int. The number of tokens per audio clip. + audio_token_id: int. The special token ID for audio. + rms_norm_eps: float. The epsilon value for RMS normalization. + dtype: `None` or str or `keras.mixed_precision.DTypePolicy`. The dtype + to use for the model's computations and weights. Defaults to `None`. + + Example: + ```python + import numpy as np + from keras_hub.src.models.gemma3n.gemma3n_audio_encoder import ( + Gemma3nAudioEncoder, + ) + from keras_hub.src.models.gemma3n.gemma3n_backbone import Gemma3nBackbone + from keras_hub.src.models.mobilenetv5.mobilenetv5_backbone import ( + MobileNetV5Backbone, + ) + from keras_hub.src.models.mobilenetv5.mobilenetv5_builder import ( + decode_arch_def, + ) + + # Vision encoder config. + vision_arch_def = [["er_r1_k3_s1_e1_c16"]] + vision_block_args = decode_arch_def(vision_arch_def) + vision_encoder = MobileNetV5Backbone( + block_args=vision_block_args, + num_features=4, + image_shape=(224, 224, 3), + use_msfa=False, + ) + + # Audio encoder config. 
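+    # Toy sizes: two strided SSCP conv stages and a single conformer block.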
+ audio_encoder = Gemma3nAudioEncoder( + hidden_size=8, + input_feat_size=32, + sscp_conv_channel_size=[4, 8], + sscp_conv_kernel_size=[(3, 3), (3, 3)], + sscp_conv_stride_size=[(2, 2), (2, 2)], + sscp_conv_group_norm_eps=1e-5, + conf_num_hidden_layers=1, + rms_norm_eps=1e-6, + gradient_clipping=1.0, + conf_residual_weight=0.5, + conf_num_attention_heads=1, + conf_attention_chunk_size=4, + conf_attention_context_right=5, + conf_attention_context_left=5, + conf_attention_logit_cap=50.0, + conf_conv_kernel_size=5, + conf_reduction_factor=1, + ) + + # Backbone config. + backbone = Gemma3nBackbone( + text_vocab_size=50, + text_hidden_size=8, + num_hidden_layers=1, + pad_token_id=0, + num_attention_heads=1, + num_key_value_heads=1, + head_dim=8, + intermediate_size=[16], + hidden_activation="gelu_approximate", + layer_types=["full_attention"], + sliding_window=4, + rope_theta=10000.0, + max_position_embeddings=16, + vocab_size_per_layer_input=50, + hidden_size_per_layer_input=2, + altup_num_inputs=2, + laurel_rank=1, + vision_encoder_config=vision_encoder.get_config(), + vision_hidden_size=16, + audio_encoder_config=audio_encoder.get_config(), + audio_hidden_size=8, + ) + + # Create dummy inputs. + input_data = { + "token_ids": np.random.randint(0, 50, size=(1, 16), dtype="int32"), + "attention_mask": np.ones((1, 1, 16, 16), dtype=bool), + "pixel_values": np.random.rand(1, 1, 224, 224, 3).astype("float32"), + "input_features": np.random.rand(1, 16, 32).astype("float32"), + "input_features_mask": np.zeros((1, 16), dtype=bool), + } + + # Forward pass. + outputs = backbone(input_data) + ``` + """ + + def __init__( + self, + text_vocab_size, + text_hidden_size, + num_hidden_layers, + pad_token_id, + num_attention_heads, + num_key_value_heads, + head_dim, + intermediate_size, + hidden_activation, + layer_types, + sliding_window, + rope_theta, + max_position_embeddings, + vocab_size_per_layer_input, + hidden_size_per_layer_input, + altup_num_inputs, + laurel_rank, + attention_bias=False, + attention_dropout=0.0, + rope_scaling=None, + rope_local_base_freq=10000.0, + activation_sparsity_pattern=None, + altup_coef_clip=None, + altup_active_idx=0, + altup_correct_scale=True, + num_kv_shared_layers=0, + vision_encoder_config=None, + vision_hidden_size=2048, + vision_vocab_size=128, + vision_vocab_offset=100, + vision_soft_tokens_per_image=256, + image_token_id=98, + audio_encoder_config=None, + audio_hidden_size=32, + audio_vocab_size=128, + audio_vocab_offset=228, + audio_soft_tokens_per_image=188, + audio_token_id=99, + rms_norm_eps=1e-6, + dtype=None, + **kwargs, + ): + # === Layers === + self.vision_encoder = None + if vision_encoder_config: + from keras_hub.src.models.mobilenetv5.mobilenetv5_backbone import ( + MobileNetV5Backbone, + ) + + vision_encoder_config["dtype"] = dtype + self.vision_encoder = MobileNetV5Backbone.from_config( + vision_encoder_config + ) + self.audio_encoder = None + if audio_encoder_config: + audio_config = audio_encoder_config.copy() + audio_config.pop("dtype", None) + self.audio_encoder = Gemma3nAudioEncoder( + dtype=dtype, **audio_config + ) + self.language_model = Gemma3nTextModel( + pad_token_id=pad_token_id, + vocab_size=text_vocab_size, + hidden_size=text_hidden_size, + num_hidden_layers=num_hidden_layers, + rms_norm_eps=rms_norm_eps, + num_attention_heads=num_attention_heads, + num_key_value_heads=num_key_value_heads, + head_dim=head_dim, + attention_bias=attention_bias, + attention_dropout=attention_dropout, + layer_types=layer_types, + 
sliding_window=sliding_window, + rope_theta=rope_theta, + rope_scaling=rope_scaling, + rope_local_base_freq=rope_local_base_freq, + max_position_embeddings=max_position_embeddings, + intermediate_size=intermediate_size, + hidden_activation=hidden_activation, + activation_sparsity_pattern=activation_sparsity_pattern, + altup_num_inputs=altup_num_inputs, + altup_coef_clip=altup_coef_clip, + altup_active_idx=altup_active_idx, + altup_correct_scale=altup_correct_scale, + laurel_rank=laurel_rank, + hidden_size_per_layer_input=hidden_size_per_layer_input, + vocab_size_per_layer_input=vocab_size_per_layer_input, + num_kv_shared_layers=num_kv_shared_layers, + dtype=dtype, + name="text_model", + ) + self.embed_vision = None + if self.vision_encoder: + self.embed_vision = Gemma3nMultimodalEmbedder( + multimodal_hidden_size=vision_hidden_size, + text_hidden_size=text_hidden_size, + rms_norm_eps=rms_norm_eps, + vocab_offset=vision_vocab_offset, + vocab_size=vision_vocab_size, + dtype=dtype, + name="vision_embedder", + ) + self.embed_audio = None + if self.audio_encoder: + self.embed_audio = Gemma3nMultimodalEmbedder( + multimodal_hidden_size=audio_hidden_size, + text_hidden_size=text_hidden_size, + rms_norm_eps=rms_norm_eps, + vocab_offset=audio_vocab_offset, + vocab_size=audio_vocab_size, + dtype=dtype, + name="audio_embedder", + ) + self.embedding_processor = Gemma3nMultimodalEmbeddingProcessor( + language_model=self.language_model, + vision_encoder=self.vision_encoder, + embed_vision=self.embed_vision, + audio_encoder=self.audio_encoder, + embed_audio=self.embed_audio, + vision_soft_tokens_per_image=vision_soft_tokens_per_image, + audio_soft_tokens_per_image=audio_soft_tokens_per_image, + image_token_id=image_token_id, + audio_token_id=audio_token_id, + vocab_size_per_layer_input=vocab_size_per_layer_input, + dtype=dtype, + name="multimodal_embedding_processor", + ) + + # === Functional Model === + # === Model Inputs === + token_ids_input = keras.Input( + shape=(None,), dtype="int32", name="token_ids" + ) + attention_mask_input = keras.Input( + shape=(None, None, None), dtype="bool", name="attention_mask" + ) + processor_inputs = { + "token_ids": token_ids_input, + } + model_inputs = { + "token_ids": token_ids_input, + "attention_mask": attention_mask_input, + } + + # === Modality Feature Extraction and Interleaving === + if self.vision_encoder: + input_shape = (None,) + tuple(self.vision_encoder.image_shape) + pixel_values_input = keras.Input( + shape=input_shape, + dtype="float32", + name="pixel_values", + ) + processor_inputs["pixel_values"] = pixel_values_input + model_inputs["pixel_values"] = pixel_values_input + if self.audio_encoder: + input_features_input = keras.Input( + shape=(None, self.audio_encoder.input_feat_size), + dtype="float32", + name="input_features", + ) + input_features_mask_input = keras.Input( + shape=(None,), dtype="bool", name="input_features_mask" + ) + processor_inputs["input_features"] = input_features_input + processor_inputs["input_features_mask"] = input_features_mask_input + model_inputs["input_features"] = input_features_input + model_inputs["input_features_mask"] = input_features_mask_input + final_embeds, per_layer_inputs = self.embedding_processor( + processor_inputs + ) + + # === Decoder layers === + # The Gemma3nTextModel encapsulates the decoder loop and final norm. + # It requires `input_ids` for its internal per-layer logic. 
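+        # The positional argument order below mirrors
+        # `Gemma3nTextModel.call(input_ids, attention_mask, inputs_embeds,
+        # per_layer_inputs)`.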
+ sequence_output = self.language_model( + token_ids_input, + attention_mask_input, + final_embeds, + per_layer_inputs, + ) + super().__init__( + inputs=model_inputs, + outputs=sequence_output, + dtype=dtype, + **kwargs, + ) + + # === Config === + self.text_vocab_size = text_vocab_size + self.text_hidden_size = text_hidden_size + self.num_hidden_layers = num_hidden_layers + self.pad_token_id = pad_token_id + self.num_attention_heads = num_attention_heads + self.num_key_value_heads = num_key_value_heads + self.head_dim = head_dim + self.intermediate_size = intermediate_size + self.hidden_activation = hidden_activation + self.layer_types = layer_types + self.sliding_window = sliding_window + self.rope_theta = rope_theta + self.max_position_embeddings = max_position_embeddings + self.vocab_size_per_layer_input = vocab_size_per_layer_input + self.hidden_size_per_layer_input = hidden_size_per_layer_input + self.altup_num_inputs = altup_num_inputs + self.laurel_rank = laurel_rank + self.attention_bias = attention_bias + self.attention_dropout = attention_dropout + self.rope_scaling = rope_scaling + self.rope_local_base_freq = rope_local_base_freq + self.activation_sparsity_pattern = activation_sparsity_pattern + self.altup_coef_clip = altup_coef_clip + self.altup_active_idx = altup_active_idx + self.altup_correct_scale = altup_correct_scale + self.num_kv_shared_layers = num_kv_shared_layers + self.vision_encoder_config = vision_encoder_config + self.vision_hidden_size = vision_hidden_size + self.vision_vocab_size = vision_vocab_size + self.vision_vocab_offset = vision_vocab_offset + self.vision_soft_tokens_per_image = vision_soft_tokens_per_image + self.image_token_id = image_token_id + self.audio_encoder_config = audio_encoder_config + self.audio_hidden_size = audio_hidden_size + self.audio_vocab_size = audio_vocab_size + self.audio_vocab_offset = audio_vocab_offset + self.audio_soft_tokens_per_image = audio_soft_tokens_per_image + self.audio_token_id = audio_token_id + self.rms_norm_eps = rms_norm_eps + + def get_config(self): + config = super().get_config() + config.update( + { + "text_vocab_size": self.text_vocab_size, + "text_hidden_size": self.text_hidden_size, + "num_hidden_layers": self.num_hidden_layers, + "pad_token_id": self.pad_token_id, + "num_attention_heads": self.num_attention_heads, + "num_key_value_heads": self.num_key_value_heads, + "head_dim": self.head_dim, + "intermediate_size": self.intermediate_size, + "hidden_activation": self.hidden_activation, + "layer_types": self.layer_types, + "sliding_window": self.sliding_window, + "rope_theta": self.rope_theta, + "max_position_embeddings": self.max_position_embeddings, + "vocab_size_per_layer_input": self.vocab_size_per_layer_input, + "hidden_size_per_layer_input": self.hidden_size_per_layer_input, + "altup_num_inputs": self.altup_num_inputs, + "laurel_rank": self.laurel_rank, + "attention_bias": self.attention_bias, + "attention_dropout": self.attention_dropout, + "rope_scaling": self.rope_scaling, + "rope_local_base_freq": self.rope_local_base_freq, + "activation_sparsity_pattern": self.activation_sparsity_pattern, + "altup_coef_clip": self.altup_coef_clip, + "altup_active_idx": self.altup_active_idx, + "altup_correct_scale": self.altup_correct_scale, + "num_kv_shared_layers": self.num_kv_shared_layers, + "vision_encoder_config": self.vision_encoder_config, + "vision_hidden_size": self.vision_hidden_size, + "vision_vocab_size": self.vision_vocab_size, + "vision_vocab_offset": self.vision_vocab_offset, + 
"vision_soft_tokens_per_image": self.vision_soft_tokens_per_image, # noqa: E501 + "image_token_id": self.image_token_id, + "audio_encoder_config": self.audio_encoder_config, + "audio_hidden_size": self.audio_hidden_size, + "audio_vocab_size": self.audio_vocab_size, + "audio_vocab_offset": self.audio_vocab_offset, + "audio_soft_tokens_per_image": self.audio_soft_tokens_per_image, + "audio_token_id": self.audio_token_id, + "rms_norm_eps": self.rms_norm_eps, + } + ) + return config + + @classmethod + def from_config(cls, config): + return cls(**config) diff --git a/keras_hub/src/models/gemma3n/gemma3n_backbone_test.py b/keras_hub/src/models/gemma3n/gemma3n_backbone_test.py new file mode 100644 index 0000000000..23e261f98c --- /dev/null +++ b/keras_hub/src/models/gemma3n/gemma3n_backbone_test.py @@ -0,0 +1,185 @@ +from copy import deepcopy + +import numpy as np +import pytest +from absl.testing import parameterized + +try: + from keras_hub.src.models.mobilenetv5.mobilenetv5_backbone import ( + MobileNetV5Backbone, + ) + from keras_hub.src.models.mobilenetv5.mobilenetv5_builder import ( + decode_arch_def, + ) + + mobilenetv5 = True +except ImportError: + mobilenetv5 = False + +from keras_hub.src.models.gemma3n.gemma3n_audio_encoder import ( + Gemma3nAudioEncoder, +) +from keras_hub.src.models.gemma3n.gemma3n_backbone import Gemma3nBackbone +from keras_hub.src.tests.test_case import TestCase + + +@pytest.mark.skipif( + not mobilenetv5, + reason="The pull request for MobileNetV5 is still open.", +) +class Gemma3nBackboneTest(TestCase): + def setUp(self): + self.batch_size = 1 + self.text_vocab_size = 50 + self.text_sequence_length = 16 + self.image_height = 224 + self.image_width = 224 + self.audio_sequence_length = 16 + self.audio_feature_size = 32 + # === Vision Encoder === + if mobilenetv5: + vision_arch_def = [["er_r1_k3_s1_e1_c16"]] + vision_block_args = decode_arch_def(vision_arch_def) + vision_encoder = MobileNetV5Backbone( + block_args=vision_block_args, + num_features=4, + image_shape=(self.image_height, self.image_width, 3), + use_msfa=False, + ) + vision_encoder_config = vision_encoder.get_config() + else: + vision_encoder_config = None + # === Audio Encoder === + audio_encoder = Gemma3nAudioEncoder( + hidden_size=8, + input_feat_size=self.audio_feature_size, + sscp_conv_channel_size=[4, 8], + sscp_conv_kernel_size=[(3, 3), (3, 3)], + sscp_conv_stride_size=[(2, 2), (2, 2)], + sscp_conv_group_norm_eps=1e-5, + conf_num_hidden_layers=1, + rms_norm_eps=1e-6, + gradient_clipping=1.0, + conf_residual_weight=0.5, + conf_num_attention_heads=1, + conf_attention_chunk_size=4, + conf_attention_context_right=5, + conf_attention_context_left=5, + conf_attention_logit_cap=50.0, + conf_conv_kernel_size=5, + conf_reduction_factor=1, + ) + # === Multimodal === + self.multimodal_init_kwargs = { + "text_vocab_size": self.text_vocab_size, + "text_hidden_size": 8, + "num_hidden_layers": 1, + "pad_token_id": 0, + "num_attention_heads": 1, + "num_key_value_heads": 1, + "head_dim": 8, # hidden_size / num_attention_heads + "intermediate_size": [16], + "hidden_activation": "gelu_approximate", + "layer_types": ["full_attention"], + "sliding_window": 4, + "rope_theta": 10000.0, + "max_position_embeddings": self.text_sequence_length, + "vocab_size_per_layer_input": 50, + "hidden_size_per_layer_input": 2, + "altup_num_inputs": 2, + "laurel_rank": 1, + "vision_encoder_config": vision_encoder_config, + "vision_hidden_size": 16, + "audio_encoder_config": audio_encoder.get_config(), + "audio_hidden_size": 8, + } + 
self.multimodal_input_data = { + "token_ids": np.random.randint( + 0, + self.text_vocab_size, + size=(self.batch_size, self.text_sequence_length), + dtype="int32", + ), + "attention_mask": np.ones( + ( + self.batch_size, + 1, + self.text_sequence_length, + self.text_sequence_length, + ), + dtype=bool, + ), + "pixel_values": np.random.rand( + self.batch_size, 1, self.image_height, self.image_width, 3 + ).astype("float32"), + "input_features": np.random.rand( + self.batch_size, + self.audio_sequence_length, + self.audio_feature_size, + ).astype("float32"), + "input_features_mask": np.zeros( + (self.batch_size, self.audio_sequence_length), dtype=bool + ), + } + # === Text-Only === + self.text_init_kwargs = deepcopy(self.multimodal_init_kwargs) + del self.text_init_kwargs["vision_encoder_config"] + del self.text_init_kwargs["audio_encoder_config"] + del self.text_init_kwargs["vision_hidden_size"] + del self.text_init_kwargs["audio_hidden_size"] + self.text_input_data = deepcopy(self.multimodal_input_data) + del self.text_input_data["pixel_values"] + del self.text_input_data["input_features"] + del self.text_input_data["input_features_mask"] + + @parameterized.named_parameters( + ("multimodal", "multimodal"), ("text_only", "text_only") + ) + def test_backbone_basics(self, backbone_type): + if backbone_type == "multimodal": + init_kwargs = self.multimodal_init_kwargs + input_data = self.multimodal_input_data + else: + init_kwargs = self.text_init_kwargs + input_data = self.text_input_data + self.run_backbone_test( + cls=Gemma3nBackbone, + init_kwargs=init_kwargs, + input_data=input_data, + expected_output_shape=( + self.batch_size, + self.text_sequence_length, + init_kwargs["text_hidden_size"], + ), + ) + + @parameterized.named_parameters( + ("multimodal", "multimodal"), ("text_only", "text_only") + ) + def test_saved_model(self, backbone_type): + if backbone_type == "multimodal": + init_kwargs = self.multimodal_init_kwargs + input_data = self.multimodal_input_data + else: + init_kwargs = self.text_init_kwargs + input_data = self.text_input_data + self.run_model_saving_test( + cls=Gemma3nBackbone, + init_kwargs=init_kwargs, + input_data=input_data, + ) + + @parameterized.named_parameters( + ("multimodal", "multimodal", 10354, 7), + ("text_only", "text_only", 1450, 4), + ) + def test_architecture_characteristics( + self, backbone_type, num_params, num_layers + ): + if backbone_type == "multimodal": + init_kwargs = self.multimodal_init_kwargs + else: + init_kwargs = self.text_init_kwargs + model = Gemma3nBackbone(**init_kwargs) + self.assertEqual(model.count_params(), num_params) + self.assertEqual(len(model.layers), num_layers) diff --git a/keras_hub/src/models/gemma3n/gemma3n_text_decoder.py b/keras_hub/src/models/gemma3n/gemma3n_text_decoder.py new file mode 100644 index 0000000000..a071dd10fe --- /dev/null +++ b/keras_hub/src/models/gemma3n/gemma3n_text_decoder.py @@ -0,0 +1,274 @@ +import math + +import keras + +from keras_hub.src.models.gemma3n.gemma3n_attention import Gemma3nTextAttention +from keras_hub.src.models.gemma3n.gemma3n_text_layers import Gemma3nTextAltUp +from keras_hub.src.models.gemma3n.gemma3n_text_layers import ( + Gemma3nTextLaurelBlock, +) +from keras_hub.src.models.gemma3n.gemma3n_text_layers import Gemma3nTextMLP +from keras_hub.src.models.gemma3n.rms_normalization import Gemma3nRMSNorm + + +class Gemma3nTextDecoderBlock(keras.layers.Layer): + """A layer that implements a single Gemma3n decoder block. 
+ + This layer combines self-attention, feed-forward networks, and normalization + to process sequences. It includes specialized components like AltUp and + Laurel blocks for enhanced performance. + + Args: + hidden_size: int. The size of the hidden states. + rms_norm_eps: float. The epsilon value for the Gemma 3n RMS + normalization layers. + num_attention_heads: int. The number of attention heads. + num_key_value_heads: int. The number of key and value heads for + Grouped-Query Attention. + head_dim: int. The dimension of each attention head. + attention_bias: bool. If `True`, attention layers will use a bias. + attention_dropout: float. The dropout rate for the attention mechanism. + is_sliding: bool. If `True`, enables sliding window attention. + sliding_window: int. The size of the sliding window for attention. + intermediate_size: int. The size of the intermediate layer in the MLP. + hidden_activation: str. The activation function for the MLP. + activation_sparsity: float. Sparsity factor for the activation function. + altup_num_inputs: int. The number of inputs for the AltUp layer. + altup_coef_clip: float. Coefficient clipping value for the AltUp layer. + altup_active_idx: int. The index of the active prediction in the + AltUp layer. + altup_correct_scale: bool. Whether to scale the corrected output from + the AltUp layer. + laurel_rank: int. The rank for the Laurel block. + hidden_size_per_layer_input: int. The hidden size for the per-layer + input projection. + """ + + def __init__( + self, + hidden_size, + rms_norm_eps, + num_attention_heads, + num_key_value_heads, + head_dim, + attention_bias, + attention_dropout, + is_sliding, + sliding_window, + intermediate_size, + hidden_activation, + activation_sparsity, + altup_num_inputs, + altup_coef_clip, + altup_active_idx, + altup_correct_scale, + laurel_rank, + hidden_size_per_layer_input, + dtype=None, + **kwargs, + ): + super().__init__(dtype=dtype, **kwargs) + self.hidden_size = hidden_size + self.rms_norm_eps = rms_norm_eps + self.num_attention_heads = num_attention_heads + self.num_key_value_heads = num_key_value_heads + self.head_dim = head_dim + self.attention_bias = attention_bias + self.attention_dropout = attention_dropout + self.is_sliding = is_sliding + self.sliding_window = sliding_window + self.intermediate_size = intermediate_size + self.hidden_activation = hidden_activation + self.activation_sparsity = activation_sparsity + self.altup_num_inputs = altup_num_inputs + self.altup_coef_clip = altup_coef_clip + self.altup_active_idx = altup_active_idx + self.altup_correct_scale = altup_correct_scale + self.laurel_rank = laurel_rank + self.hidden_size_per_layer_input = hidden_size_per_layer_input + self.attention = Gemma3nTextAttention( + hidden_size=hidden_size, + num_attention_heads=num_attention_heads, + num_key_value_heads=num_key_value_heads, + head_dim=head_dim, + attention_dropout=attention_dropout, + attention_bias=attention_bias, + rms_norm_eps=rms_norm_eps, + sliding_window=sliding_window if is_sliding else None, + name="attention", + dtype=self.dtype_policy, + ) + self.mlp = Gemma3nTextMLP( + hidden_size=hidden_size, + intermediate_size=intermediate_size, + hidden_activation=hidden_activation, + activation_sparsity=activation_sparsity, + name="mlp", + dtype=self.dtype_policy, + ) + self.input_layernorm = Gemma3nRMSNorm( + hidden_size, + eps=rms_norm_eps, + name="input_layernorm", + dtype=self.dtype_policy, + ) + self.post_attention_layernorm = Gemma3nRMSNorm( + hidden_size, + eps=rms_norm_eps, + 
name="post_attention_layernorm", + dtype=self.dtype_policy, + ) + self.pre_feedforward_layernorm = Gemma3nRMSNorm( + hidden_size, + eps=rms_norm_eps, + name="pre_feedforward_layernorm", + dtype=self.dtype_policy, + ) + self.post_feedforward_layernorm = Gemma3nRMSNorm( + hidden_size, + eps=rms_norm_eps, + name="post_feedforward_layernorm", + dtype=self.dtype_policy, + ) + self.altup = Gemma3nTextAltUp( + hidden_size=hidden_size, + altup_num_inputs=altup_num_inputs, + altup_coef_clip=altup_coef_clip, + altup_active_idx=altup_active_idx, + rms_norm_eps=rms_norm_eps, + altup_correct_scale=altup_correct_scale, + name="altup", + dtype=self.dtype_policy, + ) + self.laurel = Gemma3nTextLaurelBlock( + hidden_size=hidden_size, + laurel_rank=laurel_rank, + rms_norm_eps=rms_norm_eps, + name="laurel", + dtype=self.dtype_policy, + ) + self.per_layer_input_gate = keras.layers.Dense( + hidden_size_per_layer_input, + use_bias=False, + name="per_layer_input_gate", + dtype=self.dtype_policy, + ) + self.per_layer_projection = keras.layers.Dense( + hidden_size, + use_bias=False, + name="per_layer_projection", + dtype=self.dtype_policy, + ) + self.post_per_layer_input_norm = Gemma3nRMSNorm( + hidden_size, + eps=rms_norm_eps, + name="post_per_layer_input_norm", + dtype=self.dtype_policy, + ) + + def build(self, input_shape): + ( + hidden_states_shape, + _, + _, + per_layer_input_shape, + _, + ) = input_shape + active_prediction_shape = hidden_states_shape[1:] + self.input_layernorm.build(active_prediction_shape) + self.laurel.build(active_prediction_shape) + self.attention.build(active_prediction_shape) + self.post_attention_layernorm.build(active_prediction_shape) + self.pre_feedforward_layernorm.build(active_prediction_shape) + self.mlp.build(active_prediction_shape) + self.post_feedforward_layernorm.build(active_prediction_shape) + self.altup.build(hidden_states_shape) + self.per_layer_input_gate.build(active_prediction_shape) + self.per_layer_projection.build(per_layer_input_shape) + self.post_per_layer_input_norm.build(active_prediction_shape) + if self.hidden_activation == "gelu_approximate": + # NOTE: `gelu_pytorch_tanh` is the same as `gelu(approximate=True)`. 
+ self.act_fn = lambda x: keras.activations.gelu(x, approximate=True) + else: + self.act_fn = keras.activations.get(self.hidden_activation) + super().build(input_shape) + + def call(self, inputs): + ( + hidden_states, + position_embeddings_global, + position_embeddings_local, + per_layer_input, + attention_mask, + ) = inputs + predictions = self.altup.predict(hidden_states) + active_prediction = predictions[self.altup_active_idx] + active_prediction_normed = self.input_layernorm(active_prediction) + laurel_output = self.laurel(active_prediction_normed) + position_embeddings = ( + position_embeddings_local + if self.is_sliding + else position_embeddings_global + ) + attn, _ = self.attention( + active_prediction_normed, position_embeddings, attention_mask + ) + attn = self.post_attention_layernorm(attn) + attn_gated = active_prediction + attn + attn_laurel = (attn_gated + laurel_output) / math.sqrt(2) + attn_norm = self.pre_feedforward_layernorm(attn_laurel) + attn_ffw = self.mlp(attn_norm) + attn_ffw_norm = self.post_feedforward_layernorm(attn_ffw) + attn_ffw_laurel_gated = attn_laurel + attn_ffw_norm + corrected_predictions = self.altup.correct( + predictions, attn_ffw_laurel_gated + ) + corrected_predictions_list = [ + corrected_predictions[i] + for i in range(corrected_predictions.shape[0]) + ] + first_prediction = corrected_predictions_list[self.altup_active_idx] + if self.altup_correct_scale: + first_prediction = self.altup.scale_corrected_output( + first_prediction + ) + first_prediction_gated = self.per_layer_input_gate(first_prediction) + first_prediction_activated = self.act_fn(first_prediction_gated) + first_prediction_multiplied = ( + first_prediction_activated * per_layer_input + ) + first_prediction_projected = self.per_layer_projection( + first_prediction_multiplied + ) + first_prediction_normed = self.post_per_layer_input_norm( + first_prediction_projected + ) + for i in range(1, len(corrected_predictions_list)): + corrected_predictions_list[i] += first_prediction_normed + return keras.ops.stack(corrected_predictions_list, axis=0) + + def get_config(self): + config = super().get_config() + config.update( + { + "hidden_size": self.hidden_size, + "rms_norm_eps": self.rms_norm_eps, + "num_attention_heads": self.num_attention_heads, + "num_key_value_heads": self.num_key_value_heads, + "head_dim": self.head_dim, + "attention_bias": self.attention_bias, + "attention_dropout": self.attention_dropout, + "is_sliding": self.is_sliding, + "sliding_window": self.sliding_window, + "intermediate_size": self.intermediate_size, + "hidden_activation": self.hidden_activation, + "activation_sparsity": self.activation_sparsity, + "altup_num_inputs": self.altup_num_inputs, + "altup_coef_clip": self.altup_coef_clip, + "altup_active_idx": self.altup_active_idx, + "altup_correct_scale": self.altup_correct_scale, + "laurel_rank": self.laurel_rank, + "hidden_size_per_layer_input": self.hidden_size_per_layer_input, + } + ) + return config diff --git a/keras_hub/src/models/gemma3n/gemma3n_text_layers.py b/keras_hub/src/models/gemma3n/gemma3n_text_layers.py new file mode 100644 index 0000000000..ba8d36eb04 --- /dev/null +++ b/keras_hub/src/models/gemma3n/gemma3n_text_layers.py @@ -0,0 +1,426 @@ +import keras +import numpy as np + +from keras_hub.src.models.gemma3n.rms_normalization import Gemma3nRMSNorm + + +class Gemma3nTextScaledWordEmbedding(keras.layers.Layer): + """A layer that computes scaled word embeddings for Gemma3n models. 
+ + This layer performs a standard embedding lookup and then scales the + resulting vectors by a specified factor. + + Args: + num_embeddings: int. The size of the vocabulary. + embedding_dim: int. The dimension of the embedding vectors. + embed_scale: float. The scaling factor applied to the embeddings. + """ + + def __init__( + self, + num_embeddings, + embedding_dim, + embed_scale=1.0, + dtype=None, + **kwargs, + ): + super().__init__(dtype=dtype, **kwargs) + self.num_embeddings = num_embeddings + self.embedding_dim = embedding_dim + self.embed_scale = embed_scale + self.embedding = keras.layers.Embedding( + self.num_embeddings, + self.embedding_dim, + name="embedding", + dtype=self.dtype_policy, + ) + + def build(self, input_shape): + self.embedding.build(input_shape) + super().build(input_shape) + + def call(self, inputs): + return self.embedding(inputs) * self.embed_scale + + def get_config(self): + config = super().get_config() + config.update( + { + "num_embeddings": self.num_embeddings, + "embedding_dim": self.embedding_dim, + "embed_scale": self.embed_scale, + } + ) + return config + + +class Gemma3nTextRotaryEmbedding(keras.layers.Layer): + """A layer that computes rotary positional embeddings for Gemma3n models. + + This layer calculates the cosine and sine matrices for Rotary Positional + Embedding (RoPE), which are then applied to query and key tensors in the + attention mechanism to inject positional information. + + Args: + head_dim: int. The dimension of each attention head. + rope_theta: float. The base for the rotary frequency. + max_position_embeddings: int. The maximum sequence length that this + model might be used with. + rope_scaling: dict or `None`. Specifies the scaling strategy for RoPE. + base: float. The base value for the inverse frequency calculation. + """ + + def __init__( + self, + head_dim, + rope_theta, + max_position_embeddings, + rope_scaling, + base=10000, + dtype=None, + **kwargs, + ): + super().__init__(dtype=dtype, **kwargs) + self.head_dim = head_dim + self.rope_theta = rope_theta + self.max_position_embeddings = max_position_embeddings + self.rope_scaling = rope_scaling + self.base = base + inv_freq = 1.0 / ( + self.base + ** (np.arange(0, self.head_dim, 2, dtype="float32") / self.head_dim) + ) + self.inv_freq = keras.ops.convert_to_tensor(inv_freq) + self.attention_scaling = 1.0 + + def call(self, x, position_ids): + inv_freq_expanded = keras.ops.expand_dims( + keras.ops.expand_dims(self.inv_freq, 0), -1 + ) + inv_freq_expanded = keras.ops.repeat( + inv_freq_expanded, repeats=keras.ops.shape(position_ids)[0], axis=0 + ) + position_ids_expanded = keras.ops.expand_dims( + keras.ops.cast(position_ids, "float32"), 1 + ) + + freqs = keras.ops.transpose( + keras.ops.matmul(inv_freq_expanded, position_ids_expanded), + (0, 2, 1), + ) + emb = keras.ops.concatenate([freqs, freqs], axis=-1) + cos = keras.ops.cos(emb) * self.attention_scaling + sin = keras.ops.sin(emb) * self.attention_scaling + return keras.ops.cast(cos, x.dtype), keras.ops.cast(sin, x.dtype) + + def get_config(self): + config = super().get_config() + config.update( + { + "head_dim": self.head_dim, + "rope_theta": self.rope_theta, + "max_position_embeddings": self.max_position_embeddings, + "rope_scaling": self.rope_scaling, + "base": self.base, + } + ) + return config + + +class Gemma3nTextMLP(keras.layers.Layer): + """A Gemma3n-specific feed-forward network (MLP) layer. 
+ + This layer implements the MLP block used in Gemma3n transformer layers, + featuring a gated linear unit (GLU) structure. It can also apply activation + sparsity using a Gaussian top-k mechanism. + + Args: + hidden_size: int. The dimension of the hidden state. + intermediate_size: int. The dimension of the intermediate layer in the + MLP. + hidden_activation: str or callable. The activation function to use. + activation_sparsity: float. The target sparsity for activations, + enabling the Gaussian top-k mechanism if greater than 0. + """ + + def __init__( + self, + hidden_size, + intermediate_size, + hidden_activation, + activation_sparsity, + dtype=None, + **kwargs, + ): + super().__init__(dtype=dtype, **kwargs) + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.hidden_activation = hidden_activation + self.activation_sparsity = activation_sparsity + self.gate_proj = keras.layers.Dense( + intermediate_size, + use_bias=False, + name="gate_proj", + dtype=self.dtype_policy, + ) + self.up_proj = keras.layers.Dense( + intermediate_size, + use_bias=False, + name="up_proj", + dtype=self.dtype_policy, + ) + self.down_proj = keras.layers.Dense( + hidden_size, + use_bias=False, + name="down_proj", + dtype=self.dtype_policy, + ) + if hidden_activation == "gelu_approximate": + # NOTE: `gelu_pytorch_tanh` is the same as `gelu(approximate=True)`. + self.act_fn = lambda x: keras.activations.gelu(x, approximate=True) + else: + self.act_fn = keras.activations.get(hidden_activation) + + def build(self, input_shape): + self.gate_proj.build(input_shape) + self.up_proj.build(input_shape) + self.down_proj.build((None, self.intermediate_size)) + super().build(input_shape) + + def _gaussian_topk(self, inputs): + target_sparsity_tensor = keras.ops.convert_to_tensor( + self.activation_sparsity, dtype="float32" + ) + std_multiplier = keras.ops.erfinv( + 2 * target_sparsity_tensor - 1 + ) * keras.ops.sqrt(keras.ops.convert_to_tensor(2.0, dtype="float32")) + std_multiplier = keras.ops.cast(std_multiplier, dtype=inputs.dtype) + inputs_mean = keras.ops.mean(inputs, axis=-1, keepdims=True) + inputs_std = keras.ops.std(inputs, axis=-1, keepdims=True) + cutoff_x = inputs_mean + inputs_std * std_multiplier + return keras.ops.relu(inputs - cutoff_x) + + def call(self, hidden_states): + gate_proj = self.gate_proj(hidden_states) + if self.activation_sparsity > 0.0: + gate_proj = self._gaussian_topk(gate_proj) + activations = self.act_fn(gate_proj) + up_proj = self.up_proj(hidden_states) + down_proj = self.down_proj(activations * up_proj) + return down_proj + + def get_config(self): + config = super().get_config() + config.update( + { + "hidden_size": self.hidden_size, + "intermediate_size": self.intermediate_size, + "hidden_activation": self.hidden_activation, + "activation_sparsity": self.activation_sparsity, + } + ) + return config + + +class Gemma3nTextLaurelBlock(keras.layers.Layer): + """A Laurel block layer for the Gemma3n model. + + This layer implements a low-rank residual block which applies a + down-projection to a specified rank, followed by an up-projection. The + result is normalized and added back to the original input, forming a + residual connection. + + Args: + hidden_size: int. The dimension of the hidden state. + laurel_rank: int. The rank of the low-rank adaptation. + rms_norm_eps: float. The epsilon value for the RMS normalization layer. 
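+
+    Example (an illustrative sketch with toy sizes; real configurations use
+    much larger hidden sizes):
+    ```python
+    import keras
+
+    layer = Gemma3nTextLaurelBlock(
+        hidden_size=8, laurel_rank=2, rms_norm_eps=1e-6
+    )
+    x = keras.ops.ones((1, 4, 8))
+    y = layer(x)  # The low-rank update is residual; `y` matches x's shape.
+    ```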
+ """ + + def __init__( + self, hidden_size, laurel_rank, rms_norm_eps, dtype=None, **kwargs + ): + super().__init__(dtype=dtype, **kwargs) + self.hidden_size = hidden_size + self.laurel_rank = laurel_rank + self.rms_norm_eps = rms_norm_eps + self.linear_left = keras.layers.Dense( + laurel_rank, + use_bias=False, + name="linear_left", + dtype=self.dtype_policy, + ) + self.linear_right = keras.layers.Dense( + hidden_size, + use_bias=False, + name="linear_right", + dtype=self.dtype_policy, + ) + self.post_laurel_norm = Gemma3nRMSNorm( + hidden_size, + eps=rms_norm_eps, + name="post_laurel_norm", + dtype=self.dtype_policy, + ) + + def build(self, input_shape): + self.linear_left.build(input_shape) + self.linear_right.build((None, self.laurel_rank)) + self.post_laurel_norm.build(input_shape) + super().build(input_shape) + + def call(self, hidden_states): + laurel_hidden_states = self.linear_left(hidden_states) + laurel_hidden_states = self.linear_right(laurel_hidden_states) + normed_laurel_hidden_states = self.post_laurel_norm( + laurel_hidden_states + ) + return hidden_states + normed_laurel_hidden_states + + def get_config(self): + config = super().get_config() + config.update( + { + "hidden_size": self.hidden_size, + "laurel_rank": self.laurel_rank, + "rms_norm_eps": self.rms_norm_eps, + } + ) + return config + + +class Gemma3nTextAltUp(keras.layers.Layer): + """An Alternating Update (AltUp) layer for the Gemma3n model. + + This layer implements the AltUp mechanism, which combines multiple input + modalities through a predict-and-correct cycle. It uses a router to compute + modality-specific coefficients for predicting and correcting hidden states. + + Args: + hidden_size: int. The dimension of the hidden state. + altup_num_inputs: int. The number of input modalities to the AltUp + block. + altup_coef_clip: float. The clipping value for coefficients. + altup_active_idx: int. The index of the currently active input. + rms_norm_eps: float. The epsilon value for the Gemma 3n RMS + normalization layers. + altup_correct_scale: bool. If `True`, enables a learnable scaling + factor on the corrected output. 
+ """ + + def __init__( + self, + hidden_size, + altup_num_inputs, + altup_coef_clip, + altup_active_idx, + rms_norm_eps, + altup_correct_scale, + dtype=None, + **kwargs, + ): + super().__init__(dtype=dtype, **kwargs) + self.hidden_size = hidden_size + self.altup_num_inputs = altup_num_inputs + self.altup_coef_clip = altup_coef_clip + self.altup_active_idx = altup_active_idx + self.rms_norm_eps = rms_norm_eps + + self.altup_correct_scale = altup_correct_scale + self.correct_output_scale = None + self.correction_coefs = keras.layers.Dense( + self.altup_num_inputs, + use_bias=False, + name="correction_coefs", + dtype=self.dtype_policy, + ) + self.prediction_coefs = keras.layers.Dense( + self.altup_num_inputs**2, + use_bias=False, + name="prediction_coefs", + dtype=self.dtype_policy, + ) + self.modality_router = keras.layers.Dense( + self.altup_num_inputs, + use_bias=False, + name="modality_router", + dtype=self.dtype_policy, + ) + self.router_norm = Gemma3nRMSNorm( + self.hidden_size, + eps=self.rms_norm_eps, + name="router_norm", + dtype=self.dtype_policy, + ) + self.router_input_scale = self.hidden_size**-1.0 + + def build(self, input_shape): + if self.altup_correct_scale: + self.correct_output_scale = self.add_weight( + shape=(self.hidden_size,), + initializer="zeros", + trainable=True, + name="correct_output_scale", + dtype=self.dtype_policy.variable_dtype, + ) + router_input_shape = input_shape[1:] + self.router_norm.build(router_input_shape) + self.modality_router.build(router_input_shape) + coefs_input_shape = router_input_shape[:-1] + (self.altup_num_inputs,) + self.correction_coefs.build(coefs_input_shape) + self.prediction_coefs.build(coefs_input_shape) + super().build(input_shape) + + def compute_router_modalities(self, x): + router_inputs = self.router_norm(x) * self.router_input_scale + routed = self.modality_router(router_inputs) + return keras.ops.cast( + keras.ops.tanh(keras.ops.cast(routed, "float32")), x.dtype + ) + + def predict(self, hidden_states): + modalities = self.compute_router_modalities( + hidden_states[self.altup_active_idx] + ) + all_coefs = keras.ops.reshape( + self.prediction_coefs(modalities), + modalities.shape[:-1] + + (self.altup_num_inputs, self.altup_num_inputs), + ) + all_coefs = keras.ops.transpose(all_coefs, (0, 1, 3, 2)) + predictions = keras.ops.matmul( + keras.ops.transpose(hidden_states, (1, 2, 3, 0)), all_coefs + ) + predictions = keras.ops.transpose(predictions, (3, 0, 1, 2)) + predictions += hidden_states + return predictions + + def correct(self, predictions, activated): + modalities = self.compute_router_modalities(activated) + innovation = activated - predictions[self.altup_active_idx] + innovation = keras.ops.repeat( + keras.ops.expand_dims(innovation, 0), self.altup_num_inputs, axis=0 + ) + all_coefs = self.correction_coefs(modalities) + 1.0 + all_coefs = keras.ops.expand_dims( + keras.ops.transpose(all_coefs, (2, 0, 1)), -1 + ) + corrected = innovation * all_coefs + corrected += predictions + return corrected + + def scale_corrected_output(self, corrected): + return corrected * self.correct_output_scale + + def get_config(self): + config = super().get_config() + config.update( + { + "hidden_size": self.hidden_size, + "altup_num_inputs": self.altup_num_inputs, + "altup_coef_clip": self.altup_coef_clip, + "altup_active_idx": self.altup_active_idx, + "rms_norm_eps": self.rms_norm_eps, + "altup_correct_scale": self.altup_correct_scale, + } + ) + return config diff --git a/keras_hub/src/models/gemma3n/gemma3n_text_model.py 
b/keras_hub/src/models/gemma3n/gemma3n_text_model.py new file mode 100644 index 0000000000..2a668cdc35 --- /dev/null +++ b/keras_hub/src/models/gemma3n/gemma3n_text_model.py @@ -0,0 +1,384 @@ +import math + +import keras + +from keras_hub.src.models.gemma3n.gemma3n_text_decoder import ( + Gemma3nTextDecoderBlock, +) +from keras_hub.src.models.gemma3n.gemma3n_text_layers import ( + Gemma3nTextRotaryEmbedding, +) +from keras_hub.src.models.gemma3n.gemma3n_text_layers import ( + Gemma3nTextScaledWordEmbedding, +) +from keras_hub.src.models.gemma3n.rms_normalization import Gemma3nRMSNorm + + +class Gemma3nTextModel(keras.layers.Layer): + """The core Gemma3n text model layer. + + This layer implements the transformer architecture of the Gemma3n model. + It includes token embeddings, multiple decoder blocks, and final + normalization. + + Args: + pad_token_id: int. The id for the padding token. + vocab_size: int. The size of the vocabulary. + hidden_size: int. The size of the hidden states. + num_hidden_layers: int. The number of hidden layers in the transformer. + rms_norm_eps: float. The epsilon value for the RMS normalization layers. + num_attention_heads: int. The number of attention heads. + num_key_value_heads: int. The number of key-value heads for GQA. + head_dim: int. The dimension of each attention head. + attention_bias: bool. Whether to use a bias in the attention mechanism. + attention_dropout: float. The dropout rate for the attention scores. + layer_types: list of str. The type of each layer, e.g., + "sliding_attention". + sliding_window: int. The sliding window size for sliding window + attention. + rope_theta: float. The base frequency for Rotary Positional Embeddings. + rope_scaling: float or None. The scaling factor for RoPE. + rope_local_base_freq: float. The base frequency for local RoPE. + max_position_embeddings: int. The maximum sequence length. + intermediate_size: list of int. The size of the intermediate layer in + each of the feed-forward networks. + hidden_activation: str. The activation function for the hidden layers. + activation_sparsity_pattern: list of float or None. The sparsity pattern + for activations. + altup_num_inputs: int. The number of inputs for the AltUp mechanism. + altup_coef_clip: float. The coefficient clipping value for AltUp. + altup_active_idx: int. The active index for AltUp. + altup_correct_scale: bool. Whether to correct scaling in AltUp. + laurel_rank: int. The rank for LAUREL factorization. + hidden_size_per_layer_input: int. The hidden size for per-layer inputs. + vocab_size_per_layer_input: int. The vocabulary size for per-layer + inputs. + num_kv_shared_layers: int. The number of shared key-value layers. 
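+
+    Call arguments:
+        input_ids: Token id tensor of shape `(batch_size, seq_len)`.
+        attention_mask: Boolean attention mask, e.g. of shape
+            `(batch_size, 1, seq_len, seq_len)`.
+        inputs_embeds: Precomputed input embeddings of shape
+            `(batch_size, seq_len, hidden_size)`.
+        per_layer_inputs: Per-layer inputs as produced by
+            `get_per_layer_inputs` and `project_per_layer_inputs`.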
+ """ + + def __init__( + self, + pad_token_id, + vocab_size, + hidden_size, + num_hidden_layers, + rms_norm_eps, + num_attention_heads, + num_key_value_heads, + head_dim, + attention_bias, + attention_dropout, + layer_types, + sliding_window, + rope_theta, + rope_scaling, + rope_local_base_freq, + max_position_embeddings, + intermediate_size, + hidden_activation, + activation_sparsity_pattern, + altup_num_inputs, + altup_coef_clip, + altup_active_idx, + altup_correct_scale, + laurel_rank, + hidden_size_per_layer_input, + vocab_size_per_layer_input, + num_kv_shared_layers, + dtype=None, + **kwargs, + ): + super().__init__(dtype=dtype, **kwargs) + self.pad_token_id = pad_token_id + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.rms_norm_eps = rms_norm_eps + self.num_attention_heads = num_attention_heads + self.num_key_value_heads = num_key_value_heads + self.head_dim = head_dim + self.attention_bias = attention_bias + self.attention_dropout = attention_dropout + self.layer_types = layer_types + self.sliding_window = sliding_window + self.rope_theta = rope_theta + self.rope_scaling = rope_scaling + self.rope_local_base_freq = rope_local_base_freq + self.max_position_embeddings = max_position_embeddings + self.intermediate_size = intermediate_size + self.hidden_activation = hidden_activation + self.activation_sparsity_pattern = activation_sparsity_pattern + self.altup_num_inputs = altup_num_inputs + self.altup_coef_clip = altup_coef_clip + self.altup_active_idx = altup_active_idx + self.altup_correct_scale = altup_correct_scale + self.laurel_rank = laurel_rank + self.hidden_size_per_layer_input = hidden_size_per_layer_input + self.vocab_size_per_layer_input = vocab_size_per_layer_input + self.num_kv_shared_layers = num_kv_shared_layers + self.padding_idx = pad_token_id + self.embed_tokens = Gemma3nTextScaledWordEmbedding( + vocab_size, + hidden_size, + embed_scale=hidden_size**0.5, + name="embed_tokens", + dtype=self.dtype_policy, + ) + if activation_sparsity_pattern is None: + self.activation_sparsity_pattern = [0.0] * num_hidden_layers + self.layers = [ + Gemma3nTextDecoderBlock( + hidden_size, + rms_norm_eps, + num_attention_heads, + num_key_value_heads, + head_dim, + attention_bias, + attention_dropout, + layer_types[i] == "sliding_attention", + sliding_window, + intermediate_size[i], + hidden_activation, + self.activation_sparsity_pattern[i], + altup_num_inputs, + altup_coef_clip, + altup_active_idx, + altup_correct_scale, + laurel_rank, + hidden_size_per_layer_input, + name=f"decoder_block_{i}", + dtype=self.dtype_policy, + ) + for i in range(num_hidden_layers) + ] + self.norm = Gemma3nRMSNorm( + hidden_size, eps=rms_norm_eps, name="norm", dtype=self.dtype_policy + ) + self.rotary_emb = Gemma3nTextRotaryEmbedding( + head_dim, + rope_theta, + max_position_embeddings, + rope_scaling, + dtype=self.dtype_policy, + name="rotary_emb", + ) + self.rotary_emb_local = Gemma3nTextRotaryEmbedding( + head_dim, + rope_local_base_freq, + max_position_embeddings, + None, + dtype=self.dtype_policy, + name="rotary_emb_local", + ) + self.embed_tokens_per_layer = Gemma3nTextScaledWordEmbedding( + vocab_size_per_layer_input, + num_hidden_layers * hidden_size_per_layer_input, + embed_scale=hidden_size_per_layer_input**0.5, + name="embed_tokens_per_layer", + dtype=self.dtype_policy, + ) + self.per_layer_model_projection = keras.layers.Dense( + num_hidden_layers * hidden_size_per_layer_input, + use_bias=False, + 
name="per_layer_model_projection", + dtype=self.dtype_policy, + ) + self.per_layer_projection_norm = Gemma3nRMSNorm( + hidden_size_per_layer_input, + eps=rms_norm_eps, + name="per_layer_projection_norm", + dtype=self.dtype_policy, + ) + self.altup_projections = [ + keras.layers.Dense( + hidden_size, + use_bias=False, + name=f"altup_projection_{i}", + dtype=self.dtype_policy, + ) + for i in range(1, altup_num_inputs) + ] + self.altup_unembed_projections = [ + keras.layers.Dense( + hidden_size, + use_bias=False, + name=f"altup_unembed_projection_{i}", + dtype=self.dtype_policy, + ) + for i in range(1, altup_num_inputs) + ] + self.per_layer_projection_scale = hidden_size**-0.5 + self.per_layer_input_scale = 1.0 / math.sqrt(2.0) + + def build(self, input_shape): + if isinstance(input_shape, (list, tuple)) and isinstance( + input_shape[0], (list, tuple) + ): + input_ids_shape, _, inputs_embeds_shape, _ = input_shape + else: + input_ids_shape = input_shape + hidden_size = self.embed_tokens.embedding_dim + inputs_embeds_shape = input_ids_shape[:-1] + (hidden_size,) + self.embed_tokens.build(input_ids_shape) + self.embed_tokens_per_layer.build(input_ids_shape) + if not self.per_layer_model_projection.built: + self.per_layer_model_projection.build(inputs_embeds_shape) + per_layer_projection_norm_shape = ( + None, + None, + None, + self.hidden_size_per_layer_input, + ) + if not self.per_layer_projection_norm.built: + self.per_layer_projection_norm.build( + per_layer_projection_norm_shape + ) + for proj in self.altup_projections: + proj.build(inputs_embeds_shape) + for proj in self.altup_unembed_projections: + proj.build(inputs_embeds_shape) + decoder_hidden_states_shape = ( + self.altup_num_inputs, + ) + inputs_embeds_shape + decoder_per_layer_input_shape = input_ids_shape + ( + self.hidden_size_per_layer_input, + ) + decoder_input_shape = ( + decoder_hidden_states_shape, + None, # position_embeddings_global + None, # position_embeddings_local + decoder_per_layer_input_shape, + None, # attention_mask + ) + for layer in self.layers: + layer.build(decoder_input_shape) + self.norm.build(inputs_embeds_shape) + super().build(input_shape) + + def get_per_layer_inputs(self, input_ids): + embeds = self.embed_tokens_per_layer(input_ids) + return keras.ops.reshape( + embeds, + keras.ops.shape(input_ids) + + (self.num_hidden_layers, self.hidden_size_per_layer_input), + ) + + def project_per_layer_inputs(self, inputs_embeds, per_layer_inputs=None): + per_layer_projection = self.per_layer_model_projection(inputs_embeds) + per_layer_projection = ( + per_layer_projection * self.per_layer_projection_scale + ) + per_layer_projection = keras.ops.reshape( + per_layer_projection, + keras.ops.shape(inputs_embeds)[:-1] + + (self.num_hidden_layers, self.hidden_size_per_layer_input), + ) + per_layer_projection = self.per_layer_projection_norm( + per_layer_projection + ) + if per_layer_inputs is None: + return per_layer_projection + return ( + per_layer_projection + per_layer_inputs + ) * self.per_layer_input_scale + + def compute_output_shape(self, input_shape): + if isinstance(input_shape, (list, tuple)) and isinstance( + input_shape[0], (list, tuple) + ): + input_ids_shape = input_shape[0] + else: + input_ids_shape = input_shape + hidden_size = self.embed_tokens.embedding_dim + return input_ids_shape + (hidden_size,) + + def call(self, input_ids, attention_mask, inputs_embeds, per_layer_inputs): + position_ids = keras.ops.expand_dims( + keras.ops.arange(0, keras.ops.shape(input_ids)[1]), 0 + ) + hidden_states_0 = 
inputs_embeds + cos_global, sin_global = self.rotary_emb(hidden_states_0, position_ids) + cos_local, sin_local = self.rotary_emb_local( + hidden_states_0, position_ids + ) + target_magnitude = keras.ops.sqrt( + keras.ops.mean(hidden_states_0**2, axis=-1, keepdims=True) + ) + epsilon = 1e-5 + temp_hidden_states = [hidden_states_0] + for proj in self.altup_projections: + altup_proj = proj(hidden_states_0) + new_magnitude = keras.ops.sqrt( + keras.ops.maximum( + keras.ops.mean(altup_proj**2, axis=-1, keepdims=True), + epsilon, + ) + ) + current_hidden_state = altup_proj * target_magnitude / new_magnitude + temp_hidden_states.append(current_hidden_state) + hidden_states = keras.ops.stack(temp_hidden_states, axis=0) + for i, decoder_layer in enumerate(self.layers): + per_layer_input = per_layer_inputs[:, :, i, :] + hidden_states = decoder_layer( + ( + hidden_states, + (cos_global, sin_global), + (cos_local, sin_local), + per_layer_input, + attention_mask, + ) + ) + target_magnitude = keras.ops.sqrt( + keras.ops.mean(hidden_states[0] ** 2, axis=-1, keepdims=True) + ) + temp_hidden_states = [hidden_states[0]] + for i, proj in enumerate(self.altup_unembed_projections): + altup_unemb_proj = proj(hidden_states[i + 1]) + new_magnitude = keras.ops.sqrt( + keras.ops.maximum( + keras.ops.mean(altup_unemb_proj**2, axis=-1, keepdims=True), + epsilon, + ) + ) + current_hidden_state = ( + altup_unemb_proj * target_magnitude / new_magnitude + ) + temp_hidden_states.append(current_hidden_state) + hidden_states = keras.ops.stack(temp_hidden_states) + hidden_states = keras.ops.mean(hidden_states, axis=0) + return self.norm(hidden_states) + + def get_config(self): + config = super().get_config() + config.update( + { + "pad_token_id": self.pad_token_id, + "vocab_size": self.vocab_size, + "hidden_size": self.hidden_size, + "num_hidden_layers": self.num_hidden_layers, + "rms_norm_eps": self.rms_norm_eps, + "num_attention_heads": self.num_attention_heads, + "num_key_value_heads": self.num_key_value_heads, + "head_dim": self.head_dim, + "attention_bias": self.attention_bias, + "attention_dropout": self.attention_dropout, + "layer_types": self.layer_types, + "sliding_window": self.sliding_window, + "rope_theta": self.rope_theta, + "rope_scaling": self.rope_scaling, + "rope_local_base_freq": self.rope_local_base_freq, + "max_position_embeddings": self.max_position_embeddings, + "intermediate_size": self.intermediate_size, + "hidden_activation": self.hidden_activation, + "activation_sparsity_pattern": self.activation_sparsity_pattern, + "altup_num_inputs": self.altup_num_inputs, + "altup_coef_clip": self.altup_coef_clip, + "altup_active_idx": self.altup_active_idx, + "altup_correct_scale": self.altup_correct_scale, + "laurel_rank": self.laurel_rank, + "hidden_size_per_layer_input": self.hidden_size_per_layer_input, + "vocab_size_per_layer_input": self.vocab_size_per_layer_input, + "num_kv_shared_layers": self.num_kv_shared_layers, + } + ) + return config diff --git a/keras_hub/src/models/gemma3n/gemma3n_utils.py b/keras_hub/src/models/gemma3n/gemma3n_utils.py new file mode 100644 index 0000000000..0db8706d63 --- /dev/null +++ b/keras_hub/src/models/gemma3n/gemma3n_utils.py @@ -0,0 +1,122 @@ +import keras + + +def rotate_half(x): + """Rotates half of the hidden dimensions of the input tensor. + + This function is used to implement rotary positional embeddings. It splits + the last dimension of the input tensor into two halves, negates the second + half, and then concatenates them back together. 
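+
+    For example, if the last dimension of `x` is `[a, b, c, d]`, the
+    result's last dimension is `[-c, -d, a, b]`.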
+ + Args: + x: The input tensor. + + Returns: + A new tensor with the second half of the last dimension rotated. + """ + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return keras.ops.concatenate([-x2, x1], axis=-1) + + +def repeat_kv(hidden_states, n_rep): + """Repeats the key and value states for Grouped-Query Attention. + + This function is used in Grouped-Query Attention (GQA) to expand the key + and value states to match the number of query heads. + + Args: + hidden_states: The key or value tensor to be repeated, with a shape of + `[batch, num_key_value_heads, seq_len, head_dim]`. + n_rep: int. The number of times to repeat the key/value heads. + + Returns: + The repeated tensor with a shape of + `[batch, num_key_value_heads * n_rep, seq_len, head_dim]`. + """ + if n_rep == 1: + return hidden_states + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + hidden_states = keras.ops.expand_dims(hidden_states, 2) + hidden_states = keras.ops.repeat(hidden_states, n_rep, axis=2) + return keras.ops.reshape( + hidden_states, (batch, num_key_value_heads * n_rep, slen, head_dim) + ) + + +def apply_rotary_pos_emb(x, cos, sin, unsqueeze_dim=1): + """Applies rotary positional embedding to the input tensor. + + Args: + x: The input tensor. + cos: The cosine part of the rotary embedding. + sin: The sine part of the rotary embedding. + unsqueeze_dim: int. The dimension to unsqueeze `cos` and `sin` before + applying the embedding. Defaults to 1. + + Returns: + The tensor with rotary positional embeddings applied. + """ + cos = keras.ops.expand_dims(cos, axis=unsqueeze_dim) + sin = keras.ops.expand_dims(sin, axis=unsqueeze_dim) + return (x * cos) + (rotate_half(x) * sin) + + +def eager_attention_forward( + query, + key, + value, + num_key_value_groups, + head_dim, + attention_mask, + dropout=0.0, + scaling=None, + softcap=None, + training=False, +): + """Forward pass for an eager attention implementation. + + Args: + query: The query tensor. + key: The key tensor. + value: The value tensor. + num_key_value_groups: int. The number of key-value groups. + head_dim: int. The dimension of each attention head. + attention_mask: The attention mask to apply. + dropout: float. The dropout rate. Defaults to 0.0. + scaling: float, optional. The scaling factor for attention scores. + If `None`, it defaults to `head_dim**-0.5`. + softcap: float, optional. A softcap value to apply to attention weights. + Defaults to `None`. + training: bool. Whether the model is in training mode. Defaults to + `False`. 
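+
+    Returns:
+        A tuple `(attn_output, attn_weights)`. Assuming the
+        `[batch, num_heads, seq_len, head_dim]` layout described in
+        `repeat_kv`, `attn_output` is the attention output transposed to
+        `[batch, seq_len, num_heads, head_dim]`, and `attn_weights` holds
+        the post-softmax attention probabilities.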
+ """ + if scaling is None: + scaling = head_dim**-0.5 + key_states = repeat_kv(key, num_key_value_groups) + value_states = repeat_kv(value, num_key_value_groups) + attn_weights = ( + keras.ops.matmul(query, keras.ops.transpose(key_states, (0, 1, 3, 2))) + * scaling + ) + if softcap is not None: + attn_weights = attn_weights / softcap + attn_weights = keras.ops.tanh(attn_weights) + attn_weights = attn_weights * softcap + if attention_mask is not None: + causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] + attn_weights = attn_weights + keras.ops.cast( + causal_mask, dtype=attn_weights.dtype + ) + attn_weights_dtype = attn_weights.dtype + attn_weights = keras.ops.softmax( + keras.ops.cast(attn_weights, "float32"), axis=-1 + ) + attn_weights = keras.ops.cast(attn_weights, attn_weights_dtype) + if training: + attn_weights = keras.layers.Dropout(dropout)( + attn_weights, training=training + ) + attn_output = keras.ops.matmul(attn_weights, value_states) + attn_output = keras.ops.transpose(attn_output, (0, 2, 1, 3)) + return attn_output, attn_weights diff --git a/keras_hub/src/models/gemma3n/rms_normalization.py b/keras_hub/src/models/gemma3n/rms_normalization.py new file mode 100644 index 0000000000..48955699d0 --- /dev/null +++ b/keras_hub/src/models/gemma3n/rms_normalization.py @@ -0,0 +1,67 @@ +import keras + + +class Gemma3nRMSNorm(keras.layers.Layer): + """The Gemma 3n specific RMS normalization layer. + + Args: + dim: int. The dimension of the input tensor. + eps: float. A small constant added to the denominator for numerical + stability. Defaults to `1e-6`. + with_scale: bool. Whether to include a learnable scaling parameter. + Defaults to `True`. + """ + + def __init__(self, dim, eps=1e-6, with_scale=True, dtype=None, **kwargs): + super().__init__(dtype=dtype, **kwargs) + self.dim = dim + self.eps = eps + self.with_scale = with_scale + + def build(self, input_shape): + if self.with_scale: + self.scale = self.add_weight( + shape=(self.dim,), + initializer="ones", + trainable=True, + name="scale", + dtype=self.dtype_policy.variable_dtype, + ) + else: + self.scale = 1.0 + super().build(input_shape) + + def call(self, x): + norm_x = x * keras.ops.rsqrt( + keras.ops.mean(keras.ops.square(x), axis=-1, keepdims=True) + + self.eps + ) + return norm_x * self.scale + + def _int8_call(self, x): + x = keras.ops.cast(x, "float32") + norm_x = x * keras.ops.rsqrt( + keras.ops.mean(keras.ops.square(x), axis=-1, keepdims=True) + + self.eps + ) + norm_x = norm_x * self.scale + return keras.ops.cast(norm_x, x.dtype) + + def _float8_call(self, x): + x_calc = keras.ops.cast(x, "float32") + norm_x = x_calc * keras.ops.rsqrt( + keras.ops.mean(keras.ops.square(x_calc), axis=-1, keepdims=True) + + self.eps + ) + return keras.ops.cast(norm_x * self.scale, x.dtype) + + def get_config(self): + config = super().get_config() + config.update( + { + "dim": self.dim, + "eps": self.eps, + "with_scale": self.with_scale, + } + ) + return config From c0bcfc4251c890f3c4ea6e10ff94668f89fa2e88 Mon Sep 17 00:00:00 2001 From: harshaljanjani Date: Sat, 11 Oct 2025 13:07:43 +0530 Subject: [PATCH 02/10] refactor: Update according to changed MobileNetV5 design --- .../src/models/gemma3n/gemma3n_backbone.py | 39 +++++++++++----- .../models/gemma3n/gemma3n_backbone_test.py | 44 +++++++------------ 2 files changed, 42 insertions(+), 41 deletions(-) diff --git a/keras_hub/src/models/gemma3n/gemma3n_backbone.py b/keras_hub/src/models/gemma3n/gemma3n_backbone.py index e939297432..79d4b59eee 100644 --- 
a/keras_hub/src/models/gemma3n/gemma3n_backbone.py +++ b/keras_hub/src/models/gemma3n/gemma3n_backbone.py @@ -1,3 +1,5 @@ +import inspect + import keras from keras_hub.src.api_export import keras_hub_export @@ -7,6 +9,9 @@ ) from keras_hub.src.models.gemma3n.gemma3n_text_model import Gemma3nTextModel from keras_hub.src.models.gemma3n.rms_normalization import Gemma3nRMSNorm +from keras_hub.src.models.mobilenetv5.mobilenetv5_backbone import ( + MobileNetV5Backbone, +) class Gemma3nMultimodalEmbedder(keras.layers.Layer): @@ -510,14 +515,14 @@ class Gemma3nBackbone(Backbone): MobileNetV5Backbone, ) from keras_hub.src.models.mobilenetv5.mobilenetv5_builder import ( - decode_arch_def, + convert_arch_def_to_stackwise, ) # Vision encoder config. vision_arch_def = [["er_r1_k3_s1_e1_c16"]] - vision_block_args = decode_arch_def(vision_arch_def) + stackwise_params = convert_arch_def_to_stackwise(vision_arch_def) vision_encoder = MobileNetV5Backbone( - block_args=vision_block_args, + **stackwise_params, num_features=4, image_shape=(224, 224, 3), use_msfa=False, @@ -630,20 +635,30 @@ def __init__( # === Layers === self.vision_encoder = None if vision_encoder_config: - from keras_hub.src.models.mobilenetv5.mobilenetv5_backbone import ( - MobileNetV5Backbone, - ) - - vision_encoder_config["dtype"] = dtype + local_vision_encoder_config = vision_encoder_config.copy() + local_vision_encoder_config["dtype"] = dtype self.vision_encoder = MobileNetV5Backbone.from_config( - vision_encoder_config + local_vision_encoder_config ) self.audio_encoder = None if audio_encoder_config: - audio_config = audio_encoder_config.copy() - audio_config.pop("dtype", None) + audio_encoder_sig = inspect.signature(Gemma3nAudioEncoder.__init__) + audio_encoder_args = { + p.name for p in audio_encoder_sig.parameters.values() + } + keras_layer_sig = inspect.signature(keras.layers.Layer.__init__) + keras_layer_args = { + p.name for p in keras_layer_sig.parameters.values() + } + valid_args = audio_encoder_args.union(keras_layer_args) + filtered_kwargs = { + key: value + for key, value in audio_encoder_config.items() + if key in valid_args + } + filtered_kwargs.pop("dtype", None) self.audio_encoder = Gemma3nAudioEncoder( - dtype=dtype, **audio_config + dtype=dtype, **filtered_kwargs ) self.language_model = Gemma3nTextModel( pad_token_id=pad_token_id, diff --git a/keras_hub/src/models/gemma3n/gemma3n_backbone_test.py b/keras_hub/src/models/gemma3n/gemma3n_backbone_test.py index 23e261f98c..96a6ac79af 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_backbone_test.py +++ b/keras_hub/src/models/gemma3n/gemma3n_backbone_test.py @@ -1,32 +1,21 @@ from copy import deepcopy import numpy as np -import pytest from absl.testing import parameterized -try: - from keras_hub.src.models.mobilenetv5.mobilenetv5_backbone import ( - MobileNetV5Backbone, - ) - from keras_hub.src.models.mobilenetv5.mobilenetv5_builder import ( - decode_arch_def, - ) - - mobilenetv5 = True -except ImportError: - mobilenetv5 = False - from keras_hub.src.models.gemma3n.gemma3n_audio_encoder import ( Gemma3nAudioEncoder, ) from keras_hub.src.models.gemma3n.gemma3n_backbone import Gemma3nBackbone +from keras_hub.src.models.mobilenetv5.mobilenetv5_backbone import ( + MobileNetV5Backbone, +) +from keras_hub.src.models.mobilenetv5.mobilenetv5_builder import ( + convert_arch_def_to_stackwise, +) from keras_hub.src.tests.test_case import TestCase -@pytest.mark.skipif( - not mobilenetv5, - reason="The pull request for MobileNetV5 is still open.", -) class Gemma3nBackboneTest(TestCase): 
def setUp(self): self.batch_size = 1 @@ -37,18 +26,15 @@ def setUp(self): self.audio_sequence_length = 16 self.audio_feature_size = 32 # === Vision Encoder === - if mobilenetv5: - vision_arch_def = [["er_r1_k3_s1_e1_c16"]] - vision_block_args = decode_arch_def(vision_arch_def) - vision_encoder = MobileNetV5Backbone( - block_args=vision_block_args, - num_features=4, - image_shape=(self.image_height, self.image_width, 3), - use_msfa=False, - ) - vision_encoder_config = vision_encoder.get_config() - else: - vision_encoder_config = None + vision_arch_def = [["er_r1_k3_s1_e1_c16"]] + stackwise_params = convert_arch_def_to_stackwise(vision_arch_def) + vision_encoder = MobileNetV5Backbone( + **stackwise_params, + num_features=4, + image_shape=(self.image_height, self.image_width, 3), + use_msfa=False, + ) + vision_encoder_config = vision_encoder.get_config() # === Audio Encoder === audio_encoder = Gemma3nAudioEncoder( hidden_size=8, From cb3e1b032cf67bf7851de592a420cc3823e70b91 Mon Sep 17 00:00:00 2001 From: harshaljanjani Date: Sat, 11 Oct 2025 15:26:01 +0530 Subject: [PATCH 03/10] fix: Reduce memory usage in tests to prevent OOM errors --- .../src/models/gemma3n/gemma3n_attention.py | 4 +- .../models/gemma3n/gemma3n_backbone_test.py | 44 +++++++++---------- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/keras_hub/src/models/gemma3n/gemma3n_attention.py b/keras_hub/src/models/gemma3n/gemma3n_attention.py index dc1adaadff..526553dd61 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_attention.py +++ b/keras_hub/src/models/gemma3n/gemma3n_attention.py @@ -507,13 +507,13 @@ def _convert_to_block(self, hidden_states): return keras.ops.reshape(hidden_states, permute_dims) def _extract_block_context(self, hidden_states): + _, t = keras.ops.shape(hidden_states)[:2] + num_frames = (t + self.chunk_size - 1) // self.chunk_size pad_left = self.max_past_horizon pad_right = self.max_future_horizon + self.chunk_size - 1 hidden_states = self._pad_dim1(hidden_states, pad_left, pad_right) - _, t = keras.ops.shape(hidden_states)[:2] frame_len = self.context_size frame_step = self.chunk_size - num_frames = (t - frame_len) // frame_step + 1 start_indices = keras.ops.arange(0, num_frames) * frame_step frame_offsets = keras.ops.arange(0, frame_len) diff --git a/keras_hub/src/models/gemma3n/gemma3n_backbone_test.py b/keras_hub/src/models/gemma3n/gemma3n_backbone_test.py index 96a6ac79af..abad0a2f98 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_backbone_test.py +++ b/keras_hub/src/models/gemma3n/gemma3n_backbone_test.py @@ -19,14 +19,14 @@ class Gemma3nBackboneTest(TestCase): def setUp(self): self.batch_size = 1 - self.text_vocab_size = 50 - self.text_sequence_length = 16 - self.image_height = 224 - self.image_width = 224 - self.audio_sequence_length = 16 - self.audio_feature_size = 32 + self.text_vocab_size = 10 + self.text_sequence_length = 8 + self.image_height = 32 + self.image_width = 32 + self.audio_sequence_length = 8 + self.audio_feature_size = 16 # === Vision Encoder === - vision_arch_def = [["er_r1_k3_s1_e1_c16"]] + vision_arch_def = [["er_r1_k3_s1_e1_c8"]] stackwise_params = convert_arch_def_to_stackwise(vision_arch_def) vision_encoder = MobileNetV5Backbone( **stackwise_params, @@ -37,10 +37,10 @@ def setUp(self): vision_encoder_config = vision_encoder.get_config() # === Audio Encoder === audio_encoder = Gemma3nAudioEncoder( - hidden_size=8, + hidden_size=4, input_feat_size=self.audio_feature_size, - sscp_conv_channel_size=[4, 8], - sscp_conv_kernel_size=[(3, 3), (3, 3)], + 
sscp_conv_channel_size=[2, 4], + sscp_conv_kernel_size=[(1, 1), (1, 1)], sscp_conv_stride_size=[(2, 2), (2, 2)], sscp_conv_group_norm_eps=1e-5, conf_num_hidden_layers=1, @@ -48,36 +48,36 @@ def setUp(self): gradient_clipping=1.0, conf_residual_weight=0.5, conf_num_attention_heads=1, - conf_attention_chunk_size=4, - conf_attention_context_right=5, - conf_attention_context_left=5, + conf_attention_chunk_size=2, + conf_attention_context_right=1, + conf_attention_context_left=1, conf_attention_logit_cap=50.0, - conf_conv_kernel_size=5, + conf_conv_kernel_size=3, conf_reduction_factor=1, ) # === Multimodal === self.multimodal_init_kwargs = { "text_vocab_size": self.text_vocab_size, - "text_hidden_size": 8, + "text_hidden_size": 4, "num_hidden_layers": 1, "pad_token_id": 0, "num_attention_heads": 1, "num_key_value_heads": 1, - "head_dim": 8, # hidden_size / num_attention_heads - "intermediate_size": [16], + "head_dim": 4, # hidden_size / num_attention_heads + "intermediate_size": [8], "hidden_activation": "gelu_approximate", "layer_types": ["full_attention"], "sliding_window": 4, "rope_theta": 10000.0, "max_position_embeddings": self.text_sequence_length, - "vocab_size_per_layer_input": 50, + "vocab_size_per_layer_input": 10, "hidden_size_per_layer_input": 2, "altup_num_inputs": 2, "laurel_rank": 1, "vision_encoder_config": vision_encoder_config, - "vision_hidden_size": 16, + "vision_hidden_size": 8, "audio_encoder_config": audio_encoder.get_config(), - "audio_hidden_size": 8, + "audio_hidden_size": 4, } self.multimodal_input_data = { "token_ids": np.random.randint( @@ -156,8 +156,8 @@ def test_saved_model(self, backbone_type): ) @parameterized.named_parameters( - ("multimodal", "multimodal", 10354, 7), - ("text_only", "text_only", 1450, 4), + ("multimodal", "multimodal", 5450, 7), + ("text_only", "text_only", 350, 4), ) def test_architecture_characteristics( self, backbone_type, num_params, num_layers From 9849a7cd3e7c110ac14f87aaca729ed37036cd27 Mon Sep 17 00:00:00 2001 From: harshaljanjani Date: Fri, 31 Oct 2025 18:23:45 +0530 Subject: [PATCH 04/10] feat: Add checkpoint conversion script (1 preset) --- .../src/models/gemma3n/gemma3n_attention.py | 147 ++- .../models/gemma3n/gemma3n_audio_encoder.py | 91 +- .../src/models/gemma3n/gemma3n_backbone.py | 51 +- .../convert_gemma3n_checkpoints.py | 873 ++++++++++++++++++ 4 files changed, 1062 insertions(+), 100 deletions(-) create mode 100644 tools/checkpoint_conversion/convert_gemma3n_checkpoints.py diff --git a/keras_hub/src/models/gemma3n/gemma3n_attention.py b/keras_hub/src/models/gemma3n/gemma3n_attention.py index 526553dd61..954b229b5e 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_attention.py +++ b/keras_hub/src/models/gemma3n/gemma3n_attention.py @@ -63,20 +63,28 @@ def __init__( ) self.inv_timescales = keras.ops.expand_dims( keras.ops.expand_dims( - keras.ops.convert_to_tensor(inv_timescales), 0 + keras.ops.convert_to_tensor(inv_timescales, dtype="float32"), 0 ), 0, ) def build(self, input_shape): - self.pos_proj.build((None, self.channels)) + if not self.pos_proj.built: + self.pos_proj.build((None, self.channels)) super().build(input_shape) def _get_timing_signal_1d_pos(self, position, dtype): position = keras.ops.cast( keras.ops.expand_dims(position, axis=-1), "float32" ) - scaled_time = position * keras.ops.cast(self.inv_timescales, "float32") + pos_shape = keras.ops.shape(position) + inv_shape = keras.ops.shape(self.inv_timescales) + target_shape = (pos_shape[0], pos_shape[1], inv_shape[2]) + position = 
keras.ops.broadcast_to(position, target_shape) + inv_timescales = keras.ops.broadcast_to( + self.inv_timescales, target_shape + ) + scaled_time = position * inv_timescales timing_signal = keras.ops.concatenate( [keras.ops.sin(scaled_time), keras.ops.cos(scaled_time)], axis=-1 ) @@ -92,27 +100,56 @@ def _relative_shift( key_context_size, max_span_plus_1, ): - pad_amount_last_dim = (key_context_size + 1) - max_span_plus_1 - padding_tuple = [[0, 0]] * (len(term_bd_before_shift.shape) - 1) + [ - [0, pad_amount_last_dim] - ] + msp1_val = max_span_plus_1 + kcs_val = key_context_size + if not isinstance(msp1_val, int) and hasattr(msp1_val, "shape"): + msp1_val = keras.ops.shape(msp1_val)[-1] + if not isinstance(kcs_val, int) and hasattr(kcs_val, "shape"): + kcs_val = keras.ops.shape(kcs_val)[-1] + pad_amount_last_dim = (kcs_val + 1) - msp1_val + padding_tuple = [[0, 0]] * ( + len(keras.ops.shape(term_bd_before_shift)) - 1 + ) + [[0, pad_amount_last_dim]] term_bd_padded = keras.ops.pad(term_bd_before_shift, padding_tuple) + shape_padded = keras.ops.shape(term_bd_padded) + B = shape_padded[0] + H = shape_padded[1] + U = shape_padded[2] + W = shape_padded[3] + C_plus_1 = shape_padded[4] + target_shape_1_last_dim = -1 + if W is not None and C_plus_1 is not None: + try: + target_shape_1_last_dim = W * C_plus_1 + except TypeError: + target_shape_1_last_dim = -1 term_bd_reshaped = keras.ops.reshape( term_bd_padded, ( - batch_size, - num_heads, - -1, + B if B is not None else -1, + H if H is not None else -1, + U if U is not None else -1, + target_shape_1_last_dim, ), - )[:, :, : query_block_size * key_context_size] + ) + slice_end = None + qbs_val = query_block_size + if not isinstance(qbs_val, int) and hasattr(qbs_val, "shape"): + qbs_val = keras.ops.shape(qbs_val)[0] + if qbs_val is not None and kcs_val is not None: + try: + slice_end = qbs_val * kcs_val + except TypeError: + slice_end = None + term_bd_reshaped = term_bd_reshaped[..., :slice_end] term_bd_shifted = keras.ops.reshape( term_bd_reshaped, ( - batch_size, - num_heads, - -1, - query_block_size, - key_context_size, + B if B is not None else -1, + H if H is not None else -1, + U if U is not None else -1, + W if W is not None else -1, + kcs_val if kcs_val is not None else -1, ), ) return term_bd_shifted @@ -140,7 +177,7 @@ def call(self, queries, keys): ), 0, ) - max_span_plus_1 = pos_indices.shape[1] + max_span_plus_1 = keras.ops.shape(pos_indices)[1] sin_emb_timing_signal = self._get_timing_signal_1d_pos( pos_indices, dtype=queries.dtype ) @@ -157,29 +194,8 @@ def call(self, queries, keys): term_ac = keras.ops.matmul(queries_p, keys_p_t) q_permuted = keras.ops.transpose(queries, (0, 3, 1, 2, 4)) s_permuted = keras.ops.transpose(sin_emb, (1, 2, 0)) - - q_reshaped_dim = -1 - if num_query_blocks is not None: - q_reshaped_dim = num_query_blocks * query_block_size - - q_reshaped = keras.ops.reshape( - q_permuted, - ( - batch_size * num_heads, - q_reshaped_dim, - head_dim, - ), - ) - term_bd_unshifed_matmul = keras.ops.matmul(q_reshaped, s_permuted) - term_bd_unshifed = keras.ops.reshape( - term_bd_unshifed_matmul, - ( - batch_size, - num_heads, - -1, - query_block_size, - max_span_plus_1, - ), + term_bd_unshifed = keras.ops.einsum( + "bhuwd,hdf->bhuwf", q_permuted, s_permuted ) term_bd_shifted = self._relative_shift( term_bd_unshifed, @@ -311,8 +327,12 @@ def build(self, input_shape): self.head_dim, ) self.q_norm.build(norm_shape) - self.k_norm.build(norm_shape) - self.v_norm.build(norm_shape) + k_norm_shape = input_shape[:-1] + ( + 
self.num_key_value_heads, + self.head_dim, + ) + self.k_norm.build(k_norm_shape) + self.v_norm.build(k_norm_shape) super().build(input_shape) def call( @@ -488,12 +508,26 @@ def build(self, input_shape): self.q_proj.build(input_shape) self.k_proj.build(input_shape) self.v_proj.build(input_shape) - self.relative_position_embedding.build(input_shape) + q_build_shape = ( + None, + None, + self.chunk_size, + self.num_heads, + self.head_dim, + ) + k_build_shape = ( + None, + None, + self.context_size, + self.num_heads, + self.head_dim, + ) + self.relative_position_embedding.build((q_build_shape, k_build_shape)) super().build(input_shape) def _pad_dim1(self, x, pad_left, pad_right): paddings = [[0, 0], [pad_left, pad_right]] + [ - [0, 0] for _ in range(len(x.shape) - 2) + [0, 0] for _ in range(len(keras.ops.shape(x)) - 2) ] return keras.ops.pad(x, paddings) @@ -541,11 +575,21 @@ def call(self, hidden_states, mask): extracted_valid_mask_blocks = self._extract_block_context( original_valid_mask ) + mask_block_shape = keras.ops.shape(extracted_valid_mask_blocks) + if len(mask_block_shape) > 3: + axes_to_squeeze = [ + i + for i, dim in enumerate(mask_block_shape) + if i > 0 and i < len(mask_block_shape) - 1 and dim == 1 + ] + if axes_to_squeeze: + extracted_valid_mask_blocks = keras.ops.squeeze( + extracted_valid_mask_blocks, axis=axes_to_squeeze + ) + mask_block_shape = keras.ops.shape(extracted_valid_mask_blocks) if ( - len(extracted_valid_mask_blocks.shape) == 4 - and extracted_valid_mask_blocks.shape[2] - * extracted_valid_mask_blocks.shape[3] - == self.context_size + len(mask_block_shape) == 4 + and mask_block_shape[2] * mask_block_shape[3] == self.context_size ): extracted_valid_mask_blocks = keras.ops.reshape( extracted_valid_mask_blocks, @@ -569,7 +613,12 @@ def call(self, hidden_states, mask): logits = logits / softcap logits = keras.ops.tanh(logits) logits = logits * softcap - min_val = np.finfo(keras.backend.floatx()).min + compute_dtype = logits.dtype + if "float16" in str(compute_dtype): + min_val = np.finfo(np.float16).min + else: + min_val = np.finfo(np.float32).min + min_val = keras.ops.convert_to_tensor(min_val, dtype=compute_dtype) logits = keras.ops.where(final_condition_for_where, logits, min_val) probabilities = keras.ops.softmax( keras.ops.cast(logits, "float32"), axis=-1 diff --git a/keras_hub/src/models/gemma3n/gemma3n_audio_encoder.py b/keras_hub/src/models/gemma3n/gemma3n_audio_encoder.py index 0a4cdc05e6..f1c8167a31 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_audio_encoder.py +++ b/keras_hub/src/models/gemma3n/gemma3n_audio_encoder.py @@ -123,6 +123,21 @@ def build(self, input_shape): f_out_0 = self.calculated_f_out_dims[0] conv1_input_shape = (None, c_out_0, t_out_0, f_out_0) self.conv_1.build(conv1_input_shape) + if t_out_0 is not None: + t_padded_1 = ( + t_out_0 + + self.calculated_block_padding[1][2] + + self.calculated_block_padding[1][3] + ) + kernel_h_1, _ = self.sscp_conv_kernel_size[1] + stride_h_1, _ = self.sscp_conv_stride_size[1] + t_out_1 = (t_padded_1 - kernel_h_1) // stride_h_1 + 1 + else: + t_out_1 = None + c_out_1 = self.sscp_conv_channel_size[1] + f_out_1 = self.calculated_f_out_dims[1] + proj_input_shape = (None, t_out_1, f_out_1 * c_out_1) + self.input_proj_linear.build(proj_input_shape) super().build(input_shape) def compute_output_shape(self, input_shape): @@ -262,7 +277,19 @@ def __init__( ) def build(self, input_shape): - audio_encodings_shape, _ = input_shape + if ( + isinstance(input_shape, tuple) + and len(input_shape) == 2 + and 
isinstance(input_shape[0], tuple) + ): + audio_encodings_shape, _ = input_shape + elif isinstance(input_shape, tuple) and len(input_shape) >= 3: + audio_encodings_shape = input_shape + else: + raise ValueError( + f"Unexpected `input_shape` structure for " + f"Gemma3nAudioConformerBlock: {input_shape}" + ) self.ffw_layer_start.build(audio_encodings_shape) self.attention.build(audio_encodings_shape) self.lconv1d.build(audio_encodings_shape) @@ -279,8 +306,14 @@ def call(self, inputs): audio_encodings = self.ffw_layer_start(audio_encodings) audio_encodings = self.attention(audio_encodings, audio_mel_mask) validity_mask_for_lconv = keras.ops.logical_not(audio_mel_mask) + mask_shape = keras.ops.shape(validity_mask_for_lconv) + enc_shape = keras.ops.shape(audio_encodings) + if len(mask_shape) < len(enc_shape): + validity_mask_for_lconv = keras.ops.expand_dims( + validity_mask_for_lconv, -1 + ) audio_encodings_for_lconv_input = audio_encodings * keras.ops.cast( - keras.ops.expand_dims(validity_mask_for_lconv, -1), + validity_mask_for_lconv, audio_encodings.dtype, ) audio_encodings = self.lconv1d(audio_encodings_for_lconv_input) @@ -420,7 +453,17 @@ def __init__( ] def build(self, input_shape): - audio_mel_shape, _ = input_shape + if ( + isinstance(input_shape, tuple) + and len(input_shape) == 2 + and isinstance(input_shape[0], tuple) + ): + audio_mel_shape, _ = input_shape + else: + raise ValueError( + f"Unexpected `input_shape` structure for Gemma3nAudioEncoder: " + f"{input_shape}" + ) self.subsample_conv_projection.build(audio_mel_shape) encodings_shape = self.subsample_conv_projection.compute_output_shape( audio_mel_shape @@ -429,10 +472,10 @@ def build(self, input_shape): time_stride_product = 1 for stride_pair in self.sscp_conv_stride_size: time_stride_product *= stride_pair[0] - batch_size = ( - audio_mel_shape[0] if audio_mel_shape[0] is not None else -1 + batch_size = audio_mel_shape[0] + current_mask_shape = ( + (batch_size, t_sub) if t_sub is not None else (batch_size, None) ) - current_mask_shape = (batch_size, t_sub) current_encodings_shape = encodings_shape for block in self.conformer: block.build((current_encodings_shape, current_mask_shape)) @@ -446,11 +489,20 @@ def compute_output_shape(self, input_shape): encodings_shape = self.subsample_conv_projection.compute_output_shape( audio_mel_shape ) + t_sub = encodings_shape[1] + time_stride_product = 1 + for stride_pair in self.sscp_conv_stride_size: + time_stride_product *= stride_pair[0] + batch_size = audio_mel_shape[0] + current_mask_shape = ( + (batch_size, t_sub) if t_sub is not None else (batch_size, None) + ) current_encodings_shape = encodings_shape for block in self.conformer: current_encodings_shape = block.compute_output_shape( - (current_encodings_shape, None) + (current_encodings_shape, current_mask_shape) ) + final_mask_shape = current_mask_shape if self.conf_reduction_factor > 1: t_sub = current_encodings_shape[1] if t_sub is not None: @@ -460,7 +512,12 @@ def compute_output_shape(self, input_shape): new_t, current_encodings_shape[2], ) - return current_encodings_shape, None + final_mask_shape = ( + (current_mask_shape[0], new_t) + if current_mask_shape[1] is not None + else (current_mask_shape[0], None) + ) + return current_encodings_shape, final_mask_shape def call(self, inputs): audio_mel, audio_mel_mask = inputs @@ -469,19 +526,31 @@ def call(self, inputs): time_stride_product = 1 for stride_pair in self.sscp_conv_stride_size: time_stride_product *= stride_pair[0] + mask_rank = len(keras.ops.shape(audio_mel_mask)) 
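+        # Collapse any extra singleton axes (e.g. a `[batch, 1, time]` mask)
+        # to `[batch, time]` before gathering the subsampled time steps.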
+ audio_mel_mask_to_take = audio_mel_mask + if mask_rank > 2: + audio_mel_mask_to_take = keras.ops.squeeze( + audio_mel_mask, axis=list(range(1, mask_rank - 1)) + ) indices = keras.ops.arange(0, t_sub) * time_stride_product indices = keras.ops.clip( - indices, 0, keras.ops.shape(audio_mel_mask)[1] - 1 + indices, 0, keras.ops.shape(audio_mel_mask_to_take)[1] - 1 ) - current_mask = keras.ops.take(audio_mel_mask, indices, axis=1) + current_mask = keras.ops.take(audio_mel_mask_to_take, indices, axis=1) for block in self.conformer: audio_encodings = block((audio_encodings, current_mask)) if self.conf_reduction_factor > 1: audio_encodings = audio_encodings[:, :: self.conf_reduction_factor] current_mask = current_mask[:, :: self.conf_reduction_factor] + mask_shape = keras.ops.shape(current_mask) + enc_shape = keras.ops.shape(audio_encodings) + if len(mask_shape) < len(enc_shape): + current_mask_expanded = keras.ops.expand_dims(current_mask, axis=-1) + else: + current_mask_expanded = current_mask return audio_encodings * keras.ops.cast( - keras.ops.logical_not(keras.ops.expand_dims(current_mask, axis=-1)), + keras.ops.logical_not(current_mask_expanded), audio_encodings.dtype, ), current_mask diff --git a/keras_hub/src/models/gemma3n/gemma3n_backbone.py b/keras_hub/src/models/gemma3n/gemma3n_backbone.py index 79d4b59eee..3440ad6622 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_backbone.py +++ b/keras_hub/src/models/gemma3n/gemma3n_backbone.py @@ -187,6 +187,9 @@ def __init__( self.vocab_size_per_layer_input = vocab_size_per_layer_input self.text_hidden_size = language_model.embed_tokens.embedding_dim + def build(self, input_shape): + super().build(input_shape) + def call(self, inputs): input_ids = inputs["token_ids"] pixel_values = inputs.get("pixel_values") @@ -262,28 +265,10 @@ def scatter_vision_features(): batch_size, seq_len, hidden_size = keras.ops.shape( inputs_embeds ) - num_soft_tokens = self.vision_soft_tokens_per_image - start_mask_f32 = keras.ops.cast( - image_token_mask, dtype="float32" - ) - start_mask_f32 = keras.ops.expand_dims(start_mask_f32, axis=-1) - kernel = keras.ops.ones( - (num_soft_tokens, 1, 1), dtype="float32" - ) - padded_mask = keras.ops.pad( - start_mask_f32, - [[0, 0], [num_soft_tokens - 1, 0], [0, 0]], - ) - full_mask_f32 = keras.ops.conv( - padded_mask, kernel, strides=1, padding="valid" - ) - full_mask = keras.ops.cast( - keras.ops.squeeze(full_mask_f32, axis=-1) > 0.5, "bool" - ) flat_vision_embeds = keras.ops.reshape( vision_embeds, [-1, hidden_size] ) - flat_full_mask = keras.ops.reshape(full_mask, [-1]) + flat_full_mask = keras.ops.reshape(image_token_mask, [-1]) gather_indices = ( keras.ops.cumsum(keras.ops.cast(flat_full_mask, "int32")) - 1 @@ -297,7 +282,9 @@ def scatter_vision_features(): replacement_tensor = keras.ops.reshape( replacement_values, (batch_size, seq_len, hidden_size) ) - expanded_full_mask = keras.ops.expand_dims(full_mask, axis=-1) + expanded_full_mask = keras.ops.expand_dims( + image_token_mask, axis=-1 + ) return keras.ops.where( expanded_full_mask, replacement_tensor, inputs_embeds ) @@ -359,28 +346,10 @@ def scatter_audio_features(): batch_size, seq_len, hidden_size = keras.ops.shape( inputs_embeds ) - num_soft_tokens = self.audio_soft_tokens_per_image - start_mask_f32 = keras.ops.cast( - audio_token_mask, dtype="float32" - ) - start_mask_f32 = keras.ops.expand_dims(start_mask_f32, axis=-1) - kernel = keras.ops.ones( - (num_soft_tokens, 1, 1), dtype="float32" - ) - padded_mask = keras.ops.pad( - start_mask_f32, - [[0, 0], 
[num_soft_tokens - 1, 0], [0, 0]], - ) - full_mask_f32 = keras.ops.conv( - padded_mask, kernel, strides=1, padding="valid" - ) - full_mask = keras.ops.cast( - keras.ops.squeeze(full_mask_f32, axis=-1) > 0.5, "bool" - ) flat_audio_embeds = keras.ops.reshape( audio_embeds, [-1, hidden_size] ) - flat_full_mask = keras.ops.reshape(full_mask, [-1]) + flat_full_mask = keras.ops.reshape(audio_token_mask, [-1]) gather_indices = ( keras.ops.cumsum(keras.ops.cast(flat_full_mask, "int32")) - 1 @@ -394,7 +363,9 @@ def scatter_audio_features(): replacement_tensor = keras.ops.reshape( replacement_values, (batch_size, seq_len, hidden_size) ) - expanded_full_mask = keras.ops.expand_dims(full_mask, axis=-1) + expanded_full_mask = keras.ops.expand_dims( + audio_token_mask, axis=-1 + ) return keras.ops.where( expanded_full_mask, replacement_tensor, inputs_embeds ) diff --git a/tools/checkpoint_conversion/convert_gemma3n_checkpoints.py b/tools/checkpoint_conversion/convert_gemma3n_checkpoints.py new file mode 100644 index 0000000000..b9ae8df019 --- /dev/null +++ b/tools/checkpoint_conversion/convert_gemma3n_checkpoints.py @@ -0,0 +1,873 @@ +import gc +import os +import types + +import keras +import numpy as np +import torch +from absl import app +from absl import flags +from PIL import Image +from transformers import Gemma3nForConditionalGeneration +from transformers import Gemma3nProcessor + +from keras_hub.src.models.gemma3n.gemma3n_backbone import Gemma3nBackbone +from keras_hub.src.models.mobilenetv5.mobilenetv5_attention import ( + MobileAttention, +) +from keras_hub.src.models.mobilenetv5.mobilenetv5_blocks import EdgeResidual +from keras_hub.src.models.mobilenetv5.mobilenetv5_blocks import ( + UniversalInvertedResidual, +) +from keras_hub.src.models.mobilenetv5.mobilenetv5_builder import ( + convert_arch_def_to_stackwise, +) +from keras_hub.src.models.mobilenetv5.mobilenetv5_layers import ConvNormAct + +PRESET_MAP = { + "gemma3n_e2b": "google/gemma-3n-E2B", +} +FLAGS = flags.FLAGS +flags.DEFINE_string( + "preset", None, f"Must be one of {','.join(PRESET_MAP.keys())}" +) +flags.DEFINE_string( + "cache_dir", "./hf_cache", "Directory to cache Hugging Face downloads." 
+) +flags.mark_flag_as_required("preset") + + +MOBILENETV5_300M_ENC_ARCH_DEF = [ + # Stage 0: 128x128 in + [ + "er_r1_k3_s2_e4_c128", + "er_r1_k3_s1_e4_c128", + "er_r1_k3_s1_e4_c128", + ], + # Stage 1: 256x256 in + [ + "uir_r1_a3_k5_s2_e6_c256", + "uir_r1_a5_k0_s1_e4_c256", + "uir_r1_a3_k0_s1_e4_c256", + "uir_r1_a5_k0_s1_e4_c256", + "uir_r1_a3_k0_s1_e4_c256", + ], + # Stage 2: 640x640 in + [ + "uir_r1_a5_k5_s2_e6_c640", + "uir_r1_a5_k0_s1_e4_c640", + "uir_r1_a5_k0_s1_e4_c640", + "uir_r1_a5_k0_s1_e4_c640", + "uir_r1_a5_k0_s1_e4_c640", + "uir_r1_a5_k0_s1_e4_c640", + "uir_r1_a5_k0_s1_e4_c640", + "uir_r1_a5_k0_s1_e4_c640", + "uir_r1_a0_k0_s1_e1_c640", + "mqa_r1_k3_h12_v2_s1_d64_c640", + "uir_r1_a0_k0_s1_e2_c640", + "mqa_r1_k3_h12_v2_s1_d64_c640", + "uir_r1_a0_k0_s1_e2_c640", + "mqa_r1_k3_h12_v2_s1_d64_c640", + "uir_r1_a0_k0_s1_e2_c640", + "mqa_r1_k3_h12_v2_s1_d64_c640", + "uir_r1_a0_k0_s1_e2_c640", + "mqa_r1_k3_h12_v2_s1_d64_c640", + "uir_r1_a0_k0_s1_e2_c640", + "mqa_r1_k3_h12_v2_s1_d64_c640", + "uir_r1_a0_k0_s1_e2_c640", + "mqa_r1_k3_h12_v2_s1_d64_c640", + "uir_r1_a0_k0_s1_e2_c640", + "mqa_r1_k3_h12_v2_s1_d64_c640", + "uir_r1_a0_k0_s1_e2_c640", + "mqa_r1_k3_h12_v2_s1_d64_c640", + "uir_r1_a0_k0_s1_e2_c640", + "mqa_r1_k3_h12_v2_s1_d64_c640", + "uir_r1_a0_k0_s1_e2_c640", + "mqa_r1_k3_h12_v2_s1_d64_c640", + "uir_r1_a0_k0_s1_e2_c640", + "mqa_r1_k3_h12_v2_s1_d64_c640", + "uir_r1_a0_k0_s1_e2_c640", + "mqa_r1_k3_h12_v2_s1_d64_c640", + "uir_r1_a0_k0_s1_e2_c640", + "mqa_r1_k3_h12_v2_s1_d64_c640", + "uir_r1_a0_k0_s1_e2_c640", + ], + # Stage 3: 1280x1280 in + [ + "uir_r1_a5_k5_s2_e6_c1280", + "mqa_r1_k3_h16_s1_d96_c1280", + "uir_r1_a0_k0_s1_e2_c1280", + "mqa_r1_k3_h16_s1_d96_c1280", + "uir_r1_a0_k0_s1_e2_c1280", + "mqa_r1_k3_h16_s1_d96_c1280", + "uir_r1_a0_k0_s1_e2_c1280", + "mqa_r1_k3_h16_s1_d96_c1280", + "uir_r1_a0_k0_s1_e2_c1280", + "mqa_r1_k3_h16_s1_d96_c1280", + "uir_r1_a0_k0_s1_e2_c1280", + "mqa_r1_k3_h16_s1_d96_c1280", + "uir_r1_a0_k0_s1_e2_c1280", + "mqa_r1_k3_h16_s1_d96_c1280", + "uir_r1_a0_k0_s1_e2_c1280", + "mqa_r1_k3_h16_s1_d96_c1280", + "uir_r1_a0_k0_s1_e2_c1280", + "mqa_r1_k3_h16_s1_d96_c1280", + "uir_r1_a0_k0_s1_e2_c1280", + "mqa_r1_k3_h16_s1_d96_c1280", + "uir_r1_a0_k0_s1_e2_c1280", + "mqa_r1_k3_h16_s1_d96_c1280", + "uir_r1_a0_k0_s1_e2_c1280", + "mqa_r1_k3_h16_s1_d96_c1280", + "uir_r1_a0_k0_s1_e2_c1280", + "mqa_r1_k3_h16_s1_d96_c1280", + "uir_r1_a0_k0_s1_e2_c1280", + "mqa_r1_k3_h16_s1_d96_c1280", + "uir_r1_a0_k0_s1_e2_c1280", + "mqa_r1_k3_h16_s1_d96_c1280", + "uir_r1_a0_k0_s1_e2_c1280", + "mqa_r1_k3_h16_s1_d96_c1280", + "uir_r1_a0_k0_s1_e2_c1280", + "mqa_r1_k3_h16_s1_d96_c1280", + "uir_r1_a0_k0_s1_e2_c1280", + "mqa_r1_k3_h16_s1_d96_c1280", + "uir_r1_a0_k0_s1_e2_c1280", + "mqa_r1_k3_h16_s1_d96_c1280", + "uir_r1_a0_k0_s1_e2_c1280", + ], +] +mobilenetv5_config = convert_arch_def_to_stackwise( + MOBILENETV5_300M_ENC_ARCH_DEF +) +mobilenetv5_config.update( + { + "stem_size": 64, + "num_features": 2048, + "norm_layer": "rms_norm", + "act_layer": "gelu", + "use_msfa": True, + "layer_scale_init_value": 1e-5, + } +) +MODEL_CONFIGS = {"mobilenetv5_300m_enc": mobilenetv5_config} + + +def convert_model(hf_config, dtype=None): + text_config = hf_config.text_config + vision_config = hf_config.vision_config + audio_config = hf_config.audio_config + vision_encoder_config = MODEL_CONFIGS["mobilenetv5_300m_enc"].copy() + vision_encoder_config["image_shape"] = (768, 768, 3) + if text_config.hidden_activation == "gelu_pytorch_tanh": + text_config.hidden_activation = "gelu_approximate" + gemma3n_backbone = 
Gemma3nBackbone( + text_vocab_size=text_config.vocab_size, + text_hidden_size=text_config.hidden_size, + num_hidden_layers=text_config.num_hidden_layers, + pad_token_id=0, + num_attention_heads=text_config.num_attention_heads, + num_key_value_heads=text_config.num_key_value_heads, + head_dim=text_config.head_dim, + intermediate_size=text_config.intermediate_size, + hidden_activation=text_config.hidden_activation, + layer_types=text_config.layer_types, + sliding_window=text_config.sliding_window, + rope_theta=text_config.rope_theta, + max_position_embeddings=text_config.max_position_embeddings, + vocab_size_per_layer_input=text_config.vocab_size_per_layer_input, + hidden_size_per_layer_input=text_config.hidden_size_per_layer_input, + altup_num_inputs=text_config.altup_num_inputs, + laurel_rank=text_config.laurel_rank, + attention_bias=text_config.attention_bias, + attention_dropout=text_config.attention_dropout, + rope_scaling=text_config.rope_scaling, + activation_sparsity_pattern=text_config.activation_sparsity_pattern, + altup_coef_clip=text_config.altup_coef_clip, + altup_active_idx=text_config.altup_active_idx, + altup_correct_scale=text_config.altup_correct_scale, + num_kv_shared_layers=text_config.num_kv_shared_layers, + vision_encoder_config=vision_encoder_config, + vision_hidden_size=vision_config.hidden_size, + vision_vocab_size=vision_config.vocab_size, + vision_vocab_offset=vision_config.vocab_offset, + vision_soft_tokens_per_image=hf_config.vision_soft_tokens_per_image, + image_token_id=hf_config.image_token_id, + audio_encoder_config=audio_config.to_dict(), + audio_hidden_size=audio_config.hidden_size, + audio_vocab_size=audio_config.vocab_size, + audio_vocab_offset=audio_config.vocab_offset, + audio_soft_tokens_per_image=hf_config.audio_soft_tokens_per_image, + audio_token_id=hf_config.audio_token_id, + rms_norm_eps=text_config.rms_norm_eps, + dtype=dtype, + ) + return gemma3n_backbone + + +class HfToKerasConverter: + def __init__(self, hf_model): + self.hf_state_dict = { + k: v for k, v in hf_model.state_dict().items() if "lm_head" not in k + } + + def _port_weights(self, layer_or_variable, hf_key, transpose_dims=None): + if hf_key not in self.hf_state_dict: + print(f"⚠️ Weight key not found in state_dict: {hf_key}") + return + weights = self.hf_state_dict[hf_key].cpu().float().numpy() + if transpose_dims: + weights = weights.transpose(transpose_dims) + + if hasattr(layer_or_variable, "assign"): + layer_or_variable.assign(weights) + return + + current_weights = layer_or_variable.get_weights() + if ( + not current_weights + and hasattr(layer_or_variable, "weights") + and not layer_or_variable.weights + ): + print( + f"⚠️ Keras layer {layer_or_variable.name} has no weights to " + "set. Skipping." 
+ ) + return + if len(current_weights) == 1: + layer_or_variable.set_weights([weights]) + elif len(current_weights) == 2: + bias_key = hf_key.replace(".weight", ".bias") + if bias_key in self.hf_state_dict: + bias = self.hf_state_dict[bias_key].cpu().numpy() + layer_or_variable.set_weights([weights, bias]) + else: + layer_or_variable.set_weights([weights, current_weights[1]]) + else: + print( + f"❓ Unexpected number of weights in layer " + f"{layer_or_variable.name}" + ) + + def _port_rms_norm(self, layer, hf_prefix): + key = f"{hf_prefix}.weight" + self._port_weights(layer, key) + + def _port_bn(self, layer, hf_prefix): + keys = [ + f"{hf_prefix}.weight", + f"{hf_prefix}.bias", + f"{hf_prefix}.running_mean", + f"{hf_prefix}.running_var", + ] + weights = [ + self.hf_state_dict[key].cpu().float().numpy() for key in keys + ] + layer.set_weights(weights) + + def _port_cna(self, cna_layer: ConvNormAct, hf_conv_prefix, hf_norm_prefix): + if isinstance(cna_layer.conv, keras.layers.DepthwiseConv2D): + self._port_weights( + cna_layer.conv, + f"{hf_conv_prefix}.weight", + transpose_dims=(2, 3, 0, 1), + ) + else: + self._port_weights( + cna_layer.conv, + f"{hf_conv_prefix}.weight", + transpose_dims=(2, 3, 1, 0), + ) + if f"{hf_norm_prefix}.running_mean" in self.hf_state_dict: + self._port_bn(cna_layer.norm, hf_norm_prefix) + else: + self._port_rms_norm(cna_layer.norm, hf_norm_prefix) + + def _port_attn(self, attn_layer, hf_attn_prefix): + self._port_weights( + attn_layer.query_layers[-1], + f"{hf_attn_prefix}.query.proj.weight", + (2, 3, 1, 0), + ) + if len(attn_layer.key_layers) > 1: + self._port_weights( + attn_layer.key_layers[0], + f"{hf_attn_prefix}.key.down_conv.weight", + (2, 3, 0, 1), + ) + key_norm_layer = attn_layer.key_layers[1] + if f"{hf_attn_prefix}.key.norm.running_mean" in self.hf_state_dict: + self._port_bn(key_norm_layer, f"{hf_attn_prefix}.key.norm") + else: + self._port_rms_norm( + key_norm_layer, f"{hf_attn_prefix}.key.norm" + ) + self._port_weights( + attn_layer.key_layers[-1], + f"{hf_attn_prefix}.key.proj.weight", + (2, 3, 1, 0), + ) + if len(attn_layer.value_layers) > 1: + self._port_weights( + attn_layer.value_layers[0], + f"{hf_attn_prefix}.value.down_conv.weight", + (2, 3, 0, 1), + ) + value_norm_layer = attn_layer.value_layers[1] + if ( + f"{hf_attn_prefix}.value.norm.running_mean" + in self.hf_state_dict + ): + self._port_bn(value_norm_layer, f"{hf_attn_prefix}.value.norm") + else: + self._port_rms_norm( + value_norm_layer, f"{hf_attn_prefix}.value.norm" + ) + self._port_weights( + attn_layer.value_layers[-1], + f"{hf_attn_prefix}.value.proj.weight", + (2, 3, 1, 0), + ) + self._port_weights( + attn_layer.output_proj_layers[-2], + f"{hf_attn_prefix}.output.proj.weight", + (2, 3, 1, 0), + ) + + def _port_vision_tower(self, keras_model): + print(" -> Porting vision tower (MobileNetV5)...") + backbone = keras_model.vision_encoder + hf_prefix = "model.vision_tower.timm_model" + + stem_layer = backbone.get_layer("conv_stem") + self._port_cna( + stem_layer, + f"{hf_prefix}.conv_stem.conv", + f"{hf_prefix}.conv_stem.bn", + ) + + block_layers = [ + layer + for layer in backbone.layers + if isinstance( + layer, + (EdgeResidual, UniversalInvertedResidual, MobileAttention), + ) + ] + block_counter = 0 + for stack_idx in range(len(backbone.stackwise_num_blocks)): + for block_idx_in_stage in range( + backbone.stackwise_num_blocks[stack_idx] + ): + block = block_layers[block_counter] + block_prefix = ( + f"{hf_prefix}.blocks.{stack_idx}.{block_idx_in_stage}" + ) + if isinstance(block, 
EdgeResidual): + self._port_cna( + block.conv_exp, + f"{block_prefix}.conv_exp", + f"{block_prefix}.bn1", + ) + self._port_cna( + block.conv_pwl, + f"{block_prefix}.conv_pwl", + f"{block_prefix}.bn2", + ) + elif isinstance(block, UniversalInvertedResidual): + if hasattr(block, "dw_start") and not isinstance( + block.dw_start, types.FunctionType + ): + self._port_cna( + block.dw_start, + f"{block_prefix}.dw_start.conv", + f"{block_prefix}.dw_start.bn", + ) + self._port_cna( + block.pw_exp, + f"{block_prefix}.pw_exp.conv", + f"{block_prefix}.pw_exp.bn", + ) + if hasattr(block, "dw_mid") and not isinstance( + block.dw_mid, types.FunctionType + ): + self._port_cna( + block.dw_mid, + f"{block_prefix}.dw_mid.conv", + f"{block_prefix}.dw_mid.bn", + ) + self._port_cna( + block.pw_proj, + f"{block_prefix}.pw_proj.conv", + f"{block_prefix}.pw_proj.bn", + ) + gamma_key = f"{block_prefix}.layer_scale.gamma" + if gamma_key in self.hf_state_dict: + self._port_weights(block.layer_scale, gamma_key) + elif isinstance(block, MobileAttention): + self._port_rms_norm(block.norm, f"{block_prefix}.norm") + gamma_key = f"{block_prefix}.layer_scale.gamma" + if gamma_key in self.hf_state_dict: + self._port_weights(block.layer_scale, gamma_key) + attn_prefix = f"{block_prefix}.attn" + self._port_attn(block.attn, attn_prefix) + block_counter += 1 + try: + msfa_layer = backbone.get_layer("msfa") + msfa_prefix = f"{hf_prefix}.msfa" + ffn = msfa_layer.ffn + self._port_cna( + ffn.pw_exp, + f"{msfa_prefix}.ffn.pw_exp.conv", + f"{msfa_prefix}.ffn.pw_exp.bn", + ) + self._port_cna( + ffn.pw_proj, + f"{msfa_prefix}.ffn.pw_proj.conv", + f"{msfa_prefix}.ffn.pw_proj.bn", + ) + self._port_rms_norm(msfa_layer.norm, f"{msfa_prefix}.norm") + except ValueError: + pass + + def _port_language_model(self, keras_model): + print(" -> Porting language model...") + lm = keras_model.language_model + hf_prefix = "model.language_model" + + self._port_weights( + lm.embed_tokens.embedding, f"{hf_prefix}.embed_tokens.weight" + ) + self._port_rms_norm(lm.norm, f"{hf_prefix}.norm") + self._port_weights( + lm.embed_tokens_per_layer.embedding, + f"{hf_prefix}.embed_tokens_per_layer.weight", + ) + self._port_weights( + lm.per_layer_model_projection, + f"{hf_prefix}.per_layer_model_projection.weight", + transpose_dims=(1, 0), + ) + self._port_rms_norm( + lm.per_layer_projection_norm, + f"{hf_prefix}.per_layer_projection_norm", + ) + + for i, proj in enumerate(lm.altup_projections): + self._port_weights( + proj, + f"{hf_prefix}.altup_projections.{i}.weight", + transpose_dims=(1, 0), + ) + for i, proj in enumerate(lm.altup_unembed_projections): + self._port_weights( + proj, + f"{hf_prefix}.altup_unembed_projections.{i}.weight", + transpose_dims=(1, 0), + ) + + for i, layer in enumerate(lm.layers): + layer_prefix = f"{hf_prefix}.layers.{i}" + + # Attention + self._port_weights( + layer.attention.q_proj, + f"{layer_prefix}.self_attn.q_proj.weight", + transpose_dims=(1, 0), + ) + self._port_weights( + layer.attention.k_proj, + f"{layer_prefix}.self_attn.k_proj.weight", + transpose_dims=(1, 0), + ) + self._port_weights( + layer.attention.v_proj, + f"{layer_prefix}.self_attn.v_proj.weight", + transpose_dims=(1, 0), + ) + self._port_weights( + layer.attention.o_proj, + f"{layer_prefix}.self_attn.o_proj.weight", + transpose_dims=(1, 0), + ) + self._port_rms_norm( + layer.attention.q_norm, f"{layer_prefix}.self_attn.q_norm" + ) + self._port_rms_norm( + layer.attention.k_norm, f"{layer_prefix}.self_attn.k_norm" + ) + + # MLP + self._port_weights( + 
layer.mlp.gate_proj, + f"{layer_prefix}.mlp.gate_proj.weight", + transpose_dims=(1, 0), + ) + self._port_weights( + layer.mlp.up_proj, + f"{layer_prefix}.mlp.up_proj.weight", + transpose_dims=(1, 0), + ) + self._port_weights( + layer.mlp.down_proj, + f"{layer_prefix}.mlp.down_proj.weight", + transpose_dims=(1, 0), + ) + + # LayerNorms + self._port_rms_norm( + layer.input_layernorm, f"{layer_prefix}.input_layernorm" + ) + self._port_rms_norm( + layer.post_attention_layernorm, + f"{layer_prefix}.post_attention_layernorm", + ) + self._port_rms_norm( + layer.pre_feedforward_layernorm, + f"{layer_prefix}.pre_feedforward_layernorm", + ) + self._port_rms_norm( + layer.post_feedforward_layernorm, + f"{layer_prefix}.post_feedforward_layernorm", + ) + + # AltUp + altup_prefix = f"{layer_prefix}.altup" + self._port_weights( + layer.altup.correction_coefs, + f"{altup_prefix}.correction_coefs.weight", + transpose_dims=(1, 0), + ) + self._port_weights( + layer.altup.prediction_coefs, + f"{altup_prefix}.prediction_coefs.weight", + transpose_dims=(1, 0), + ) + self._port_weights( + layer.altup.modality_router, + f"{altup_prefix}.modality_router.weight", + transpose_dims=(1, 0), + ) + self._port_rms_norm( + layer.altup.router_norm, f"{altup_prefix}.router_norm" + ) + if layer.altup.altup_correct_scale: + self._port_weights( + layer.altup.correct_output_scale, + f"{altup_prefix}.correct_output_scale", + ) + + # Laurel + laurel_prefix = f"{layer_prefix}.laurel" + self._port_weights( + layer.laurel.linear_left, + f"{laurel_prefix}.linear_left.weight", + transpose_dims=(1, 0), + ) + self._port_weights( + layer.laurel.linear_right, + f"{laurel_prefix}.linear_right.weight", + transpose_dims=(1, 0), + ) + self._port_rms_norm( + layer.laurel.post_laurel_norm, + f"{laurel_prefix}.post_laurel_norm", + ) + + # Per-layer inputs + self._port_weights( + layer.per_layer_input_gate, + f"{layer_prefix}.per_layer_input_gate.weight", + transpose_dims=(1, 0), + ) + self._port_weights( + layer.per_layer_projection, + f"{layer_prefix}.per_layer_projection.weight", + transpose_dims=(1, 0), + ) + self._port_rms_norm( + layer.post_per_layer_input_norm, + f"{layer_prefix}.post_per_layer_input_norm", + ) + + def _port_audio_tower(self, keras_model): + print(" -> Porting audio tower...") + audio_encoder = keras_model.audio_encoder + hf_prefix = "model.audio_tower" + + ssp = audio_encoder.subsample_conv_projection + ssp_prefix = f"{hf_prefix}.subsample_conv_projection" + self._port_weights( + ssp.conv_0.conv, + f"{ssp_prefix}.conv_0.conv.weight", + transpose_dims=(2, 3, 1, 0), + ) + self._port_weights( + ssp.conv_0.norm.scale, f"{ssp_prefix}.conv_0.norm.weight" + ) + self._port_weights( + ssp.conv_1.conv, + f"{ssp_prefix}.conv_1.conv.weight", + transpose_dims=(2, 3, 1, 0), + ) + self._port_weights( + ssp.conv_1.norm.scale, f"{ssp_prefix}.conv_1.norm.weight" + ) + self._port_weights( + ssp.input_proj_linear, + f"{ssp_prefix}.input_proj_linear.weight", + transpose_dims=(1, 0), + ) + + for i, block in enumerate(audio_encoder.conformer): + block_prefix = f"{hf_prefix}.conformer.{i}" + ffw_start_prefix = f"{block_prefix}.ffw_layer_start" + self._port_rms_norm( + block.ffw_layer_start.pre_layer_norm, + f"{ffw_start_prefix}.pre_layer_norm", + ) + self._port_weights( + block.ffw_layer_start.ffw_layer_1, + f"{ffw_start_prefix}.ffw_layer_1.weight", + transpose_dims=(1, 0), + ) + self._port_weights( + block.ffw_layer_start.ffw_layer_2, + f"{ffw_start_prefix}.ffw_layer_2.weight", + transpose_dims=(1, 0), + ) + self._port_rms_norm( + 
block.ffw_layer_start.post_layer_norm, + f"{ffw_start_prefix}.post_layer_norm", + ) + + attn_prefix = f"{block_prefix}.attention" + self._port_rms_norm( + block.attention.pre_attn_norm, f"{attn_prefix}.pre_attn_norm" + ) + self._port_weights( + block.attention.attn.per_dim_scale, + f"{attn_prefix}.attn.per_dim_scale", + ) + self._port_weights( + block.attention.attn.relative_position_embedding.pos_proj, + f"{attn_prefix}.attn.relative_position_embedding.pos_proj.weight", # noqa: E501 + transpose_dims=(1, 0), + ) + self._port_weights( + block.attention.attn.q_proj, + f"{attn_prefix}.attn.q_proj.weight", + transpose_dims=(1, 0), + ) + self._port_weights( + block.attention.attn.k_proj, + f"{attn_prefix}.attn.k_proj.weight", + transpose_dims=(1, 0), + ) + self._port_weights( + block.attention.attn.v_proj, + f"{attn_prefix}.attn.v_proj.weight", + transpose_dims=(1, 0), + ) + self._port_weights( + block.attention.post, + f"{attn_prefix}.post.weight", + transpose_dims=(1, 0), + ) + self._port_rms_norm( + block.attention.post_norm, f"{attn_prefix}.post_norm" + ) + + lconv_prefix = f"{block_prefix}.lconv1d" + self._port_rms_norm( + block.lconv1d.pre_layer_norm, f"{lconv_prefix}.pre_layer_norm" + ) + self._port_weights( + block.lconv1d.linear_start, + f"{lconv_prefix}.linear_start.weight", + transpose_dims=(1, 0), + ) + self._port_weights( + block.lconv1d.depthwise_conv1d, + f"{lconv_prefix}.depthwise_conv1d.weight", + transpose_dims=(2, 0, 1), + ) + self._port_rms_norm( + block.lconv1d.conv_norm, f"{lconv_prefix}.conv_norm" + ) + self._port_weights( + block.lconv1d.linear_end, + f"{lconv_prefix}.linear_end.weight", + transpose_dims=(1, 0), + ) + + ffw_end_prefix = f"{block_prefix}.ffw_layer_end" + self._port_rms_norm( + block.ffw_layer_end.pre_layer_norm, + f"{ffw_end_prefix}.pre_layer_norm", + ) + self._port_weights( + block.ffw_layer_end.ffw_layer_1, + f"{ffw_end_prefix}.ffw_layer_1.weight", + transpose_dims=(1, 0), + ) + self._port_weights( + block.ffw_layer_end.ffw_layer_2, + f"{ffw_end_prefix}.ffw_layer_2.weight", + transpose_dims=(1, 0), + ) + self._port_rms_norm( + block.ffw_layer_end.post_layer_norm, + f"{ffw_end_prefix}.post_layer_norm", + ) + self._port_rms_norm(block.norm, f"{block_prefix}.norm") + + def _port_multimodal_embedders(self, keras_model): + print(" -> Porting multimodal embedders...") + vision_prefix = "model.embed_vision" + self._port_weights( + keras_model.embed_vision.embedding, + f"{vision_prefix}.embedding.weight", + ) + self._port_rms_norm( + keras_model.embed_vision.hard_embedding_norm, + f"{vision_prefix}.hard_embedding_norm", + ) + self._port_rms_norm( + keras_model.embed_vision.soft_embedding_norm, + f"{vision_prefix}.soft_embedding_norm", + ) + self._port_weights( + keras_model.embed_vision.embedding_projection, + f"{vision_prefix}.embedding_projection.weight", + transpose_dims=(1, 0), + ) + + audio_prefix = "model.embed_audio" + self._port_weights( + keras_model.embed_audio.embedding, + f"{audio_prefix}.embedding.weight", + ) + self._port_rms_norm( + keras_model.embed_audio.hard_embedding_norm, + f"{audio_prefix}.hard_embedding_norm", + ) + self._port_rms_norm( + keras_model.embed_audio.soft_embedding_norm, + f"{audio_prefix}.soft_embedding_norm", + ) + self._port_weights( + keras_model.embed_audio.embedding_projection, + f"{audio_prefix}.embedding_projection.weight", + transpose_dims=(1, 0), + ) + + def convert(self, keras_model: Gemma3nBackbone): + print("🔶 Starting weight conversion...") + self._port_vision_tower(keras_model) + 
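+ # Each `_port_*` helper reads tensors from `self.hf_state_dict` by key,
+ # so the sub-models convert independently of one another.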
self._port_language_model(keras_model) + self._port_audio_tower(keras_model) + self._port_multimodal_embedders(keras_model) + print("✅ Full backbone weights converted.") + + +def validate_output(keras_model, hf_model, hf_processor): + print("🔶 Validating model outputs...") + image_size = hf_processor.image_processor.size + image = Image.new("RGB", (image_size["width"], image_size["height"])) + sampling_rate = hf_processor.feature_extractor.sampling_rate + audio_data = np.zeros(int(sampling_rate * 2.0)) + text = f"A cat sat on a mat{hf_processor.image_token}\n{hf_processor.audio_token}" # noqa: E501 + hf_inputs = hf_processor( + text=text, + images=image, + audio=[audio_data], + return_tensors="pt", + padding="longest", + ) + print(" -> Running HF model forward pass...") + with torch.no_grad(): + hf_output = hf_model.model(**hf_inputs).last_hidden_state + hf_output = hf_output.detach().cpu().float().numpy() + print(f" -> HF model output shape: {hf_output.shape}") + keras_inputs = {k: v.numpy() for k, v in hf_inputs.items()} + keras_inputs["token_ids"] = keras_inputs.pop("input_ids") + if "token_type_ids" in keras_inputs: + del keras_inputs["token_type_ids"] + keras_inputs["pixel_values"] = np.transpose( + keras_inputs["pixel_values"], (0, 2, 3, 1) + ) + if keras_inputs["pixel_values"].ndim == 4: + keras_inputs["pixel_values"] = np.expand_dims( + keras_inputs["pixel_values"], axis=1 + ) + input_shape = keras_inputs["token_ids"].shape + seq_len = input_shape[1] + attention_mask_2d = keras_inputs["attention_mask"] + attention_mask_4d = attention_mask_2d[:, None, None, :] + causal_mask = np.tril(np.ones((seq_len, seq_len), dtype=bool))[ + None, None, :, : + ] + final_mask = causal_mask & attention_mask_4d + keras_inputs["attention_mask"] = final_mask + print(" -> Running Keras model forward pass...") + keras_output = keras_model.predict(keras_inputs) + print(f" -> Keras model output shape: {keras_output.shape}") + mean_diff = np.mean(np.abs(keras_output - hf_output)) + print(f"🔶 Mean absolute difference: {mean_diff}") + + +def main(_): + preset = FLAGS.preset + hf_model_name = PRESET_MAP[preset] + cache_dir = FLAGS.cache_dir + save_path = preset + model_cache_path = os.path.join(cache_dir, f"{preset}_model") + processor_cache_path = os.path.join(cache_dir, f"{preset}_processor") + hf_model = None + hf_processor = None + if os.path.exists(model_cache_path) and os.path.exists( + processor_cache_path + ): + print( + " -> Loading cached Hugging Face model and processor from " + "{cache_dir}" + ) + try: + hf_model = Gemma3nForConditionalGeneration.from_pretrained( + model_cache_path, + torch_dtype=torch.bfloat16, + low_cpu_mem_usage=True, + ) + hf_processor = Gemma3nProcessor.from_pretrained( + processor_cache_path + ) + except Exception as e: + print(f"⚠️ Failed to load from cache: {e}. 
Downloading again...") + hf_model = None + hf_processor = None + if hf_model is None or hf_processor is None: + print(f" -> Downloading Hugging Face model: {hf_model_name}") + hf_model = Gemma3nForConditionalGeneration.from_pretrained( + hf_model_name, torch_dtype=torch.bfloat16, low_cpu_mem_usage=True + ) + hf_processor = Gemma3nProcessor.from_pretrained(hf_model_name) + print(f"💾 Saving model and processor to cache: {cache_dir}") + os.makedirs(cache_dir, exist_ok=True) + hf_model.save_pretrained(model_cache_path) + hf_processor.save_pretrained(processor_cache_path) + hf_model.eval() + print("-> Creating Keras model from HF config.") + keras_model = convert_model(hf_model.config, dtype="bfloat16") + print("-> Converting weights from HF to Keras.") + converter = HfToKerasConverter(hf_model) + converter.convert(keras_model) + print("\n-> Validating output consistency.") + validate_output(keras_model, hf_model, hf_processor) + print(f"💾 Saving Keras preset to ./{save_path}") + keras_model.save_to_preset(f"./{save_path}") + print("🏁 Conversion complete.") + del hf_model + gc.collect() + + +if __name__ == "__main__": + app.run(main) From c2cb963dca2a0b748f29983845845b84e88487bd Mon Sep 17 00:00:00 2001 From: harshaljanjani Date: Tue, 4 Nov 2025 18:31:33 +0530 Subject: [PATCH 05/10] init: task model with tests --- .../src/models/gemma3n/gemma3n_attention.py | 90 +- .../models/gemma3n/gemma3n_audio_converter.py | 471 ++++++++ .../gemma3n/gemma3n_audio_converter_test.py | 123 ++ .../src/models/gemma3n/gemma3n_backbone.py | 61 +- .../models/gemma3n/gemma3n_backbone_test.py | 21 +- .../src/models/gemma3n/gemma3n_causal_lm.py | 499 ++++++++ .../gemma3n/gemma3n_causal_lm_preprocessor.py | 1001 +++++++++++++++++ .../gemma3n_causal_lm_preprocessor_test.py | 321 ++++++ .../models/gemma3n/gemma3n_causal_lm_test.py | 364 ++++++ .../models/gemma3n/gemma3n_image_converter.py | 14 + .../models/gemma3n/gemma3n_text_decoder.py | 38 +- .../src/models/gemma3n/gemma3n_text_layers.py | 8 +- .../src/models/gemma3n/gemma3n_text_model.py | 84 +- .../src/models/gemma3n/gemma3n_tokenizer.py | 95 ++ .../models/gemma3n/gemma3n_tokenizer_test.py | 32 + .../src/tests/mocks/mock_gemma3n_tokenizer.py | 159 +++ .../tests/test_data/gemma3n_test_vocab.spm | Bin 0 -> 238204 bytes .../convert_gemma3n_checkpoints.py | 51 +- .../create_gemma3n_test_proto.py | 34 + 19 files changed, 3377 insertions(+), 89 deletions(-) create mode 100644 keras_hub/src/models/gemma3n/gemma3n_audio_converter.py create mode 100644 keras_hub/src/models/gemma3n/gemma3n_audio_converter_test.py create mode 100644 keras_hub/src/models/gemma3n/gemma3n_causal_lm.py create mode 100644 keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor.py create mode 100644 keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor_test.py create mode 100644 keras_hub/src/models/gemma3n/gemma3n_causal_lm_test.py create mode 100644 keras_hub/src/models/gemma3n/gemma3n_image_converter.py create mode 100644 keras_hub/src/models/gemma3n/gemma3n_tokenizer.py create mode 100644 keras_hub/src/models/gemma3n/gemma3n_tokenizer_test.py create mode 100644 keras_hub/src/tests/mocks/mock_gemma3n_tokenizer.py create mode 100644 keras_hub/src/tests/test_data/gemma3n_test_vocab.spm create mode 100644 tools/sentencepiece_testing/create_gemma3n_test_proto.py diff --git a/keras_hub/src/models/gemma3n/gemma3n_attention.py b/keras_hub/src/models/gemma3n/gemma3n_attention.py index 954b229b5e..0d02074bd3 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_attention.py +++ 
b/keras_hub/src/models/gemma3n/gemma3n_attention.py @@ -336,7 +336,14 @@ def build(self, input_shape): super().build(input_shape) def call( - self, hidden_states, position_embeddings, attention_mask, training=False + self, + hidden_states, + position_embeddings, + attention_mask, + cache=None, + cache_update_index=0, + cache_update_mask=None, + training=False, ): input_shape = keras.ops.shape(hidden_states)[:-1] cos, sin = position_embeddings @@ -351,20 +358,71 @@ def call( query_states, cos, sin, unsqueeze_dim=2 ) query_states = keras.ops.transpose(query_states, (0, 2, 1, 3)) - key_states = self.k_proj(hidden_states) - key_states = keras.ops.reshape( - key_states, input_shape + (self.num_key_value_heads, self.head_dim) - ) - key_states = self.k_norm(key_states) - key_states = apply_rotary_pos_emb(key_states, cos, sin, unsqueeze_dim=2) - key_states = keras.ops.transpose(key_states, (0, 2, 1, 3)) - value_states = self.v_proj(hidden_states) - value_states = keras.ops.reshape( - value_states, - input_shape + (self.num_key_value_heads, self.head_dim), - ) - value_states = self.v_norm(value_states) - value_states = keras.ops.transpose(value_states, (0, 2, 1, 3)) + if cache is not None: + key_cache = cache[:, 0, ...] + value_cache = cache[:, 1, ...] + key_update = self.k_proj(hidden_states) + key_update = keras.ops.reshape( + key_update, + input_shape + (self.num_key_value_heads, self.head_dim), + ) + key_update = self.k_norm(key_update) + key_update = apply_rotary_pos_emb( + key_update, cos, sin, unsqueeze_dim=2 + ) + key_update = keras.ops.transpose(key_update, (0, 2, 1, 3)) + value_update = self.v_proj(hidden_states) + value_update = keras.ops.reshape( + value_update, + input_shape + (self.num_key_value_heads, self.head_dim), + ) + value_update = self.v_norm(value_update) + value_update = keras.ops.transpose(value_update, (0, 2, 1, 3)) + start = [0, 0, cache_update_index, 0] + if cache_update_mask is not None: + cache_update_mask = keras.ops.expand_dims( + keras.ops.expand_dims(cache_update_mask, axis=1), + axis=-1, + ) + key_original = keras.ops.slice( + key_cache, start, keras.ops.shape(key_update) + ) + value_original = keras.ops.slice( + value_cache, start, keras.ops.shape(value_update) + ) + key_update = keras.ops.where( + cache_update_mask, + key_update, + key_original, + ) + value_update = keras.ops.where( + cache_update_mask, + value_update, + value_original, + ) + key_states = keras.ops.slice_update(key_cache, start, key_update) + value_states = keras.ops.slice_update( + value_cache, start, value_update + ) + cache = keras.ops.stack((key_states, value_states), axis=1) + else: + key_states = self.k_proj(hidden_states) + key_states = keras.ops.reshape( + key_states, + input_shape + (self.num_key_value_heads, self.head_dim), + ) + key_states = self.k_norm(key_states) + key_states = apply_rotary_pos_emb( + key_states, cos, sin, unsqueeze_dim=2 + ) + key_states = keras.ops.transpose(key_states, (0, 2, 1, 3)) + value_states = self.v_proj(hidden_states) + value_states = keras.ops.reshape( + value_states, + input_shape + (self.num_key_value_heads, self.head_dim), + ) + value_states = self.v_norm(value_states) + value_states = keras.ops.transpose(value_states, (0, 2, 1, 3)) attn_output, attn_weights = eager_attention_forward( query_states, key_states, @@ -377,6 +435,8 @@ def call( ) attn_output = keras.ops.reshape(attn_output, input_shape + (-1,)) attn_output = self.o_proj(attn_output) + if cache is not None: + return attn_output, attn_weights, cache return attn_output, attn_weights def 
get_config(self): diff --git a/keras_hub/src/models/gemma3n/gemma3n_audio_converter.py b/keras_hub/src/models/gemma3n/gemma3n_audio_converter.py new file mode 100644 index 0000000000..aa15e22dee --- /dev/null +++ b/keras_hub/src/models/gemma3n/gemma3n_audio_converter.py @@ -0,0 +1,471 @@ +import math + +import keras +import numpy as np + +try: + import tensorflow as tf +except ImportError: + tf = None + + +class Gemma3nAudioConverter(keras.layers.Layer): + """Converts raw audio waveforms into log-mel spectrograms. + + This layer preprocesses 1D audio signals into 2D log-mel spectrograms + suitable for the Gemma3n audio encoder. The conversion process involves + padding or truncating the raw audio to a consistent length, applying + optional dithering, input scaling, and preemphasis, and then computing the + Short-Time Fourier Transform (STFT) with a Hann window. The resulting + magnitude spectrogram is converted to the mel scale using a mel filterbank, + after which the log-mel spectrogram is calculated by taking the logarithm. + Finally, the layer can optionally normalize these features using provided + per-bin mean and standard deviation statistics, and it returns both the + spectrogram and an attention mask indicating which frames are valid. + + Args: + feature_size: int. The number of mel bins to generate. + Defaults to 128. + sampling_rate: int. The expected sampling rate of the input audio. + Defaults to 16000. + padding_value: float. The value to use for padding the raw audio. + Defaults to 0.0. + return_attention_mask: bool. Whether to return an attention mask. + Defaults to True. + frame_length_ms: float. The length of each STFT frame in + milliseconds. Defaults to 32.0. + hop_length_ms: float. The step size between STFT frames in + milliseconds. Defaults to 10.0. + min_frequency: float. The lowest frequency for the mel filterbank. + Defaults to 125.0. + max_frequency: float. The highest frequency for the mel filterbank. + Defaults to 7600.0. + preemphasis: float. The coefficient for the preemphasis filter. + Set to 0.0 to disable. Defaults to 0.97. + preemphasis_htk_flavor: bool. Whether to use the HTK-style + preemphasis. Defaults to True. + fft_overdrive: bool. If True, doubles the FFT length. + Defaults to True. + dither: float. Amount of dithering to add to the waveform. + Set to 0.0 to disable. Defaults to 0.0. + input_scale_factor: float. Factor to scale the input waveform by. + Defaults to 1.0. + mel_floor: float. A minimum value (floor) to apply before taking + the logarithm. Defaults to 1e-5. + per_bin_mean: list or None. A list of mean values for each mel + bin, used for normalization. Defaults to None. + per_bin_stddev: list or None. A list of standard deviation values + for each mel bin, used for normalization. Defaults to None. + padding_side: str. Which side to pad the audio on ('right' or + 'left'). Defaults to 'right'. 
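+
+ Example:
+
+ A minimal sketch of converting one second of raw audio into log-mel
+ features with the default settings:
+ ```python
+ import numpy as np
+
+ audio = np.sin(
+     2 * np.pi * 440 * np.linspace(0, 1, 16000, dtype="float32")
+ )
+ converter = Gemma3nAudioConverter(feature_size=128, sampling_rate=16000)
+ features, features_mask = converter(audio)
+ # `features` has shape `(num_frames, feature_size)`; `features_mask`
+ # marks the valid frames.
+ ```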
+ """ + + def __init__( + self, + feature_size=128, + sampling_rate=16000, + padding_value=0.0, + return_attention_mask=True, + frame_length_ms=32.0, + hop_length_ms=10.0, + min_frequency=125.0, + max_frequency=7600.0, + preemphasis=0.97, + preemphasis_htk_flavor=True, + fft_overdrive=True, + dither=0.0, + input_scale_factor=1.0, + mel_floor=1e-5, + per_bin_mean=None, + per_bin_stddev=None, + padding_side="right", + **kwargs, + ): + # === Config === + super().__init__(**kwargs) + self.feature_size = feature_size + self.sampling_rate = sampling_rate + self.padding_value = padding_value + self.return_attention_mask = return_attention_mask + self.padding_side = padding_side + self.min_frequency = min_frequency + self.max_frequency = max_frequency + self.preemphasis = preemphasis + self.preemphasis_htk_flavor = preemphasis_htk_flavor + self.fft_overdrive = fft_overdrive + self.dither = dither + self.input_scale_factor = input_scale_factor + self.frame_length_ms = frame_length_ms + self.hop_length_ms = hop_length_ms + self.mel_floor_arg = mel_floor + self.per_bin_mean_arg = per_bin_mean + self.per_bin_stddev_arg = per_bin_stddev + self.frame_length = int(round(sampling_rate * frame_length_ms / 1000.0)) + self.hop_length = int(round(sampling_rate * hop_length_ms / 1000.0)) + self.mel_floor = tf.constant(mel_floor, dtype=self.compute_dtype) + fft_length = 2 ** math.ceil(math.log2(self.frame_length)) + if self.fft_overdrive: + fft_length *= 2 + self.fft_length = fft_length + hann_arange = tf.range(self.frame_length, dtype=self.compute_dtype) + self.window = 0.5 * ( + 1 - tf.cos(2 * np.pi * hann_arange / self.frame_length) + ) + self.mel_filters = self._create_fb_matrix( + n_freqs=self.fft_length // 2 + 1, + f_min=min_frequency, + f_max=max_frequency, + n_mels=feature_size, + sample_rate=self.sampling_rate, + fft_length=fft_length, + ) + if per_bin_mean is not None: + self.per_bin_mean = tf.constant( + per_bin_mean, + shape=(1, 1, feature_size), + dtype=self.compute_dtype, + ) + else: + self.per_bin_mean = None + if per_bin_stddev is not None: + self.per_bin_stddev = tf.constant( + per_bin_stddev, + shape=(1, 1, feature_size), + dtype=self.compute_dtype, + ) + else: + self.per_bin_stddev = None + self._convert_input_args = False + self._allow_non_tensor_positional_args = True + self.built = True + + def _create_fb_matrix( + self, + n_freqs, + f_min, + f_max, + n_mels, + sample_rate, + fft_length, + ): + all_freqs = tf.cast(tf.range(n_freqs), dtype=self.compute_dtype) * ( + sample_rate / fft_length + ) + m_min = 2595.0 * math.log10(1.0 + (f_min / 700.0)) + m_max = 2595.0 * math.log10(1.0 + (f_max / 700.0)) + m_pts = np.linspace(m_min, m_max, n_mels + 2, dtype=np.float32) + f_pts = 700.0 * (10 ** (m_pts / 2595.0) - 1.0) + f_pts = tf.constant(f_pts, dtype=self.compute_dtype) + f_diff = f_pts[1:] - f_pts[:-1] + slopes = tf.expand_dims(f_pts, 0) - tf.expand_dims(all_freqs, 1) + zero = tf.zeros(1, dtype=self.compute_dtype) + down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] + up_slopes = slopes[:, 2:] / f_diff[1:] + fb = tf.maximum(zero, tf.minimum(down_slopes, up_slopes)) + return tf.constant(fb, dtype=self.compute_dtype) + + def _extract_spectrogram(self, waveform, attention_mask): + waveform = tf.cast(waveform, dtype=self.compute_dtype) + if self.dither > 0.0: + waveform = waveform + self.dither * tf.random.normal( + tf.shape(waveform), dtype=waveform.dtype + ) + if self.input_scale_factor != 1.0: + waveform = waveform * self.input_scale_factor + if self.preemphasis > 0.0: + if 
self.preemphasis_htk_flavor: + first_sample = waveform[:, :1] * (1.0 - self.preemphasis) + rest_of_samples = ( + waveform[:, 1:] - self.preemphasis * waveform[:, :-1] + ) + waveform = tf.concat([first_sample, rest_of_samples], axis=-1) + else: + waveform = tf.concat( + [ + waveform[:, :1], + waveform[:, 1:] - self.preemphasis * waveform[:, :-1], + ], + axis=-1, + ) + frames = tf.signal.frame( + waveform, + frame_length=self.frame_length, + frame_step=self.hop_length, + pad_end=False, + ) + frames = frames * self.window + pad_length = self.fft_length - self.frame_length + paddings = [[0, 0], [0, 0], [0, pad_length]] + frames = tf.pad(frames, paddings) + stft = tf.signal.rfft(frames) + magnitude_spec = tf.abs(stft) + mel_spec = tf.matmul(magnitude_spec, self.mel_filters) + log_mel_spec = tf.math.log(tf.maximum(mel_spec, self.mel_floor)) + if self.per_bin_mean is not None: + log_mel_spec = log_mel_spec - self.per_bin_mean + if self.per_bin_stddev is not None: + log_mel_spec = log_mel_spec / self.per_bin_stddev + mel_spectrogram = tf.squeeze(log_mel_spec, axis=0) + mask = tf.cast(attention_mask[:: self.hop_length], dtype=tf.bool) + return mel_spectrogram, mask[: tf.shape(mel_spectrogram)[0]] + + def _get_padding_strategies(self, padding=False, max_length=None): + if padding is not False: + if padding is True: + padding_strategy = "longest" + else: + padding_strategy = padding + else: + padding_strategy = "do_not_pad" + if max_length is None: + if padding_strategy == "max_length": + raise ValueError( + "When setting padding='max_length', max_length must be " + "defined" + ) + if padding_strategy != "do_not_pad" and (self.padding_value is None): + raise ValueError("Padding requested but no padding_value defined") + return padding_strategy + + def _pad( + self, + input_features, + attention_mask=None, + max_length=None, + padding_strategy="do_not_pad", + pad_to_multiple_of=None, + return_attention_mask=None, + ): + required_input = input_features + if padding_strategy == "longest": + max_length = len(required_input) + if ( + max_length is not None + and pad_to_multiple_of is not None + and (max_length % pad_to_multiple_of != 0) + ): + max_length = ( + (max_length // pad_to_multiple_of) + 1 + ) * pad_to_multiple_of + needs_to_be_padded = ( + padding_strategy != "do_not_pad" + and len(required_input) < max_length + ) + if return_attention_mask and attention_mask is None: + attention_mask = np.ones(len(required_input), dtype=np.int32) + if needs_to_be_padded: + difference = max_length - len(required_input) + if self.padding_side == "right": + if return_attention_mask: + attention_mask = np.pad(attention_mask, (0, difference)) + if required_input.ndim > 1: + padding_shape = ((0, difference), (0, 0)) + else: + padding_shape = ((0, difference),) + input_features = np.pad( + required_input, + padding_shape, + "constant", + constant_values=self.padding_value, + ) + elif self.padding_side == "left": + if return_attention_mask: + attention_mask = np.pad(attention_mask, (difference, 0)) + if required_input.ndim > 1: + padding_shape = ((difference, 0), (0, 0)) + else: + padding_shape = ((difference, 0),) + input_features = np.pad( + required_input, + padding_shape, + "constant", + constant_values=self.padding_value, + ) + return input_features, attention_mask + + def _truncate( + self, + input_features, + attention_mask=None, + max_length=None, + pad_to_multiple_of=None, + truncation=None, + ): + if not truncation: + return input_features, attention_mask + elif truncation and max_length is None: + raise 
ValueError( + "When setting truncation=True, max_length must be defined" + ) + required_input = input_features + if ( + max_length is not None + and pad_to_multiple_of is not None + and (max_length % pad_to_multiple_of != 0) + ): + max_length = ( + (max_length // pad_to_multiple_of) + 1 + ) * pad_to_multiple_of + needs_to_be_truncated = len(required_input) > max_length + if needs_to_be_truncated: + input_features = input_features[:max_length] + if attention_mask is not None: + attention_mask = attention_mask[:max_length] + return input_features, attention_mask + + def pad( + self, + input_features, + padding=True, + max_length=None, + truncation=False, + pad_to_multiple_of=None, + return_attention_mask=None, + ): + required_input = input_features + return_attention_mask = ( + return_attention_mask + if return_attention_mask is not None + else self.return_attention_mask + ) + if len(required_input) == 0: + return [], [] if return_attention_mask else None + required_input = [np.asarray(v) for v in required_input] + padding_strategy = self._get_padding_strategies( + padding=padding, max_length=max_length + ) + batch_size = len(required_input) + truncated_inputs = [] + truncated_masks = [] + for i in range(batch_size): + inputs = required_input[i] + mask = ( + np.ones(len(inputs), dtype=np.int32) + if return_attention_mask + else None + ) + inputs_slice, mask_slice = self._truncate( + inputs, + attention_mask=mask, + max_length=max_length, + pad_to_multiple_of=pad_to_multiple_of, + truncation=truncation, + ) + truncated_inputs.append(inputs_slice) + if mask_slice is not None: + truncated_masks.append(mask_slice) + if padding_strategy == "longest": + max_length = max( + len(input_slice) for input_slice in truncated_inputs + ) + padding_strategy = "max_length" + batch_outputs_features = [] + batch_outputs_masks = [] + for i in range(batch_size): + inputs = truncated_inputs[i] + mask = truncated_masks[i] if return_attention_mask else None + outputs_features, outputs_mask = self._pad( + inputs, + attention_mask=mask, + max_length=max_length, + padding_strategy=padding_strategy, + pad_to_multiple_of=pad_to_multiple_of, + return_attention_mask=return_attention_mask, + ) + if outputs_features.dtype == np.dtype(np.float64): + outputs_features = outputs_features.astype(np.float32) + batch_outputs_features.append(outputs_features) + if outputs_mask is not None: + batch_outputs_masks.append(outputs_mask) + if not return_attention_mask: + return batch_outputs_features, None + return batch_outputs_features, batch_outputs_masks + + def call( + self, + raw_speech, + padding="longest", + max_length=480000, + truncation=True, + pad_to_multiple_of=128, + return_attention_mask=True, + ): + def _process_in_py(raw_speech_tensor): + raw_speech_np = raw_speech_tensor.numpy() + is_batched = raw_speech_np.ndim > 1 + if is_batched: + speech_list = [rs.reshape(-1, 1) for rs in raw_speech_np] + else: + raw_speech_np = np.atleast_1d(raw_speech_np) + speech_list = [raw_speech_np.reshape(-1, 1)] + input_features_list, attention_mask_list = self.pad( + speech_list, + padding=padding, + max_length=max_length, + truncation=truncation, + pad_to_multiple_of=pad_to_multiple_of, + return_attention_mask=return_attention_mask, + ) + prepared_speech = [] + prepared_speech_mask = [] + for speech, mask in zip(input_features_list, attention_mask_list): + speech_tensor = tf.constant(speech.T, dtype=self.compute_dtype) + mask_tensor = tf.constant(mask, dtype=tf.int32) + features, feature_mask = self._extract_spectrogram( + speech_tensor, 
mask_tensor + ) + prepared_speech.append(features) + prepared_speech_mask.append(feature_mask) + input_features = tf.stack(prepared_speech) + input_features_mask = tf.stack(prepared_speech_mask) + if not is_batched: + input_features = tf.squeeze(input_features, axis=0) + input_features_mask = tf.squeeze(input_features_mask, axis=0) + return input_features, input_features_mask + + if not isinstance(raw_speech, (tf.Tensor, tf.RaggedTensor)): + was_batched = isinstance(raw_speech, (list, tuple)) + raw_speech = tf.convert_to_tensor( + raw_speech, dtype=self.compute_dtype + ) + else: + was_batched = raw_speech.shape.rank > 1 + input_features, input_features_mask = tf.py_function( + _process_in_py, + inp=[raw_speech], + Tout=[self.compute_dtype, tf.bool], + ) + num_frames = None + if was_batched: + input_features.set_shape([None, num_frames, self.feature_size]) + input_features_mask.set_shape([None, num_frames]) + else: + input_features.set_shape([num_frames, self.feature_size]) + input_features_mask.set_shape([num_frames]) + input_features_mask = tf.cast(input_features_mask, dtype="int32") + return input_features, input_features_mask + + def get_config(self): + config = super().get_config() + config.update( + { + "feature_size": self.feature_size, + "sampling_rate": self.sampling_rate, + "padding_value": self.padding_value, + "return_attention_mask": self.return_attention_mask, + "frame_length_ms": self.frame_length_ms, + "hop_length_ms": self.hop_length_ms, + "min_frequency": self.min_frequency, + "max_frequency": self.max_frequency, + "preemphasis": self.preemphasis, + "preemphasis_htk_flavor": self.preemphasis_htk_flavor, + "fft_overdrive": self.fft_overdrive, + "dither": self.dither, + "input_scale_factor": self.input_scale_factor, + "mel_floor": self.mel_floor_arg, + "per_bin_mean": self.per_bin_mean_arg, + "per_bin_stddev": self.per_bin_stddev_arg, + "padding_side": self.padding_side, + } + ) + return config diff --git a/keras_hub/src/models/gemma3n/gemma3n_audio_converter_test.py b/keras_hub/src/models/gemma3n/gemma3n_audio_converter_test.py new file mode 100644 index 0000000000..78dd912a0f --- /dev/null +++ b/keras_hub/src/models/gemma3n/gemma3n_audio_converter_test.py @@ -0,0 +1,123 @@ +import numpy as np + +from keras_hub.src.models.gemma3n.gemma3n_audio_converter import ( + Gemma3nAudioConverter, +) +from keras_hub.src.tests.test_case import TestCase + + +class Gemma3nAudioConverterTest(TestCase): + def setUp(self): + super().setUp() + self.feature_size = 128 + self.sampling_rate = 16000 + self.hop_length_ms = 10.0 + self.frame_length_ms = 32.0 + # Dummy audio. 
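+ # A one-second 440 Hz sine wave sampled at 16 kHz.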
+ self.input_data = [ + np.sin( + 2 + * np.pi + * 440 + * np.linspace(0, 1, self.sampling_rate, dtype=np.float32) + ) + ] + self.init_kwargs = { + "feature_size": self.feature_size, + "sampling_rate": self.sampling_rate, + "padding_value": 0.0, + "return_attention_mask": True, + "frame_length_ms": self.frame_length_ms, + "hop_length_ms": self.hop_length_ms, + "min_frequency": 125.0, + "max_frequency": 7600.0, + "preemphasis": 0.97, + "preemphasis_htk_flavor": True, + "fft_overdrive": True, + "dither": 0.0, + "input_scale_factor": 1.0, + "mel_floor": 1e-5, + "per_bin_mean": None, + "per_bin_stddev": None, + "padding_side": "right", + } + + def test_output_shape(self): + converter = Gemma3nAudioConverter(**self.init_kwargs) + outputs = converter(self.input_data[0]) + frame_length = int( + round(self.sampling_rate * self.frame_length_ms / 1000.0) + ) + hop_length = int( + round(self.sampling_rate * self.hop_length_ms / 1000.0) + ) + num_frames = (len(self.input_data[0]) - frame_length) // hop_length + 1 + expected_features_shape = (num_frames, self.feature_size) + expected_mask_shape = (num_frames,) + # Check that the outputs are tuples with two elements. + self.assertIsInstance(outputs, tuple) + self.assertEqual(len(outputs), 2) + input_features, input_features_mask = outputs + self.assertEqual(input_features.shape, expected_features_shape) + self.assertEqual(input_features_mask.shape, expected_mask_shape) + + def test_padding(self): + max_length = 20000 + pad_to_multiple_of = 128 + converter = Gemma3nAudioConverter(**self.init_kwargs) + outputs = converter( + self.input_data[0], + padding="max_length", + max_length=max_length, + pad_to_multiple_of=pad_to_multiple_of, + ) + # Calculate expectations. + if max_length % pad_to_multiple_of != 0: + padded_length = ( + (max_length // pad_to_multiple_of) + 1 + ) * pad_to_multiple_of + else: + padded_length = max_length + frame_length = int( + round(self.sampling_rate * self.frame_length_ms / 1000.0) + ) + hop_length = int( + round(self.sampling_rate * self.hop_length_ms / 1000.0) + ) + num_frames = (padded_length - frame_length) // hop_length + 1 + expected_features_shape = (num_frames, self.feature_size) + # Check that the outputs are tuples with two elements. + self.assertIsInstance(outputs, tuple) + self.assertEqual(len(outputs), 2) + input_features, _ = outputs + self.assertEqual(input_features.shape, expected_features_shape) + + def test_normalization(self): + mean = np.random.rand(self.feature_size).tolist() + stddev = np.random.rand(self.feature_size).tolist() + # One converter with normalization and one without. + converter_no_norm = Gemma3nAudioConverter(**self.init_kwargs) + norm_kwargs = self.init_kwargs.copy() + norm_kwargs["per_bin_mean"] = mean + norm_kwargs["per_bin_stddev"] = stddev + converter_norm = Gemma3nAudioConverter(**norm_kwargs) + outputs_no_norm = converter_no_norm(self.input_data) + outputs_norm = converter_norm(self.input_data) + # Check that the outputs are tuples with two elements. + self.assertIsInstance(outputs_no_norm, tuple) + self.assertEqual(len(outputs_no_norm), 2) + self.assertIsInstance(outputs_norm, tuple) + self.assertEqual(len(outputs_norm), 2) + features_no_norm, _ = outputs_no_norm + features_norm, _ = outputs_norm + # We would want outputs to be different. + self.assertNotAllClose(features_no_norm, features_norm) + # Manually normalize and check for closeness. 
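+ # The converter applies `(x - per_bin_mean) / per_bin_stddev` per mel bin.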
+ manual_norm_features = (features_no_norm - np.array(mean)) / np.array( + stddev + ) + self.assertAllClose(manual_norm_features, features_norm) + + def test_serialization(self): + instance = Gemma3nAudioConverter(**self.init_kwargs) + self.run_serialization_test(instance=instance) diff --git a/keras_hub/src/models/gemma3n/gemma3n_backbone.py b/keras_hub/src/models/gemma3n/gemma3n_backbone.py index 3440ad6622..3c519a39db 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_backbone.py +++ b/keras_hub/src/models/gemma3n/gemma3n_backbone.py @@ -205,10 +205,17 @@ def call(self, inputs): per_layer_inputs = self.language_model.get_per_layer_inputs( per_layer_inputs_tokens ) - if self.vision_encoder: + if self.vision_encoder and self.embed_vision: + if self.embed_audio: + vision_upper_bound = self.embed_audio.vocab_offset + else: + vision_upper_bound = ( + self.embed_vision.vocab_offset + + self.embed_vision.vocab_size + ) vision_mask = keras.ops.logical_and( input_ids >= self.embed_vision.vocab_offset, - input_ids < self.embed_audio.vocab_offset, + input_ids < vision_upper_bound, ) dummy_vision_token_id = ( self.embed_vision.vocab_offset @@ -225,7 +232,7 @@ def call(self, inputs): vision_embeds_from_vocab, inputs_embeds, ) - if self.audio_encoder: + if self.audio_encoder and self.embed_audio: audio_mask = input_ids >= self.embed_audio.vocab_offset dummy_audio_token_id = ( self.embed_audio.vocab_offset @@ -300,15 +307,30 @@ def scatter_vision_features(): and input_features_mask is not None and self.audio_encoder ): + original_shape = keras.ops.shape(input_features) + b, n, t, f = ( + original_shape[0], + original_shape[1], + original_shape[2], + original_shape[3], + ) + input_features = keras.ops.reshape(input_features, (b * n, t, f)) + input_features_mask = keras.ops.reshape( + input_features_mask, (b * n, t) + ) audio_features, _ = self.audio_encoder( (input_features, input_features_mask) ) audio_embeds = self.embed_audio(audio_features) + audio_embeds_shape = keras.ops.shape(audio_embeds) + t_out, h = audio_embeds_shape[1], audio_embeds_shape[2] + audio_embeds = keras.ops.reshape(audio_embeds, (b, n, t_out, h)) shape = keras.ops.shape(audio_embeds) - audio_batch_size, audio_seq_len, hidden_size = ( + audio_batch_size, audio_num_clips, audio_seq_len, hidden_size = ( shape[0], shape[1], shape[2], + shape[3], ) target_len = self.audio_soft_tokens_per_image last_audio_token_id = ( @@ -330,7 +352,10 @@ def scatter_vision_features(): pad_token_index = keras.ops.shape(flat_audio_embeds)[0] indices = keras.ops.arange(target_len) is_real_token = indices < audio_seq_len - batch_offsets = keras.ops.arange(audio_batch_size) * audio_seq_len + batch_offsets = ( + keras.ops.arange(audio_batch_size * audio_num_clips) + * audio_seq_len + ) real_indices = keras.ops.expand_dims( indices, 0 ) + keras.ops.expand_dims(batch_offsets, 1) @@ -340,6 +365,10 @@ def scatter_vision_features(): pad_token_index, ) audio_embeds = keras.ops.take(vocab, final_indices, axis=0) + audio_embeds = keras.ops.reshape( + audio_embeds, + (audio_batch_size, audio_num_clips * target_len, hidden_size), + ) audio_token_mask = keras.ops.equal(input_ids, self.audio_token_id) def scatter_audio_features(): @@ -704,35 +733,35 @@ def __init__( token_ids_input = keras.Input( shape=(None,), dtype="int32", name="token_ids" ) - attention_mask_input = keras.Input( - shape=(None, None, None), dtype="bool", name="attention_mask" + padding_mask_input = keras.Input( + shape=(None,), dtype="bool", name="padding_mask" ) processor_inputs = { "token_ids": 
token_ids_input, } model_inputs = { "token_ids": token_ids_input, - "attention_mask": attention_mask_input, + "padding_mask": padding_mask_input, } # === Modality Feature Extraction and Interleaving === if self.vision_encoder: input_shape = (None,) + tuple(self.vision_encoder.image_shape) - pixel_values_input = keras.Input( + images_input = keras.Input( shape=input_shape, dtype="float32", - name="pixel_values", + name="images", ) - processor_inputs["pixel_values"] = pixel_values_input - model_inputs["pixel_values"] = pixel_values_input + processor_inputs["pixel_values"] = images_input + model_inputs["images"] = images_input if self.audio_encoder: input_features_input = keras.Input( - shape=(None, self.audio_encoder.input_feat_size), + shape=(None, None, self.audio_encoder.input_feat_size), dtype="float32", name="input_features", ) input_features_mask_input = keras.Input( - shape=(None,), dtype="bool", name="input_features_mask" + shape=(None, None), dtype="bool", name="input_features_mask" ) processor_inputs["input_features"] = input_features_input processor_inputs["input_features_mask"] = input_features_mask_input @@ -745,9 +774,11 @@ def __init__( # === Decoder layers === # The Gemma3nTextModel encapsulates the decoder loop and final norm. # It requires `input_ids` for its internal per-layer logic. + attention_mask = keras.ops.expand_dims(padding_mask_input, axis=1) + attention_mask = keras.ops.expand_dims(attention_mask, axis=1) sequence_output = self.language_model( token_ids_input, - attention_mask_input, + attention_mask, final_embeds, per_layer_inputs, ) diff --git a/keras_hub/src/models/gemma3n/gemma3n_backbone_test.py b/keras_hub/src/models/gemma3n/gemma3n_backbone_test.py index abad0a2f98..fdf513f334 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_backbone_test.py +++ b/keras_hub/src/models/gemma3n/gemma3n_backbone_test.py @@ -25,6 +25,7 @@ def setUp(self): self.image_width = 32 self.audio_sequence_length = 8 self.audio_feature_size = 16 + # === Vision Encoder === vision_arch_def = [["er_r1_k3_s1_e1_c8"]] stackwise_params = convert_arch_def_to_stackwise(vision_arch_def) @@ -35,6 +36,7 @@ def setUp(self): use_msfa=False, ) vision_encoder_config = vision_encoder.get_config() + # === Audio Encoder === audio_encoder = Gemma3nAudioEncoder( hidden_size=4, @@ -55,6 +57,7 @@ def setUp(self): conf_conv_kernel_size=3, conf_reduction_factor=1, ) + # === Multimodal === self.multimodal_init_kwargs = { "text_vocab_size": self.text_vocab_size, @@ -86,27 +89,23 @@ def setUp(self): size=(self.batch_size, self.text_sequence_length), dtype="int32", ), - "attention_mask": np.ones( - ( - self.batch_size, - 1, - self.text_sequence_length, - self.text_sequence_length, - ), - dtype=bool, + "padding_mask": np.ones( + (self.batch_size, self.text_sequence_length), dtype=bool ), - "pixel_values": np.random.rand( + "images": np.random.rand( self.batch_size, 1, self.image_height, self.image_width, 3 ).astype("float32"), "input_features": np.random.rand( self.batch_size, + 1, self.audio_sequence_length, self.audio_feature_size, ).astype("float32"), "input_features_mask": np.zeros( - (self.batch_size, self.audio_sequence_length), dtype=bool + (self.batch_size, 1, self.audio_sequence_length), dtype=bool ), } + # === Text-Only === self.text_init_kwargs = deepcopy(self.multimodal_init_kwargs) del self.text_init_kwargs["vision_encoder_config"] @@ -114,7 +113,7 @@ def setUp(self): del self.text_init_kwargs["vision_hidden_size"] del self.text_init_kwargs["audio_hidden_size"] self.text_input_data = 
deepcopy(self.multimodal_input_data) - del self.text_input_data["pixel_values"] + del self.text_input_data["images"] del self.text_input_data["input_features"] del self.text_input_data["input_features_mask"] diff --git a/keras_hub/src/models/gemma3n/gemma3n_causal_lm.py b/keras_hub/src/models/gemma3n/gemma3n_causal_lm.py new file mode 100644 index 0000000000..3807588f21 --- /dev/null +++ b/keras_hub/src/models/gemma3n/gemma3n_causal_lm.py @@ -0,0 +1,499 @@ +import keras +import numpy as np + +from keras_hub.src.api_export import keras_hub_export +from keras_hub.src.models.causal_lm import CausalLM +from keras_hub.src.models.gemma3n.gemma3n_backbone import Gemma3nBackbone +from keras_hub.src.models.gemma3n.gemma3n_causal_lm_preprocessor import ( + Gemma3nCausalLMPreprocessor, +) +from keras_hub.src.utils.tensor_utils import any_equal + +try: + import tensorflow as tf +except ImportError: + tf = None + + +@keras_hub_export("keras_hub.models.Gemma3nCausalLM") +class Gemma3nCausalLM(CausalLM): + """An end-to-end multimodal Gemma3n model for causal language modeling. + + A causal language model (LM) predicts the next token based on previous + tokens. This task setup can be used to train the model unsupervised on + images, audio, and plain text inputs, or to autoregressively generate plain + text similar to the data used for training. Note that the model is + image-audio-text in, text out. + + This model has a `generate()` method, which generates text based on a + prompt. The generation strategy used is controlled by an additional + `sampler` argument on `compile()`. You can recompile the model with + different `keras_hub.samplers` objects to control the generation. By + default, `"greedy"` sampling will be used. + + This model can optionally be configured with a `preprocessor` layer, in + which case it will automatically apply preprocessing to string inputs during + `fit()`, `predict()`, `evaluate()` and `generate()`. This is done by default + when creating the model with `from_preset()`. + + Args: + preprocessor: A `keras_hub.models.Gemma3nCausalLMPreprocessor` or + `None`. If `None`, this model will not apply preprocessing, and + inputs should be preprocessed before calling the model. + backbone: A `keras_hub.models.Gemma3nBackbone` instance. + + Examples: + ```python + import numpy as np + from keras_hub.models import Gemma3nCausalLM + + # === Text-only usage === + # Load a text-only Gemma3n model from preset. + causal_lm = Gemma3nCausalLM.from_preset("gemma3n_instruct_1b") + + # Generate text. + causal_lm.generate("What is the capital of France?", max_length=128) + + # === Vision + Text usage === + # Load a vision-text Gemma3n model from preset. + causal_lm = Gemma3nCausalLM.from_preset("gemma3n_instruct_4b") + + # Generate with image input. + image = np.ones((768, 768, 3), dtype="float32") + causal_lm.generate({ + "prompts": "Describe this image: ", + "images": image + }) + + # === Audio + Text usage === + # Load an audio-text Gemma3n model from preset. + causal_lm = Gemma3nCausalLM.from_preset("gemma3n_instruct_4b_audio") + + # Generate with audio input. + audio = np.ones((16000,), dtype="float32") + causal_lm.generate({ + "prompts": "Transcribe this audio: ", + "audios": audio + }) + + # === Vision + Audio + Text usage === + # Generate with both image and audio. 
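+ # The preprocessor expands the image and audio placeholder tokens in
+ # the prompt into the corresponding soft-token sequences.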
+ causal_lm.generate({ + "prompts": "Image: , Audio: ", + "images": image, + "audios": audio + }) + ``` + """ + + backbone_cls = Gemma3nBackbone + preprocessor_cls = Gemma3nCausalLMPreprocessor + + def __init__( + self, + preprocessor, + backbone, + **kwargs, + ): + # === Layers === + self.preprocessor = preprocessor + self.backbone = backbone + + # === Functional Model === + backbone_inputs = backbone.input.copy() + inputs = backbone_inputs.copy() + hidden_state = backbone(backbone_inputs) + is_text_only = "images" not in inputs and "input_features" not in inputs + if not is_text_only: + if "images" not in inputs: + inputs["images"] = keras.Input( + shape=(None, None, None, 3), name="images", dtype="float32" + ) + inputs["vision_indices"] = keras.Input( + shape=(None,), dtype="int32", name="vision_indices" + ) + inputs["vision_mask"] = keras.Input( + shape=(None,), dtype="bool", name="vision_mask" + ) + if "input_features" not in inputs: + inputs["input_features"] = keras.Input( + shape=(None, None, None), + name="input_features", + dtype="float32", + ) + inputs["input_features_mask"] = keras.Input( + shape=(None, None), name="input_features_mask", dtype="bool" + ) + inputs["audios"] = keras.Input( + shape=(None, None), dtype="float32", name="audios" + ) + inputs["audio_indices"] = keras.Input( + shape=(None,), dtype="int32", name="audio_indices" + ) + inputs["audio_mask"] = keras.Input( + shape=(None,), dtype="bool", name="audio_mask" + ) + outputs = backbone.language_model.token_embedding( + hidden_state, reverse=True + ) + super().__init__( + inputs=inputs, + outputs=outputs, + **kwargs, + ) + + def compile( + self, + optimizer="auto", + loss="auto", + *, + weighted_metrics="auto", + sampler="greedy", + **kwargs, + ): + super().compile( + optimizer=optimizer, + loss=loss, + weighted_metrics=weighted_metrics, + sampler=sampler, + **kwargs, + ) + + def _normalize_generate_inputs( + self, + inputs, + ): + if tf and isinstance(inputs, tf.data.Dataset): + return inputs.as_numpy_iterator(), False + if self.preprocessor is None: + return [inputs], False + + def normalize(x): + if isinstance(x, str): + return [x], True + if tf and isinstance(x, tf.Tensor) and x.shape.rank == 0: + return x[tf.newaxis], True + return x, False + + if isinstance(inputs, dict): + inputs["prompts"], input_is_scalar = normalize(inputs["prompts"]) + # Handle unbatched image input. + if "images" in inputs and input_is_scalar: + x = inputs["images"] + if isinstance(x, np.ndarray) and len(x.shape) == 3: + inputs["images"] = [x] + elif tf and isinstance(x, tf.Tensor) and x.shape.rank == 3: + inputs["images"] = x[tf.newaxis] + elif isinstance(x, list): + inputs["images"] = [x] + # Handle unbatched audio input. + if "audios" in inputs and input_is_scalar: + x = inputs["audios"] + if isinstance(x, np.ndarray) and len(x.shape) == 1: + inputs["audios"] = [x] + elif tf and isinstance(x, tf.Tensor) and x.shape.rank == 1: + inputs["audios"] = x[tf.newaxis] + elif isinstance(x, list): + inputs["audios"] = [x] + if "responses" in inputs: + inputs["responses"], _ = normalize(inputs["responses"]) + else: + inputs, input_is_scalar = normalize(inputs) + + return [inputs], input_is_scalar + + def call_with_cache( + self, + token_ids, + cache, + cache_update_index, + pixel_values=None, + input_features=None, + input_features_mask=None, + vision_indices=None, + audio_indices=None, + vision_mask=None, + audio_mask=None, + padding_mask=None, + cache_update_mask=None, + ): + """Forward pass of `Gemma3nCausalLM` with cache. 
+ + `call_with_cache` adds an additional forward pass for the model for + autoregressive inference. Unlike calling the model directly, this method + allows caching previous key/value Tensors in multi-head attention layer, + and avoids recomputing the outputs of seen tokens. + + Args: + token_ids: A dense int Tensor with shape `(batch_size, max_length)`. + cache: A dense float Tensor, the cache of key and value. + cache_update_index: int, or int Tensor. The index of current inputs + in the whole sequence. + pixel_values: A dense float Tensor with shape + `(batch_size, num_images, height, width, channels)`. + input_features: A dense float Tensor with shape + `(batch_size, num_audios, audio_seq_len, feature_size)`. + input_features_mask: A dense bool Tensor with shape + `(batch_size, num_audios, audio_seq_len)`. + vision_indices: A dense int Tensor with shape + `(batch_size, num_vision_tokens)`. + audio_indices: A dense int Tensor with shape + `(batch_size, num_audio_tokens)`. + vision_mask: A dense bool Tensor with shape + `(batch_size, max_length)`. + audio_mask: A dense bool Tensor with shape + `(batch_size, max_length)`. + padding_mask: A dense int Tensor with shape + `(batch_size, max_length)`. + cache_update_mask: A dense bool Tensor for masking cache updates. + + Returns: + A (logits, hidden_states, cache) tuple. Where `logits` is the + language model logits for the input token_ids, `hidden_states` is + the final hidden representation of the input tokens, and `cache` is + the decoding cache. + """ + # TODO: Make design decisions for `vision_indices`, `audio_indices`, + # `vision_mask` and `audio_mask`. + # Build inputs dict for embedding processor. + processor_inputs = {"token_ids": token_ids} + if pixel_values is not None: + processor_inputs["pixel_values"] = pixel_values + if input_features is not None: + processor_inputs["input_features"] = input_features + processor_inputs["input_features_mask"] = input_features_mask + # Get embeddings and per-layer inputs. + inputs_embeds, per_layer_inputs = self.backbone.embedding_processor( + processor_inputs + ) + # Prepare attention mask for caching. + batch_size = keras.ops.shape(token_ids)[0] + max_length = keras.ops.shape(token_ids)[1] + # Create causal attention mask. + if padding_mask is None: + padding_mask = keras.ops.ones( + (batch_size, max_length), dtype="bool" + ) + attention_mask = keras.ops.cast(padding_mask, dtype="bool") + attention_mask = keras.ops.expand_dims(attention_mask, axis=1) + attention_mask = keras.ops.expand_dims(attention_mask, axis=1) + # Each decoder layer has a cache; we update them separately. 
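+ # `cache` stacks per-layer key/value tensors with shape
+ # `(batch, num_layers, 2, num_heads, max_length, head_dim)`.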
+ hidden_states, new_cache = self.backbone.language_model( + input_ids=token_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + per_layer_inputs=per_layer_inputs, + cache=cache, + cache_update_index=cache_update_index, + cache_update_mask=cache_update_mask, + ) + logits = self.backbone.language_model.token_embedding( + hidden_states, reverse=True + ) + return logits, hidden_states, new_cache + + def _build_cache( + self, + token_ids, + pixel_values, + input_features, + input_features_mask, + vision_indices, + audio_indices, + vision_mask, + audio_mask, + padding_mask, + ): + """Build an empty cache for use with `call_with_cache()`.""" + batch_size = keras.ops.shape(token_ids)[0] + max_length = keras.ops.shape(token_ids)[1] + num_layers = self.backbone.num_hidden_layers + num_heads = self.backbone.num_key_value_heads + head_dim = self.backbone.head_dim + shape = [batch_size, num_layers, 2, num_heads, max_length, head_dim] + cache = keras.ops.zeros(shape, dtype=self.compute_dtype) + # Seed the cache. + _, hidden_states, cache = self.call_with_cache( + token_ids=token_ids, + cache=cache, + cache_update_index=0, + pixel_values=pixel_values, + input_features=input_features, + input_features_mask=input_features_mask, + vision_indices=vision_indices, + audio_indices=audio_indices, + vision_mask=vision_mask, + audio_mask=audio_mask, + padding_mask=padding_mask, + cache_update_mask=None, + ) + return hidden_states, cache + + def generate_step(self, inputs, stop_token_ids=[106]): + """A compilable generation function for a single batch of inputs. + + This function represents the inner, XLA-compilable, generation function + for a single batch of inputs. Inputs should have the same structure as + model inputs, a dictionary with keys for token_ids, padding_mask, and + optionally images, audios, vision_mask, audio_mask, etc. + + Args: + inputs: A dictionary with keys for the model inputs including + `"token_ids"`, `"padding_mask"`, and optionally `"images"`, + `"audios"`, `"input_features"`, `"input_features_mask"`, + `"vision_mask"`, `"audio_mask"`, `"vision_indices"`, + `"audio_indices"`. + stop_token_ids: Tuple of id's of end token's to stop on. If all + sequences have produced a new stop token, generation + will stop. + """ + token_ids = inputs["token_ids"] + padding_mask = inputs["padding_mask"] + # Extract multimodal inputs. + images = inputs.get("images", None) + pixel_values = images + input_features = inputs.get("input_features", None) + input_features_mask = inputs.get("input_features_mask", None) + vision_indices = inputs.get("vision_indices", None) + audio_indices = inputs.get("audio_indices", None) + vision_mask = inputs.get("vision_mask", None) + audio_mask = inputs.get("audio_mask", None) + audios = inputs.get("audios", None) + # Handle unbatched inputs by adding batch dimension. 
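+ # A single sample's images arrive as `(num_images, h, w, c)` and its
+ # audio as `(num_audios, num_samples)`; a leading batch axis is added
+ # where it is missing.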
+ if pixel_values is not None and len(keras.ops.shape(pixel_values)) == 4: + pixel_values = keras.ops.expand_dims(pixel_values, axis=0) + if audios is not None and len(keras.ops.shape(audios)) == 2: + audios = keras.ops.expand_dims(audios, axis=0) + if vision_mask is not None and len(keras.ops.shape(vision_mask)) == 1: + vision_mask = keras.ops.expand_dims(vision_mask, axis=0) + if ( + vision_indices is not None + and len(keras.ops.shape(vision_indices)) == 1 + ): + vision_indices = keras.ops.expand_dims(vision_indices, axis=0) + if ( + input_features is not None + and len(keras.ops.shape(input_features)) == 2 + ): + input_features = keras.ops.expand_dims(input_features, axis=0) + if ( + input_features_mask is not None + and len(keras.ops.shape(input_features_mask)) == 1 + ): + input_features_mask = keras.ops.expand_dims( + input_features_mask, axis=0 + ) + if audio_mask is not None and len(keras.ops.shape(audio_mask)) == 1: + audio_mask = keras.ops.expand_dims(audio_mask, axis=0) + if ( + audio_indices is not None + and len(keras.ops.shape(audio_indices)) == 1 + ): + audio_indices = keras.ops.expand_dims(audio_indices, axis=0) + # Create and seed cache with a single forward pass. + hidden_states, cache = self._build_cache( + token_ids, + pixel_values, + input_features, + input_features_mask, + vision_indices, + audio_indices, + vision_mask, + audio_mask, + padding_mask, + ) + # Compute the lengths of all user inputted tokens ids. + row_lengths = keras.ops.sum( + keras.ops.cast(padding_mask, "int32"), axis=-1 + ) + # Start at the first index that has no user inputted id. + index = keras.ops.min(row_lengths) + + def next(prompt, cache, index): + # The cache index is the index of our previous token. + cache_update_index = index - 1 + batch_size = keras.ops.shape(prompt)[0] + prompt = keras.ops.slice(prompt, [0, index - 1], [batch_size, 1]) + sliced_cache_update_mask = keras.ops.slice( + ~padding_mask, [0, index - 1], [batch_size, 1] + ) + logits, hidden_states, cache = self.call_with_cache( + token_ids=prompt, + cache=cache, + cache_update_index=cache_update_index, + cache_update_mask=sliced_cache_update_mask, + ) + return ( + keras.ops.squeeze(logits, axis=1), + keras.ops.squeeze(hidden_states, axis=1), + cache, + ) + + token_ids = self.sampler( + next=next, + prompt=token_ids, + cache=cache, + index=index, + mask=padding_mask, + stop_token_ids=stop_token_ids, + hidden_states=hidden_states, + model=self, + ) + # Compute an output padding mask with the token ids we updated. + if stop_token_ids is not None: + # Build a mask of `stop_token_ids` locations not in the original + # prompt (not in locations where `padding_mask` is True). + end_locations = any_equal( + token_ids, stop_token_ids, keras.ops.logical_not(padding_mask) + ) + end_locations = keras.ops.cast(end_locations, "int32") + # Use cumsum to get ones in all locations after end_locations. + cumsum = keras.ops.cast( + keras.ops.cumsum(end_locations, axis=-1), "int32" + ) + overflow = cumsum - end_locations + # Our padding mask is the inverse of these overflow locations. + padding_mask = keras.ops.logical_not( + keras.ops.cast(overflow, "bool") + ) + else: + # Without early stopping, all locations will have been updated. + padding_mask = keras.ops.ones_like(token_ids, dtype="bool") + return { + "token_ids": token_ids, + "padding_mask": padding_mask, + } + + def generate( + self, + inputs, + max_length=None, + stop_token_ids="auto", + strip_prompt=False, + ): + # If `auto`, add end_of_turn as a stop token too. 
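+ # Resolving "auto" needs the tokenizer, which lives on the preprocessor.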
+ if self.preprocessor is None and stop_token_ids == "auto": + raise ValueError( + "A `preprocessor` must be attached to the model if " + '`stop_token_ids="auto"`. Currently `preprocessor=None`. To ' + "call `generate()` with preprocessing detached, either pass " + "`stop_token_ids=None` to always generate until `max_length` " + "or pass a tuple of token ids that should terminate generation " + "as `stop_token_ids`." + ) + elif stop_token_ids == "auto": + stop_token_ids = [ + self.preprocessor.tokenizer.end_token_id, + ] + # Add end_of_turn token if available. + end_of_turn_id = self.preprocessor.tokenizer.token_to_id( + "" + ) + if end_of_turn_id is not None: + stop_token_ids.append(end_of_turn_id) + return super().generate( + inputs, + max_length=max_length, + stop_token_ids=stop_token_ids, + strip_prompt=strip_prompt, + ) diff --git a/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor.py b/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor.py new file mode 100644 index 0000000000..80cde1a647 --- /dev/null +++ b/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor.py @@ -0,0 +1,1001 @@ +import keras +import numpy as np +import tensorflow as tf + +from keras_hub.src.api_export import keras_hub_export +from keras_hub.src.layers.preprocessing.multi_segment_packer import ( + MultiSegmentPacker, +) +from keras_hub.src.models.causal_lm_preprocessor import CausalLMPreprocessor +from keras_hub.src.models.gemma3n.gemma3n_backbone import Gemma3nBackbone +from keras_hub.src.models.gemma3n.gemma3n_image_converter import ( + Gemma3nImageConverter, +) +from keras_hub.src.models.gemma3n.gemma3n_tokenizer import Gemma3nTokenizer +from keras_hub.src.utils.tensor_utils import preprocessing_function +from keras_hub.src.utils.tensor_utils import strip_to_ragged + + +@keras_hub_export("keras_hub.models.Gemma3nCausalLMPreprocessor") +class Gemma3nCausalLMPreprocessor(CausalLMPreprocessor): + """Gemma3n Causal LM preprocessor. + + This preprocessing layer is meant for use with + `keras_hub.models.Gemma3nCausalLM`. It can be configured in three ways: + text-only, text + vision, and text + vision + audio, based on whether the + passed values of `image_converter` and `audio_converter` are None. For + text-only, it takes in batches of strings. For text + vision, it takes in + batches of images and strings. For text + vision + audio, it takes in + batches of images, audio, and strings. It returns outputs in a + `(x, y, sample_weight)` format, where the `y` label is the next token id in + the `x` sequence. `sample_weight` is 0 for "prompt" tokens, and 1 for + "response" tokens, so that the loss is computed only on the "response" + tokens. + + For the text + vision case, this layer replaces instances of + `` token in the prompt with `num_vision_tokens_per_image` + placeholder tokens. It also returns indices of where these vision tokens + are present so that in the model, image embeddings can be placed in the + right position in the sequence of text embeddings. + + For the text + audio case, this layer replaces instances of + `` token in the prompt with `num_audio_tokens_per_audio` + placeholder tokens. It also returns indices of where these audio tokens + are present so that in the model, audio embeddings can be placed in the + right position in the sequence of text embeddings. + + Note that if `max_images_per_prompt` is 2, you can pass either 0, 1, 2 + images per sample. The value 0 corresponds to text-only input. 
Similarly, + if `max_audios_per_prompt` is 2, you can pass either 0, 1, 2 audio clips + per sample. + + For use with generation, the layer also exposes two methods + `generate_preprocess()` and `generate_postprocess()`. When this preprocessor + is attached to a `keras_hub.models.Gemma3nCausalLM` instance, these methods + will be called implicitly in `generate()`. They can also be called + standalone (e.g. to precompute preprocessing inputs for generation in a + separate process). + + Args: + tokenizer: A `keras_hub.models.Gemma3nTokenizer` instance. + image_converter: A `keras_hub.layers.ImageConverter` instance. Defaults + to `None`. + audio_converter: A `keras_hub.layers.AudioConverter` instance. Defaults + to `None`. + sequence_length: The length of the packed inputs. Defaults to 1024. + add_start_token: If `True`, the preprocessor will prepend the tokenizer + start token to each input sequence. Defaults to `True`. + add_end_token: If `True`, the preprocessor will append the tokenizer + end token to each input sequence. Defaults to `True`. + max_images_per_prompt: int. Permissible number of images per sample in + the batch. Defaults to 2. + num_vision_tokens_per_image: int. Number of vision placeholder tokens + per image. Defaults to 256. + max_audios_per_prompt: int. Permissible number of audio clips per sample + in the batch. Defaults to 2. + num_audio_tokens_per_audio: int. Number of audio placeholder tokens + per audio clip. Defaults to 188. + + Call arguments: + x: A string, `tf.Tensor` or list of python strings. + y: Label data. Should always be `None` as the layer generates labels. + sample_weight: Label weights. Should always be `None` as the layer + generates label weights. + sequence_length: Pass to override the configured `sequence_length` of + the layer. + + Examples: + ```python + # === Language === + # Load the preprocessor from a preset. + preprocessor = keras_hub.models.Gemma3nCausalLMPreprocessor.from_preset( + "gemma3n_instruct_1b" + ) + + # Unbatched inputs. + preprocessor( + { + "prompts": "What is the capital of India?", + "responses": "New Delhi", + } + ) + + # Batched inputs. + preprocessor( + { + "prompts": [ + "What is the capital of India?", + "What is the capital of Spain?" + ], + "responses": ["New Delhi", "Madrid"], + } + ) + + # Apply preprocessing to a `tf.data.Dataset`. + features = { + "prompts": [ + "What is the capital of India?", + "What is the capital of Spain?" + ], + "responses": ["New Delhi", "Madrid"], + } + + ds = tf.data.Dataset.from_tensor_slices(features) + ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE) + + # Prepare tokens for generation (no end token). + preprocessor.generate_preprocess(["The quick brown fox jumped."]) + + # Map generation outputs back to strings. + preprocessor.generate_postprocess({ + 'token_ids': np.array([[2, 818, 3823, 8864, 37423, 32694, 236761, 0]]), + 'padding_mask': np.array([[ 1, 1, 1, 1, 1, 1, 1, 0]]), + }) + + # === Vision and Language === + # Load the preprocessor from a preset. + preprocessor = keras_hub.models.Gemma3nCausalLMPreprocessor.from_preset( + "gemma3n_instruct_4b" + ) + + # Text-only inputs (unbatched). + preprocessor( + { + "prompts": "What is the capital of India?", + "responses": "New Delhi", + } + ) + + # Text-only inputs (batched). + preprocessor( + { + "prompts": [ + "What is the capital of India?", + "What is the capital of Spain?" + ], + "responses": ["New Delhi", "Madrid"], + } + ) + + # Unbatched inputs, with one image. 
+ preprocessor( + { + "prompts": "this is a lily ", + "responses": "pristine!", + "images": np.ones((768, 768, 3), dtype="float32") + } + ) + + # Unbatched inputs, with two images. + preprocessor( + { + "prompts": "lily: , sunflower: ", + "responses": "pristine!", + "images": [ + np.ones((768, 768, 3), dtype="float32"), + np.ones((768, 768, 3), dtype="float32") + ], + } + ) + + # Batched inputs, one image per prompt. + preprocessor( + { + "prompts": [ + "this is a lily: ", + "this is a sunflower: " + ], + "responses": ["pristine!", "radiant!"], + "images": [ + np.ones((768, 768, 3), dtype="float32"), + np.ones((768, 768, 3), dtype="float32") + ] + } + ) + + # === Audio and Language === + # Unbatched inputs, with one audio clip. + preprocessor( + { + "prompts": "transcribe this: ", + "responses": "hello world", + "audios": np.ones((16000,), dtype="float32") + } + ) + + # === Vision, Audio and Language === + # Unbatched inputs, with one image and one audio. + preprocessor( + { + "prompts": "image: , audio: ", + "responses": "multimodal!", + "images": np.ones((768, 768, 3), dtype="float32"), + "audios": np.ones((16000,), dtype="float32") + } + ) + ``` + """ + + backbone_cls = Gemma3nBackbone + tokenizer_cls = Gemma3nTokenizer + image_converter_cls = Gemma3nImageConverter + + def __init__( + self, + tokenizer, + image_converter=None, + audio_converter=None, + sequence_length=1024, + add_start_token=True, + add_end_token=True, + max_images_per_prompt=2, + num_vision_tokens_per_image=256, + max_audios_per_prompt=2, + num_audio_tokens_per_audio=188, + **kwargs, + ): + super().__init__( + tokenizer=tokenizer, + sequence_length=sequence_length, + add_start_token=add_start_token, + add_end_token=add_end_token, + **kwargs, + ) + # Validate sequence_length for multimodal inputs. + total_multimodal_tokens = ( + max_images_per_prompt * num_vision_tokens_per_image + + max_audios_per_prompt * num_audio_tokens_per_audio + ) + if ( + image_converter is not None or audio_converter is not None + ) and sequence_length <= total_multimodal_tokens: + raise ValueError( + "`sequence_length` should be greater than " + "`max_images_per_prompt * num_vision_tokens_per_image + " + "max_audios_per_prompt * num_audio_tokens_per_audio`. " + f"Received: `sequence_length` = {sequence_length}, " + f"`max_images_per_prompt` = {max_images_per_prompt}, " + f"`num_vision_tokens_per_image` = {num_vision_tokens_per_image}, " # noqa: E501 + f"`max_audios_per_prompt` = {max_audios_per_prompt}, " + f"`num_audio_tokens_per_audio` = {num_audio_tokens_per_audio}" + ) + self.image_converter = image_converter + self.audio_converter = audio_converter + self.max_images_per_prompt = max_images_per_prompt + self.num_vision_tokens_per_image = num_vision_tokens_per_image + self.max_audios_per_prompt = max_audios_per_prompt + self.num_audio_tokens_per_audio = num_audio_tokens_per_audio + # Determine model type. + self.text_only_model = ( + self.image_converter is None and self.audio_converter is None + ) + # Special tokens for images. + self.image_placeholder = self.tokenizer.image_placeholder + self.start_of_image_token = self.tokenizer.start_of_image_token + self.end_of_image_token = self.tokenizer.end_of_image_token + # Special tokens for audio. 
+ self.audio_placeholder = self.tokenizer.audio_placeholder + self.start_of_audio_token = self.tokenizer.start_of_audio_token + self.end_of_audio_token = self.tokenizer.end_of_audio_token + + def build(self, input_shape): + # Defer packer creation to `build()` so that we can be sure tokenizer + # assets have loaded when restoring a saved model. + self.packer = MultiSegmentPacker( + start_value=self.tokenizer.start_token_id, + end_value=self.tokenizer.end_token_id, + pad_value=self.tokenizer.pad_token_id, + sep_value=[], + sequence_length=self.sequence_length, + ) + self.built = True + + def _get_vision_indices(self, vision_mask): + """Computes indices given vision mask, and pads with 0. + + If `vision_mask` is + + ``` + [ + [False, True, True], [False, True, False], [False, False, False] + ] + ``` + + , then the output will be: + + ``` + [ + [1, 2, 0], [1, 0, 0], [0, 0, 0] + ] + ``` + """ + batch_size, sequence_length = vision_mask.shape + vision_mask_flattened = tf.reshape(vision_mask, [-1]) + vision_indices = tf.where(vision_mask_flattened)[..., 0] + vision_indices = tf.cast(vision_indices, dtype=tf.int32) + row_lengths = tf.math.reduce_sum( + tf.cast(vision_mask, dtype=vision_indices.dtype), axis=1 + ) + batched_vision_indices = tf.RaggedTensor.from_row_lengths( + values=vision_indices, + row_lengths=row_lengths, + ) + to_subtract = tf.math.scalar_mul( + scalar=tf.cast(sequence_length, dtype=tf.int32), + x=tf.range( + start=0, + limit=tf.shape(vision_mask)[0], + dtype=tf.int32, + ), + ) + # All indices should be independent of other samples in the batch. + batched_vision_indices = tf.math.subtract( + batched_vision_indices, + tf.expand_dims(to_subtract, axis=-1), + ) + # Pad the indices. + batched_vision_indices = batched_vision_indices.to_tensor( + shape=[ + batch_size, + self.max_images_per_prompt * self.num_vision_tokens_per_image, + ], + default_value=0, + ) + return batched_vision_indices + + def _get_audio_indices(self, audio_mask): + """Computes indices given audio mask, and pads with 0. + + Similar to _get_vision_indices but for audio tokens. + """ + batch_size, sequence_length = audio_mask.shape + audio_mask_flattened = tf.reshape(audio_mask, [-1]) + audio_indices = tf.where(audio_mask_flattened)[..., 0] + audio_indices = tf.cast(audio_indices, dtype=tf.int32) + row_lengths = tf.math.reduce_sum( + tf.cast(audio_mask, dtype=audio_indices.dtype), axis=1 + ) + batched_audio_indices = tf.RaggedTensor.from_row_lengths( + values=audio_indices, + row_lengths=row_lengths, + ) + to_subtract = tf.math.scalar_mul( + scalar=tf.cast(sequence_length, dtype=tf.int32), + x=tf.range( + start=0, + limit=tf.shape(audio_mask)[0], + dtype=tf.int32, + ), + ) + # All indices should be independent of other samples in the batch. + batched_audio_indices = tf.math.subtract( + batched_audio_indices, + tf.expand_dims(to_subtract, axis=-1), + ) + # Pad the indices. + batched_audio_indices = batched_audio_indices.to_tensor( + shape=[ + batch_size, + self.max_audios_per_prompt * self.num_audio_tokens_per_audio, + ], + default_value=0, + ) + return batched_audio_indices + + def _format_output( + self, + images, + audios, + input_features, + input_features_mask, + token_ids, + vision_mask, + audio_mask, + response_mask, + padding_mask, + return_labels=False, + text_only_input=False, + batched=False, + ): + if return_labels: + # Target `y` will be the next token. + y = token_ids[..., 1:] + # Only compute the loss for labels in the response. 
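+            # For example, for packed ids [bos, p1, p2, r1, eos] with
+            # response mask [0, 0, 0, 1, 1], the inputs become
+            # [bos, p1, p2, r1], the labels [p1, p2, r1, eos], and the
+            # sample weights [0, 0, 1, 1], so the loss is only computed on
+            # response tokens. (Illustrative values.)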
+ sample_weight = response_mask[..., 1:] + # The last token does not have a next token. So, remove it. + token_ids = token_ids[..., :-1] + vision_mask = vision_mask[..., :-1] + audio_mask = audio_mask[..., :-1] + response_mask = response_mask[..., :-1] + padding_mask = padding_mask[..., :-1] + batch_size = tf.shape(vision_mask)[0] + if text_only_input or self.image_converter is None: + vision_indices = tf.zeros( + shape=[batch_size, 0], + dtype=tf.int32, + ) + else: + vision_indices = self._get_vision_indices(vision_mask=vision_mask) + if text_only_input or self.audio_converter is None: + audio_indices = tf.zeros( + shape=[batch_size, 0], + dtype=tf.int32, + ) + else: + audio_indices = self._get_audio_indices(audio_mask=audio_mask) + x = { + # Image + "images": images if batched else tf.squeeze(images, axis=0), + # Audio + "audios": audios if batched else tf.squeeze(audios, axis=0), + "input_features": ( + input_features + if batched + else tf.squeeze(input_features, axis=0) + ), + "input_features_mask": ( + input_features_mask + if batched + else tf.squeeze(input_features_mask, axis=0) + ), + # Text + "token_ids": ( + token_ids if batched else tf.squeeze(token_ids, axis=0) + ), + "vision_indices": ( + vision_indices + if batched + else tf.squeeze(vision_indices, axis=0) + ), + "audio_indices": ( + audio_indices if batched else tf.squeeze(audio_indices, axis=0) + ), + "vision_mask": ( + vision_mask if batched else tf.squeeze(vision_mask, axis=0) + ), + "audio_mask": ( + audio_mask if batched else tf.squeeze(audio_mask, axis=0) + ), + "padding_mask": ( + padding_mask if batched else tf.squeeze(padding_mask, axis=0) + ), + } + if return_labels: + if not batched: + y = tf.squeeze(y, axis=0) + sample_weight = tf.squeeze(sample_weight, 0) + return keras.utils.pack_x_y_sample_weight(x, y, sample_weight) + else: + return x + + def _preprocess_images(self, images, batched): + desired_height = self.image_converter.image_size[0] + desired_width = self.image_converter.image_size[1] + # Images can be lists/ragged tensors. We need to pad them/truncate them. + if isinstance(images, (list, np.ndarray)): + images = tf.ragged.constant(images) + elif isinstance(images, tf.RaggedTensor): + pass + elif isinstance(images, tf.Tensor): + images = tf.RaggedTensor.from_tensor(images) + else: + # Attempt to convert anyway. + try: + images = tf.RaggedTensor.from_tensor(images) + except: # noqa: E722 + raise ValueError( + "`images` should be a list, ragged tensor, dense tensor. " + f"Received: `type(images)` = {type(images)}" + ) + if not batched: + images = tf.expand_dims(images, axis=0) + # If the input is a list of images, instead of list of lists of images. + if len(images.shape) == 4: + images = tf.expand_dims(images, axis=1) + # Convert to dense tensor. + images = images.to_tensor( + shape=[None, self.max_images_per_prompt, None, None, 3], + default_value=0, + ) + # Resize, rescale, etc. the images. + original_images_shape = tf.shape(images) + # Before passing through image converter, we need to collapse the + # first two dimensions. + images = tf.reshape( + images, + [ + -1, + original_images_shape[-3], + original_images_shape[-2], + original_images_shape[-1], + ], + ) + images = self.image_converter(images) + if keras.config.backend() == "torch" and not isinstance( + images, tf.Tensor + ): + images = images.cpu() + # Recover the rank. 
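+        # The converter ran on a flattened batch of shape
+        # (batch * max_images_per_prompt, height, width, channels); restore
+        # the per-sample image axis so the output is
+        # (batch, max_images_per_prompt, height, width, channels).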
+ images = tf.reshape( + images, + [ + original_images_shape[0], + self.max_images_per_prompt, + desired_height, + desired_width, + original_images_shape[-1], + ], + ) + return images + + def _preprocess_audios(self, audios, batched): + if hasattr(audios, "cpu") and hasattr(audios, "numpy"): + audios = audios.cpu().numpy() + # Audios can be lists/ragged tensors. We need to pad them/truncate them. + if isinstance(audios, (list, np.ndarray)): + if isinstance(audios, np.ndarray) and audios.ndim == 1: + audios = [audios] + audios = tf.ragged.constant(audios, dtype=tf.float32) + elif isinstance(audios, tf.RaggedTensor): + pass + elif isinstance(audios, tf.Tensor): + if len(audios.shape) > 1: + audios = tf.RaggedTensor.from_tensor(audios) + else: + audios = tf.ragged.constant([audios.numpy()], dtype=tf.float32) + else: + # Attempt to convert anyway. + try: + audios = tf.convert_to_tensor(audios, dtype=tf.float32) + if len(audios.shape) == 1: + audios = tf.ragged.constant( + [audios.numpy()], dtype=tf.float32 + ) + else: + audios = tf.RaggedTensor.from_tensor(audios) + except: # noqa: E722 + raise ValueError( + "`audios` should be a list, ragged tensor, dense tensor. " + f"Received: `type(audios)` = {type(audios)}" + ) + if not batched: + audios = tf.expand_dims(audios, axis=0) + # If the input is a list of audio arrays, instead of list of lists. + if len(audios.shape) == 2: + audios = tf.expand_dims(audios, axis=1) + # Convert to dense tensor. + audios = audios.to_tensor( + shape=[None, self.max_audios_per_prompt, None], + default_value=0, + ) + # Process through audio converter. + original_audios_shape = tf.shape(audios) + batch_size = original_audios_shape[0] + num_audios = original_audios_shape[1] + # Flatten batch and audio dimensions for processing. + audios_flat = tf.reshape(audios, [-1, original_audios_shape[-1]]) + # Process audio through converter. + input_features, input_features_mask = self.audio_converter( + audios_flat, + padding="longest", + ) + # Reshape back to [batch_size, max_audios_per_prompt, ...]. + feature_shape = tf.shape(input_features) + input_features = tf.reshape( + input_features, + [batch_size, num_audios, feature_shape[1], feature_shape[2]], + ) + mask_shape = tf.shape(input_features_mask) + input_features_mask = tf.reshape( + input_features_mask, + [batch_size, num_audios, mask_shape[1]], + ) + return audios, input_features, input_features_mask + + @preprocessing_function + def call( + self, + x, + y=None, + sample_weight=None, + sequence_length=None, + ): + sequence_length = sequence_length or self.sequence_length + + # === Input extraction and validation === + # Extract text part of the input. + prompts, responses = x["prompts"], x["responses"] + tf.debugging.assert_shapes([(prompts, ("N",)), (responses, ("N",))]) + # Find out if the input is batched/not batched. + batched = True + if isinstance(prompts, str): + batched = False + prompts = [prompts] + responses = [responses] + if isinstance(prompts, tf.Tensor) and len(prompts.shape) == 0: + batched = False + prompts = tf.expand_dims(prompts, axis=0) + responses = tf.expand_dims(responses, axis=0) + # Extract images and audios from the input. + images = x.get("images", None) + audios = x.get("audios", None) + # Validate multimodal inputs. + if self.text_only_model and (images is not None or audios is not None): + raise ValueError( + "The initialized preprocessor/model is text-only, but " + "`images` or `audios` is not `None`." + ) + # Add image placeholder tokens. 
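+        # Each start-of-image marker in the prompt expands to the
+        # start-of-image token, `num_vision_tokens_per_image` copies of the
+        # image placeholder token, and the end-of-image token, wrapped in
+        # blank lines. With the default of 256 placeholders, a single image
+        # contributes 258 special tokens to the sequence.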
+ if not self.text_only_model and self.image_converter is not None: + prompts = tf.strings.regex_replace( + prompts, + self.start_of_image_token, + f"\n\n{self.start_of_image_token}" + + self.image_placeholder * self.num_vision_tokens_per_image + + f"{self.end_of_image_token}\n\n", + ) + # Add audio placeholder tokens. + if not self.text_only_model and self.audio_converter is not None: + prompts = tf.strings.regex_replace( + prompts, + self.start_of_audio_token, + f"\n\n{self.start_of_audio_token}" + + self.audio_placeholder * self.num_audio_tokens_per_audio + + f"{self.end_of_audio_token}\n\n", + ) + + # === Tokenization, padding, etc. === + # Tokenise the inputs. + prompts = self.tokenizer(prompts) + responses = self.tokenizer(responses) + # Padding. + token_ids, segment_ids = self.packer( + (prompts, responses), + sequence_length=sequence_length + 1, + add_start_value=self.add_start_token, + add_end_value=self.add_end_token, + ) + response_mask = segment_ids == 1 + padding_mask = token_ids != self.tokenizer.pad_token_id + + # === Text Model === + if self.text_only_model: + # The last token does not have a next token, so we truncate it out. + x = { + "token_ids": token_ids[..., :-1], + "padding_mask": padding_mask[..., :-1], + } + # Target `y` will be the next token. + y = token_ids[..., 1:] + # Only compute the loss for labels in the response. + sample_weight = response_mask[..., 1:] + # Squeeze if not batched. + if not batched: + x["token_ids"] = tf.squeeze(x["token_ids"], axis=0) + x["padding_mask"] = tf.squeeze(x["padding_mask"], axis=0) + y = tf.squeeze(y, axis=0) + sample_weight = tf.squeeze(sample_weight, axis=0) + + return keras.utils.pack_x_y_sample_weight(x, y, sample_weight) + + # === Multimodal processing === + batch_size = tf.shape(prompts)[0] + desired_height = ( + self.image_converter.image_size[0] if self.image_converter else 0 + ) + desired_width = ( + self.image_converter.image_size[1] if self.image_converter else 0 + ) + # Process vision. + if images is None and self.image_converter is not None: + images = tf.ones( + shape=[ + batch_size, + 0, + desired_height, + desired_width, + 3, + ], + dtype="float32", + ) + vision_mask = tf.zeros_like(token_ids, dtype=bool) + elif images is not None and self.image_converter is not None: + images = self._preprocess_images(images=images, batched=batched) + vision_mask = token_ids == self.tokenizer.image_placeholder_id + else: + # No image converter. + images = tf.ones( + shape=[ + batch_size, + 0, + 0, + 0, + 3, + ], + dtype="float32", + ) + vision_mask = tf.zeros_like(token_ids, dtype=bool) + # Process audio. + if audios is None and self.audio_converter is not None: + audios = tf.ones( + shape=[batch_size, 0, 0], + dtype="float32", + ) + input_features = tf.ones( + shape=[batch_size, 0, 0, 128], + dtype="float32", + ) + input_features_mask = tf.ones( + shape=[batch_size, 0, 0], + dtype="bool", + ) + audio_mask = tf.zeros_like(token_ids, dtype=bool) + elif audios is not None and self.audio_converter is not None: + audios, input_features, input_features_mask = ( + self._preprocess_audios(audios=audios, batched=batched) + ) + audio_mask = token_ids == self.tokenizer.audio_placeholder_id + else: + # No audio converter. 
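+            # Emit zero-length placeholder tensors so the returned
+            # dictionary keeps the same keys and ranks whether or not
+            # audio inputs are supported.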
+ audios = tf.ones( + shape=[batch_size, 0, 0], + dtype="float32", + ) + input_features = tf.ones( + shape=[batch_size, 0, 0, 128], + dtype="float32", + ) + input_features_mask = tf.ones( + shape=[batch_size, 0, 0], + dtype="bool", + ) + audio_mask = tf.zeros_like(token_ids, dtype=bool) + + return self._format_output( + images=images, + audios=audios, + input_features=input_features, + input_features_mask=input_features_mask, + token_ids=token_ids, + vision_mask=vision_mask, + audio_mask=audio_mask, + response_mask=response_mask, + padding_mask=padding_mask, + return_labels=True, + text_only_input=(images is None and audios is None), + batched=batched, + ) + + @preprocessing_function + def generate_preprocess( + self, + x, + sequence_length=None, + ): + """Convert strings to integer token input for generation. + + Similar to calling the layer for training, this method takes in strings + or tensor strings, tokenizes and packs the input, and computes a padding + mask masking all inputs not filled in with a padded value. + + Unlike calling the layer for training, this method does not compute + labels and will never append a `tokenizer.end_token_id` to the end of + the sequence (as generation is expected to continue at the end of the + inputted prompt). + """ + if not self.built: + self.build(None) + # Extract inputs. + if isinstance(x, dict): + images = x.get("images", None) + audios = x.get("audios", None) + responses = x.get("responses", None) + prompts = x["prompts"] + else: + images = None + audios = None + responses = None + prompts = x + # Find out if the input is batched/not batched. + batched = True + if isinstance(prompts, str): + batched = False + prompts = [prompts] + if responses is not None: + responses = [responses] + if isinstance(prompts, tf.Tensor) and len(prompts.shape) == 0: + batched = False + prompts = tf.expand_dims(prompts, axis=0) + if responses is not None: + responses = tf.expand_dims(responses, axis=0) + # Validate multimodal inputs. + if self.text_only_model and (images is not None or audios is not None): + raise ValueError( + "The initialized preprocessor/model is text-only, but " + "`images` or `audios` is not `None`." + ) + # Add image placeholder tokens. + if not self.text_only_model and self.image_converter is not None: + prompts = tf.strings.regex_replace( + prompts, + self.start_of_image_token, + f"\n\n{self.start_of_image_token}" + + self.image_placeholder * self.num_vision_tokens_per_image + + f"{self.end_of_image_token}\n\n", + ) + # Add audio placeholder tokens. + if not self.text_only_model and self.audio_converter is not None: + prompts = tf.strings.regex_replace( + prompts, + self.start_of_audio_token, + f"\n\n{self.start_of_audio_token}" + + self.audio_placeholder * self.num_audio_tokens_per_audio + + f"{self.end_of_audio_token}\n\n", + ) + + # === Tokenization, padding, etc. === + prompts = self.tokenizer(prompts) + if responses is not None: + responses = self.tokenizer(responses) + segments = (prompts, responses) + else: + segments = (prompts,) + # Padding. 
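+        # Unlike training preprocessing, no end token is appended and the
+        # packer packs to `sequence_length` directly (no extra position for
+        # label shifting), since generation continues from the end of the
+        # prompt.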
+ token_ids, segment_ids = self.packer( + segments, + sequence_length=sequence_length, + add_end_value=False, + ) + response_mask = segment_ids == 1 + padding_mask = token_ids != self.tokenizer.pad_token_id + + # === Text Model === + if self.text_only_model: + return { + "token_ids": ( + token_ids if batched else tf.squeeze(token_ids, axis=0) + ), + "padding_mask": ( + padding_mask + if batched + else tf.squeeze(padding_mask, axis=0) + ), + } + + # === Multimodal processing === + batch_size = tf.shape(prompts)[0] + desired_height = ( + self.image_converter.image_size[0] if self.image_converter else 0 + ) + desired_width = ( + self.image_converter.image_size[1] if self.image_converter else 0 + ) + # Process vision. + if images is None and self.image_converter is not None: + images = tf.ones( + shape=[ + batch_size, + 0, + desired_height, + desired_width, + 3, + ], + dtype="float32", + ) + vision_mask = tf.zeros_like(token_ids, dtype=bool) + elif images is not None and self.image_converter is not None: + images = self._preprocess_images(images=images, batched=batched) + vision_mask = token_ids == self.tokenizer.image_placeholder_id + else: + # No image converter. + images = tf.ones( + shape=[ + batch_size, + 0, + 0, + 0, + 3, + ], + dtype="float32", + ) + vision_mask = tf.zeros_like(token_ids, dtype=bool) + # Process audio. + if audios is None and self.audio_converter is not None: + audios = tf.ones( + shape=[batch_size, 0, 0], + dtype="float32", + ) + input_features = tf.ones( + shape=[batch_size, 0, 0, 128], + dtype="float32", + ) + input_features_mask = tf.ones( + shape=[batch_size, 0, 0], + dtype="bool", + ) + audio_mask = tf.zeros_like(token_ids, dtype=bool) + elif audios is not None and self.audio_converter is not None: + audios, input_features, input_features_mask = ( + self._preprocess_audios(audios=audios, batched=batched) + ) + audio_mask = token_ids == self.tokenizer.audio_placeholder_id + else: + # No audio converter. + audios = tf.ones( + shape=[batch_size, 0, 0], + dtype="float32", + ) + input_features = tf.ones( + shape=[batch_size, 0, 0, 128], + dtype="float32", + ) + input_features_mask = tf.ones( + shape=[batch_size, 0, 0], + dtype="bool", + ) + audio_mask = tf.zeros_like(token_ids, dtype=bool) + + return self._format_output( + images=images, + audios=audios, + input_features=input_features, + input_features_mask=input_features_mask, + token_ids=token_ids, + vision_mask=vision_mask, + audio_mask=audio_mask, + response_mask=response_mask, + padding_mask=padding_mask, + return_labels=False, + text_only_input=(images is None and audios is None), + batched=batched, + ) + + def get_config(self): + config = super().get_config() + config.update( + { + "num_vision_tokens_per_image": self.num_vision_tokens_per_image, + "max_images_per_prompt": self.max_images_per_prompt, + "num_audio_tokens_per_audio": self.num_audio_tokens_per_audio, + "max_audios_per_prompt": self.max_audios_per_prompt, + } + ) + return config + + @preprocessing_function + def generate_postprocess( + self, + x, + ): + """Convert integer token output to strings for generation. + + This method reverses `generate_preprocess()`, by first removing all + padding and start/end tokens, and then converting the integer sequence + back to a string. 
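+
+        Note that the start-of-image and start-of-audio tokens are not
+        stripped, so decoded outputs keep the image and audio markers from
+        the prompt.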
+ """ + if not self.built: + self.build(None) + token_ids, padding_mask = x["token_ids"], x["padding_mask"] + ids_to_strip = self.tokenizer.special_token_ids + if self.tokenizer.start_of_image_token_id in ids_to_strip: + ids_to_strip.remove(self.tokenizer.start_of_image_token_id) + if self.tokenizer.start_of_audio_token_id in ids_to_strip: + ids_to_strip.remove(self.tokenizer.start_of_audio_token_id) + token_ids = strip_to_ragged(token_ids, padding_mask, ids_to_strip) + return self.tokenizer.detokenize(token_ids) + + @property + def max_images_per_prompt(self): + return self._max_images_per_prompt + + @max_images_per_prompt.setter + def max_images_per_prompt(self, value): + self._max_images_per_prompt = value + + @property + def max_audios_per_prompt(self): + return self._max_audios_per_prompt + + @max_audios_per_prompt.setter + def max_audios_per_prompt(self, value): + self._max_audios_per_prompt = value diff --git a/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor_test.py b/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor_test.py new file mode 100644 index 0000000000..bee9d63217 --- /dev/null +++ b/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor_test.py @@ -0,0 +1,321 @@ +import numpy as np + +from keras_hub.src.models.gemma3n.gemma3n_audio_converter import ( + Gemma3nAudioConverter, +) +from keras_hub.src.models.gemma3n.gemma3n_causal_lm_preprocessor import ( + Gemma3nCausalLMPreprocessor, +) +from keras_hub.src.models.gemma3n.gemma3n_image_converter import ( + Gemma3nImageConverter, +) +from keras_hub.src.tests.mocks.mock_gemma3n_tokenizer import ( + MockGemma3nTokenizer, +) +from keras_hub.src.tests.test_case import TestCase + + +class Gemma3nCausalLMPreprocessorTest(TestCase): + def setUp(self): + # Easier to use a mock here, instead of trying to figure out why + # SentencePiece cannot tokenize and detokenize special tokens + # properly. 
+ self.tokenizer = MockGemma3nTokenizer() + + # === Text Preprocessor === + self.init_text_kwargs = { + "tokenizer": self.tokenizer, + "image_converter": None, + "audio_converter": None, + "sequence_length": 8, + "max_images_per_prompt": 0, + "num_vision_tokens_per_image": 0, + "max_audios_per_prompt": 0, + "num_audio_tokens_per_audio": 0, + } + self.text_preprocessor = Gemma3nCausalLMPreprocessor( + tokenizer=self.tokenizer, + image_converter=None, + audio_converter=None, + sequence_length=100, + max_images_per_prompt=0, + num_vision_tokens_per_image=0, + max_audios_per_prompt=0, + num_audio_tokens_per_audio=0, + ) + + # === Text + Image Preprocessor === + self.image_converter = Gemma3nImageConverter( + image_size=(4, 4), + ) + self.init_vision_kwargs = { + "tokenizer": self.tokenizer, + "image_converter": self.image_converter, + "audio_converter": None, + "sequence_length": 20, + "max_images_per_prompt": 2, + "num_vision_tokens_per_image": 5, + "max_audios_per_prompt": 0, + "num_audio_tokens_per_audio": 0, + } + + # === Text + Audio Preprocessor === + self.audio_converter = Gemma3nAudioConverter( + feature_size=16, + sampling_rate=16000, + ) + self.init_audio_kwargs = { + "tokenizer": self.tokenizer, + "image_converter": None, + "audio_converter": self.audio_converter, + "sequence_length": 20, + "max_images_per_prompt": 0, + "num_vision_tokens_per_image": 0, + "max_audios_per_prompt": 2, + "num_audio_tokens_per_audio": 3, + } + + # === Text + Image + Audio Preprocessor === + self.init_multimodal_kwargs = { + "tokenizer": self.tokenizer, + "image_converter": self.image_converter, + "audio_converter": self.audio_converter, + "sequence_length": 30, + "max_images_per_prompt": 2, + "num_vision_tokens_per_image": 5, + "max_audios_per_prompt": 2, + "num_audio_tokens_per_audio": 3, + } + + def test_text_preprocessor_basics(self): + input_data = { + "prompts": ["the quick brown fox"], + "responses": ["round"], + } + self.run_preprocessing_layer_test( + cls=Gemma3nCausalLMPreprocessor, + init_kwargs=self.init_text_kwargs, + input_data=input_data, + expected_output=( + { + "token_ids": [[1, 9, 14, 10, 12, 15, 2, 0]], + "padding_mask": [[1, 1, 1, 1, 1, 1, 1, 0]], + }, + [[9, 14, 10, 12, 15, 2, 0, 0]], # Labels shifted. + [[0, 0, 0, 0, 1, 1, 0, 0]], # Zero out unlabeled examples. + ), + ) + + def test_vision_preprocessor_basics(self): + input_data = { + "prompts": ["the quick brown fox "], + "responses": ["round"], + "images": [[np.ones((8, 8, 3))]], + } + output = self.run_preprocessing_layer_test( + cls=Gemma3nCausalLMPreprocessor, + init_kwargs=self.init_vision_kwargs, + input_data=input_data, + return_output=True, + ) + expected_output = [ + { + "vision_indices": [list(range(7, 12)) + [0] * 5], + "audio_indices": [[]], + "vision_mask": [[0] * 7 + [1] * 5 + [0] * 8], + "audio_mask": [[0] * 20], + "token_ids": [ + [1, 9, 14, 10, 12, 16, 4] + + [8] * 5 + + [5, 16, 15, 2] + + [0] * 4 + ], + "padding_mask": [[1] * 16 + [0] * 4], + }, + [ + [9, 14, 10, 12, 16, 4] + [8] * 5 + [5, 16, 15, 2] + [0] * 5 + ], # Labels shifted. + [[0] * 13 + [1] * 2 + [0] * 5], # Zero out unlabeled examples. + ] + # Check shape for images. + self.assertAllEqual(output[0]["images"].shape, [1, 2, 4, 4, 3]) + # Check shape for audios (should be empty). + self.assertAllEqual(output[0]["audios"].shape, [1, 0, 0]) + self.assertAllEqual(output[0]["input_features"].shape, [1, 0, 0, 128]) + self.assertAllEqual(output[0]["input_features_mask"].shape, [1, 0, 0]) + # For everything else, check the actual values. 
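+        # The converter-produced tensors were already verified by shape
+        # above, so drop them before comparing the remaining entries
+        # against `expected_output`.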
+ del output[0]["images"] + del output[0]["audios"] + del output[0]["input_features"] + del output[0]["input_features_mask"] + for key in expected_output[0].keys(): + self.assertAllEqual(output[0][key], expected_output[0][key]) + self.assertAllEqual(output[1], expected_output[1]) + self.assertAllEqual(output[2], expected_output[2]) + + def test_audio_preprocessor_basics(self): + input_data = { + "prompts": ["the quick "], + "responses": ["brown"], + "audios": [[np.ones((16000,))]], + } + preprocessor = Gemma3nCausalLMPreprocessor(**self.init_audio_kwargs) + output = preprocessor(input_data) + # Check that we have the right keys. + self.assertIn("token_ids", output[0]) + self.assertIn("vision_indices", output[0]) + self.assertIn("audio_indices", output[0]) + self.assertIn("vision_mask", output[0]) + self.assertIn("audio_mask", output[0]) + self.assertIn("padding_mask", output[0]) + self.assertIn("images", output[0]) + self.assertIn("audios", output[0]) + self.assertIn("input_features", output[0]) + self.assertIn("input_features_mask", output[0]) + # Check shapes for images (should be empty). + self.assertAllEqual(output[0]["images"].shape[0:2], [1, 0]) + # Check shapes for audios (should have data). + self.assertAllEqual(output[0]["audios"].shape[0:2], [1, 2]) + self.assertEqual(output[0]["input_features"].shape[0], 1) + self.assertEqual(output[0]["input_features_mask"].shape[0], 1) + + def test_multimodal_preprocessor_basics(self): + input_data = { + "prompts": ["image audio "], + "responses": ["test"], + "images": [[np.ones((8, 8, 3))]], + "audios": [[np.ones((16000,))]], + } + preprocessor = Gemma3nCausalLMPreprocessor( + **self.init_multimodal_kwargs + ) + output = preprocessor(input_data) + # Check that we have all the right keys. + self.assertIn("token_ids", output[0]) + self.assertIn("vision_indices", output[0]) + self.assertIn("audio_indices", output[0]) + self.assertIn("vision_mask", output[0]) + self.assertIn("audio_mask", output[0]) + self.assertIn("padding_mask", output[0]) + self.assertIn("images", output[0]) + self.assertIn("audios", output[0]) + self.assertIn("input_features", output[0]) + self.assertIn("input_features_mask", output[0]) + # Check shapes for images. + self.assertAllEqual(output[0]["images"].shape, [1, 2, 4, 4, 3]) + # Check shapes for audios. + self.assertAllEqual(output[0]["audios"].shape[0:2], [1, 2]) + self.assertEqual(output[0]["input_features"].shape[0], 1) + self.assertEqual(output[0]["input_features_mask"].shape[0], 1) + # Check that both vision and audio masks have some True values. 
+ vision_mask_sum = np.sum(np.array(output[0]["vision_mask"])) + audio_mask_sum = np.sum(np.array(output[0]["audio_mask"])) + self.assertGreater(vision_mask_sum, 0) + self.assertGreater(audio_mask_sum, 0) + + def test_text_no_start_end_token(self): + input_data = { + "prompts": ["the quick brown fox"] * 4, + "responses": ["round"] * 4, + } + preprocessor = Gemma3nCausalLMPreprocessor( + **self.init_text_kwargs, + add_start_token=False, + add_end_token=False, + ) + x, y, sw = preprocessor(input_data) + self.assertAllEqual(x["token_ids"], [[9, 14, 10, 12, 15, 0, 0, 0]] * 4) + self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 0, 0, 0]] * 4) + self.assertAllEqual(y, [[14, 10, 12, 15, 0, 0, 0, 0]] * 4) + self.assertAllEqual(sw, [[0, 0, 0, 1, 0, 0, 0, 0]] * 4) + + def test_text_generate_preprocess(self): + input_data = "the quick brown fox" + preprocessor = Gemma3nCausalLMPreprocessor(**self.init_text_kwargs) + x = preprocessor.generate_preprocess(input_data) + self.assertAllEqual(x["token_ids"], [1, 9, 14, 10, 12, 0, 0, 0]) + self.assertAllEqual(x["padding_mask"], [1, 1, 1, 1, 1, 0, 0, 0]) + + def test_vision_generate_preprocess(self): + input_data = { + "prompts": "the quick brown fox ", + "images": np.ones((8, 8, 3)), + } + preprocessor = Gemma3nCausalLMPreprocessor(**self.init_vision_kwargs) + x = preprocessor.generate_preprocess(input_data) + self.assertAllEqual( + x["token_ids"], + [1, 9, 14, 10, 12, 16, 4] + [8] * 5 + [5, 16] + [0] * 6, + ) + self.assertAllEqual(x["padding_mask"], [1] * 14 + [0] * 6) + self.assertAllEqual(x["vision_indices"], list(range(7, 12)) + [0] * 5) + self.assertAllEqual(x["vision_mask"], [0] * 7 + [1] * 5 + [0] * 8) + self.assertAllEqual(x["images"].shape, [2, 4, 4, 3]) + + def test_audio_generate_preprocess(self): + input_data = { + "prompts": "the quick ", + "audios": np.ones((16000,)), + } + preprocessor = Gemma3nCausalLMPreprocessor(**self.init_audio_kwargs) + x = preprocessor.generate_preprocess(input_data) + # Check that we have the right keys. 
+ self.assertIn("token_ids", x) + self.assertIn("audio_indices", x) + self.assertIn("audio_mask", x) + self.assertIn("audios", x) + self.assertIn("input_features", x) + self.assertIn("input_features_mask", x) + + def test_text_generate_postprocess(self): + input_data = { + "token_ids": [1, 9, 14, 10, 12, 0, 0, 0], + "padding_mask": [1, 1, 1, 1, 1, 0, 0, 0], + } + preprocessor = Gemma3nCausalLMPreprocessor(**self.init_text_kwargs) + x = preprocessor.generate_postprocess(input_data) + self.assertAllEqual(x, "the quick brown fox") + + def test_vision_generate_postprocess(self): + input_data = { + "token_ids": [1, 9, 14, 10, 12, 16, 4] + + [8] * 5 + + [5, 16] + + [0] * 6, + "padding_mask": [1] * 14 + [0] * 6, + } + preprocessor = Gemma3nCausalLMPreprocessor(**self.init_text_kwargs) + x = preprocessor.generate_postprocess(input_data) + self.assertAllEqual(x, "the quick brown fox \n\n ") + + def test_invalid_shape(self): + with self.assertRaises(ValueError): + input_data = { + "prompts": ["hello world", "this is testing"], + "responses": [""], + } + self.text_preprocessor(input_data) + with self.assertRaises(ValueError): + input_data = { + "prompts": ["hello world", "this is testing"], + "responses": ["hello", "", ""], + } + self.text_preprocessor(input_data) + + def test_text_only_with_images_raises_error(self): + with self.assertRaises(ValueError): + input_data = { + "prompts": ["hello"], + "responses": ["world"], + "images": [np.ones((8, 8, 3))], + } + self.text_preprocessor(input_data) + + def test_text_only_with_audios_raises_error(self): + with self.assertRaises(ValueError): + input_data = { + "prompts": ["hello"], + "responses": ["world"], + "audios": [np.ones((16000,))], + } + self.text_preprocessor(input_data) diff --git a/keras_hub/src/models/gemma3n/gemma3n_causal_lm_test.py b/keras_hub/src/models/gemma3n/gemma3n_causal_lm_test.py new file mode 100644 index 0000000000..c8beeaae3a --- /dev/null +++ b/keras_hub/src/models/gemma3n/gemma3n_causal_lm_test.py @@ -0,0 +1,364 @@ +import copy +from unittest.mock import patch + +import keras +import numpy as np +from absl.testing import parameterized +from keras import ops + +from keras_hub.src.models.gemma3n.gemma3n_audio_converter import ( + Gemma3nAudioConverter, +) +from keras_hub.src.models.gemma3n.gemma3n_audio_encoder import ( + Gemma3nAudioEncoder, +) +from keras_hub.src.models.gemma3n.gemma3n_backbone import Gemma3nBackbone +from keras_hub.src.models.gemma3n.gemma3n_causal_lm import Gemma3nCausalLM +from keras_hub.src.models.gemma3n.gemma3n_causal_lm_preprocessor import ( + Gemma3nCausalLMPreprocessor, +) +from keras_hub.src.models.gemma3n.gemma3n_image_converter import ( + Gemma3nImageConverter, +) +from keras_hub.src.models.mobilenetv5.mobilenetv5_backbone import ( + MobileNetV5Backbone, +) +from keras_hub.src.models.mobilenetv5.mobilenetv5_builder import ( + convert_arch_def_to_stackwise, +) +from keras_hub.src.tests.mocks.mock_gemma3n_tokenizer import ( + MockGemma3nTokenizer, +) +from keras_hub.src.tests.test_case import TestCase +from keras_hub.src.utils.keras_utils import fused_attention_op_available +from keras_hub.src.utils.keras_utils import gpu_supports_fused_attention_op +from keras_hub.src.utils.keras_utils import running_on_gpu + + +class Gemma3nCausalLMTest(TestCase, parameterized.TestCase): + def setUp(self): + self.tokenizer = MockGemma3nTokenizer() + + # === Vision Encoder === + vision_arch_def = [["er_r1_k3_s1_e1_c8"]] + stackwise_params = convert_arch_def_to_stackwise(vision_arch_def) + vision_encoder = 
MobileNetV5Backbone( + **stackwise_params, + num_features=4, + image_shape=(16, 16, 3), + use_msfa=False, + ) + + # === Audio Encoder === + audio_encoder = Gemma3nAudioEncoder( + hidden_size=4, + input_feat_size=16, + sscp_conv_channel_size=[2, 4], + sscp_conv_kernel_size=[(1, 1), (1, 1)], + sscp_conv_stride_size=[(2, 2), (2, 2)], + sscp_conv_group_norm_eps=1e-5, + conf_num_hidden_layers=1, + rms_norm_eps=1e-6, + gradient_clipping=1.0, + conf_residual_weight=0.5, + conf_num_attention_heads=1, + conf_attention_chunk_size=2, + conf_attention_context_right=1, + conf_attention_context_left=1, + conf_attention_logit_cap=50.0, + conf_conv_kernel_size=3, + conf_reduction_factor=1, + ) + + # === Text-Only === + self.text_preprocessor = Gemma3nCausalLMPreprocessor( + tokenizer=self.tokenizer, + image_converter=None, + audio_converter=None, + sequence_length=20, + max_images_per_prompt=0, + num_vision_tokens_per_image=0, + max_audios_per_prompt=0, + num_audio_tokens_per_audio=0, + ) + text_backbone_init_kwargs = { + "text_vocab_size": self.text_preprocessor.tokenizer.vocabulary_size(), # noqa: E501 + "text_hidden_size": 4, + "num_hidden_layers": 1, + "pad_token_id": 0, + "num_attention_heads": 1, + "num_key_value_heads": 1, + "head_dim": 4, + "intermediate_size": [8], + "hidden_activation": "gelu_approximate", + "layer_types": ["full_attention"], + "sliding_window": 4, + "rope_theta": 10000.0, + "max_position_embeddings": 20, + "vocab_size_per_layer_input": 10, + "hidden_size_per_layer_input": 2, + "altup_num_inputs": 2, + "laurel_rank": 1, + } + self.text_backbone = Gemma3nBackbone(**text_backbone_init_kwargs) + self.text_init_kwargs = { + "preprocessor": self.text_preprocessor, + "backbone": self.text_backbone, + } + self.text_train_data = ( + { + "prompts": ["the quick brown fox", "the quick brown fox"], + "responses": ["the earth is round", "the earth is round"], + }, + ) + self.text_input_data = self.text_preprocessor(*self.text_train_data)[0] + + # === Vision + Text === + self.image_converter = Gemma3nImageConverter( + image_size=(16, 16), + ) + self.vision_preprocessor = Gemma3nCausalLMPreprocessor( + tokenizer=self.tokenizer, + image_converter=self.image_converter, + audio_converter=None, + sequence_length=20, + max_images_per_prompt=2, + num_vision_tokens_per_image=4, + max_audios_per_prompt=0, + num_audio_tokens_per_audio=0, + ) + vision_backbone_init_kwargs = copy.deepcopy(text_backbone_init_kwargs) + vision_backbone_init_kwargs["vision_encoder_config"] = ( + vision_encoder.get_config() + ) + vision_backbone_init_kwargs["vision_hidden_size"] = 8 + self.vision_backbone = Gemma3nBackbone(**vision_backbone_init_kwargs) + self.vision_init_kwargs = { + "preprocessor": self.vision_preprocessor, + "backbone": self.vision_backbone, + } + self.vision_train_data = ( + { + "prompts": [ + "the quick brown fox ", + "the quick brown fox", + ], + "responses": ["the earth is round", "the earth is round"], + "images": [np.ones((8, 8, 3)), np.ones((8, 8, 3))], + }, + ) + self.vision_input_data = self.vision_preprocessor( + *self.vision_train_data + )[0] + + # === Audio + Text === + self.audio_converter = Gemma3nAudioConverter( + feature_size=16, + sampling_rate=16000, + ) + self.audio_preprocessor = Gemma3nCausalLMPreprocessor( + tokenizer=self.tokenizer, + image_converter=None, + audio_converter=self.audio_converter, + sequence_length=20, + max_images_per_prompt=0, + num_vision_tokens_per_image=0, + max_audios_per_prompt=2, + num_audio_tokens_per_audio=3, + ) + audio_backbone_init_kwargs = 
copy.deepcopy(text_backbone_init_kwargs) + audio_backbone_init_kwargs["audio_encoder_config"] = ( + audio_encoder.get_config() + ) + audio_backbone_init_kwargs["audio_hidden_size"] = 4 + self.audio_backbone = Gemma3nBackbone(**audio_backbone_init_kwargs) + self.audio_init_kwargs = { + "preprocessor": self.audio_preprocessor, + "backbone": self.audio_backbone, + } + self.audio_train_data = ( + { + "prompts": [ + "the quick ", + "the quick brown fox", + ], + "responses": ["brown", "the earth is round"], + "audios": [np.ones((16000,)), np.ones((16000,))], + }, + ) + self.audio_input_data = self.audio_preprocessor(*self.audio_train_data)[ + 0 + ] + + # === Multimodal (Vision + Audio + Text) === + self.multimodal_preprocessor = Gemma3nCausalLMPreprocessor( + tokenizer=self.tokenizer, + image_converter=self.image_converter, + audio_converter=self.audio_converter, + sequence_length=30, + max_images_per_prompt=2, + num_vision_tokens_per_image=4, + max_audios_per_prompt=2, + num_audio_tokens_per_audio=3, + ) + multimodal_backbone_init_kwargs = copy.deepcopy( + text_backbone_init_kwargs + ) + multimodal_backbone_init_kwargs["vision_encoder_config"] = ( + vision_encoder.get_config() + ) + multimodal_backbone_init_kwargs["vision_hidden_size"] = 8 + multimodal_backbone_init_kwargs["audio_encoder_config"] = ( + audio_encoder.get_config() + ) + multimodal_backbone_init_kwargs["audio_hidden_size"] = 4 + multimodal_backbone_init_kwargs["max_position_embeddings"] = 30 + self.multimodal_backbone = Gemma3nBackbone( + **multimodal_backbone_init_kwargs + ) + self.multimodal_init_kwargs = { + "preprocessor": self.multimodal_preprocessor, + "backbone": self.multimodal_backbone, + } + self.multimodal_train_data = ( + { + "prompts": [ + "image audio ", + "the quick brown fox", + ], + "responses": ["test", "the earth is round"], + "images": [np.ones((8, 8, 3)), np.ones((8, 8, 3))], + "audios": [np.ones((16000,)), np.ones((16000,))], + }, + ) + self.multimodal_input_data = self.multimodal_preprocessor( + *self.multimodal_train_data + )[0] + + @parameterized.named_parameters( + ("text_only", "text_only"), + ("vision_text", "vision_text"), + ("audio_text", "audio_text"), + ("multimodal", "multimodal"), + ) + def test_causal_lm_basics(self, modality_type): + if modality_type == "text_only": + init_kwargs = self.text_init_kwargs + train_data = self.text_train_data + expected_vocab_size = self.tokenizer.vocabulary_size() + elif modality_type == "vision_text": + init_kwargs = self.vision_init_kwargs + train_data = self.vision_train_data + expected_vocab_size = self.tokenizer.vocabulary_size() + elif modality_type == "audio_text": + init_kwargs = self.audio_init_kwargs + train_data = self.audio_train_data + expected_vocab_size = self.tokenizer.vocabulary_size() + else: # multimodal + init_kwargs = self.multimodal_init_kwargs + train_data = self.multimodal_train_data + expected_vocab_size = self.tokenizer.vocabulary_size() + self.run_task_test( + cls=Gemma3nCausalLM, + init_kwargs=init_kwargs, + train_data=train_data, + expected_output_shape=( + 2, + 20 if modality_type != "multimodal" else 30, + expected_vocab_size, + ), + ) + + def test_text_flash_attention_call(self): + if ( + keras.config.backend() != "jax" + or not fused_attention_op_available() + or not gpu_supports_fused_attention_op() + ): + self.skipTest("`flash_attention` testing requires the JAX backend.") + + with patch("keras.src.backend.nn.dot_product_attention") as mock_func: + causal_lm = Gemma3nCausalLM(**self.text_init_kwargs) + causal_lm.generate("the 
quick brown fox") + if running_on_gpu(): + mock_func.assert_called() + else: + mock_func.assert_not_called() + + def test_text_early_stopping(self): + causal_lm = Gemma3nCausalLM(**self.text_init_kwargs) + call_with_cache = causal_lm.call_with_cache + + def wrapper(*args, **kwargs): + """Modify output logits to always favor end_token_id""" + logits, hidden_states, cache = call_with_cache(*args, **kwargs) + index = self.text_preprocessor.tokenizer.end_token_id + update = ops.ones_like(logits)[:, :, index] * 1.0e9 + update = ops.expand_dims(update, axis=-1) + logits = ops.slice_update(logits, (0, 0, index), update) + return logits, hidden_states, cache + + with patch.object(causal_lm, "call_with_cache", wraps=wrapper): + prompt = ["the quick brown fox", "the quick"] + output = causal_lm.generate(prompt) + # We should immediately abort and output the prompt. + self.assertEqual(prompt, output) + + def test_text_multitoken_stopping(self): + causal_lm = Gemma3nCausalLM(**self.text_init_kwargs) + call_with_cache = causal_lm.call_with_cache + + def wrapper(*args, **kwargs): + """Modify output logits to always favor end_token_id""" + logits, hidden_states, cache = call_with_cache(*args, **kwargs) + index = self.text_preprocessor.tokenizer.end_token_id + update = ops.ones_like(logits)[:, :, index] * 1.0e9 + update = ops.expand_dims(update, axis=-1) + logits = ops.slice_update(logits, (0, 0, index), update) + return logits, hidden_states, cache + + with patch.object(causal_lm, "call_with_cache", wraps=wrapper): + prompt = ["the quick brown fox", "the quick"] + output = causal_lm.generate(prompt, stop_token_ids=(3,)) + # We should immediately abort and output the prompt. + self.assertEqual(prompt, output) + + def test_text_generate_compilation(self): + causal_lm = Gemma3nCausalLM(**self.text_init_kwargs) + # Assert we do not recompile with successive calls. + causal_lm.generate("the quick brown fox") + first_fn = causal_lm.generate_function + causal_lm.generate("the quick brown fox") + second_fn = causal_lm.generate_function + self.assertEqual(first_fn, second_fn) + # Assert we do recompile after compile is called. 
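+        # `compile()` clears the cached generate function, so the next
+        # `generate()` call retraces with the new sampler.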
+ causal_lm.compile(sampler="greedy") + self.assertIsNone(causal_lm.generate_function) + + def test_vision_generate(self): + causal_lm = Gemma3nCausalLM(**self.vision_init_kwargs) + inputs = { + "prompts": "this is a lily ", + "images": np.ones((8, 8, 3), dtype="float32"), + } + output = causal_lm.generate(inputs) + self.assertIsInstance(output, str) + + def test_audio_generate(self): + causal_lm = Gemma3nCausalLM(**self.audio_init_kwargs) + inputs = { + "prompts": "transcribe this ", + "audios": np.ones((16000,), dtype="float32"), + } + output = causal_lm.generate(inputs) + self.assertIsInstance(output, str) + + def test_multimodal_generate(self): + causal_lm = Gemma3nCausalLM(**self.multimodal_init_kwargs) + inputs = { + "prompts": "image audio ", + "images": np.ones((8, 8, 3), dtype="float32"), + "audios": np.ones((16000,), dtype="float32"), + } + output = causal_lm.generate(inputs) + self.assertIsInstance(output, str) diff --git a/keras_hub/src/models/gemma3n/gemma3n_image_converter.py b/keras_hub/src/models/gemma3n/gemma3n_image_converter.py new file mode 100644 index 0000000000..1901e7db0e --- /dev/null +++ b/keras_hub/src/models/gemma3n/gemma3n_image_converter.py @@ -0,0 +1,14 @@ +from keras_hub.src.api_export import keras_hub_export +from keras_hub.src.layers.preprocessing.image_converter import ImageConverter +from keras_hub.src.models.gemma3n.gemma3n_backbone import Gemma3nBackbone + + +@keras_hub_export("keras_hub.layers.Gemma3nImageConverter") +class Gemma3nImageConverter(ImageConverter): + backbone_cls = Gemma3nBackbone + + def __init__(self, **kwargs): + # Always do image preprocessing in float32 + kwargs.pop("dtype", None) + dtype = "float32" + super().__init__(dtype=dtype, **kwargs) diff --git a/keras_hub/src/models/gemma3n/gemma3n_text_decoder.py b/keras_hub/src/models/gemma3n/gemma3n_text_decoder.py index a071dd10fe..aff1276000 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_text_decoder.py +++ b/keras_hub/src/models/gemma3n/gemma3n_text_decoder.py @@ -193,7 +193,14 @@ def build(self, input_shape): self.act_fn = keras.activations.get(self.hidden_activation) super().build(input_shape) - def call(self, inputs): + def call( + self, + inputs, + cache=None, + cache_update_index=0, + cache_update_mask=None, + training=False, + ): ( hidden_states, position_embeddings_global, @@ -210,9 +217,23 @@ def call(self, inputs): if self.is_sliding else position_embeddings_global ) - attn, _ = self.attention( - active_prediction_normed, position_embeddings, attention_mask - ) + if cache is not None: + attn, _, new_cache = self.attention( + active_prediction_normed, + position_embeddings, + attention_mask, + cache=cache, + cache_update_index=cache_update_index, + cache_update_mask=cache_update_mask, + training=training, + ) + else: + attn, _ = self.attention( + active_prediction_normed, + position_embeddings, + attention_mask, + training=training, + ) attn = self.post_attention_layernorm(attn) attn_gated = active_prediction + attn attn_laurel = (attn_gated + laurel_output) / math.sqrt(2) @@ -244,8 +265,13 @@ def call(self, inputs): first_prediction_projected ) for i in range(1, len(corrected_predictions_list)): - corrected_predictions_list[i] += first_prediction_normed - return keras.ops.stack(corrected_predictions_list, axis=0) + corrected_predictions_list[i] = ( + corrected_predictions_list[i] + first_prediction_normed + ) + output = keras.ops.stack(corrected_predictions_list, axis=0) + if cache is not None: + return output, new_cache + return output def get_config(self): config = 
super().get_config() diff --git a/keras_hub/src/models/gemma3n/gemma3n_text_layers.py b/keras_hub/src/models/gemma3n/gemma3n_text_layers.py index ba8d36eb04..16cb2d5f8a 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_text_layers.py +++ b/keras_hub/src/models/gemma3n/gemma3n_text_layers.py @@ -381,10 +381,14 @@ def predict(self, hidden_states): modalities = self.compute_router_modalities( hidden_states[self.altup_active_idx] ) + modalities_shape = keras.ops.shape(modalities) + reshape_shape = modalities_shape[:-1] + ( + self.altup_num_inputs, + self.altup_num_inputs, + ) all_coefs = keras.ops.reshape( self.prediction_coefs(modalities), - modalities.shape[:-1] - + (self.altup_num_inputs, self.altup_num_inputs), + reshape_shape, ) all_coefs = keras.ops.transpose(all_coefs, (0, 1, 3, 2)) predictions = keras.ops.matmul( diff --git a/keras_hub/src/models/gemma3n/gemma3n_text_model.py b/keras_hub/src/models/gemma3n/gemma3n_text_model.py index 2a668cdc35..c5f1e80a5a 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_text_model.py +++ b/keras_hub/src/models/gemma3n/gemma3n_text_model.py @@ -126,7 +126,7 @@ def __init__( ) if activation_sparsity_pattern is None: self.activation_sparsity_pattern = [0.0] * num_hidden_layers - self.layers = [ + self.transformer_layers = [ Gemma3nTextDecoderBlock( hidden_size, rms_norm_eps, @@ -151,7 +151,7 @@ def __init__( ) for i in range(num_hidden_layers) ] - self.norm = Gemma3nRMSNorm( + self.final_normalization = Gemma3nRMSNorm( hidden_size, eps=rms_norm_eps, name="norm", dtype=self.dtype_policy ) self.rotary_emb = Gemma3nTextRotaryEmbedding( @@ -250,9 +250,9 @@ def build(self, input_shape): decoder_per_layer_input_shape, None, # attention_mask ) - for layer in self.layers: + for layer in self.transformer_layers: layer.build(decoder_input_shape) - self.norm.build(inputs_embeds_shape) + self.final_normalization.build(inputs_embeds_shape) super().build(input_shape) def get_per_layer_inputs(self, input_ids): @@ -282,6 +282,26 @@ def project_per_layer_inputs(self, inputs_embeds, per_layer_inputs=None): per_layer_projection + per_layer_inputs ) * self.per_layer_input_scale + def token_embedding(self, inputs, reverse=False): + """Apply or reverse the token embedding. + + Args: + inputs: If `reverse=False`, token IDs to embed. If `reverse=True`, + hidden states to convert to logits. + reverse: bool. If False, performs embedding lookup. If True, + computes logits by projecting hidden states through + the transpose of the embedding matrix. 
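+
+        In the reverse direction, logits are computed as
+        `matmul(hidden_states, transpose(embedding_matrix)) / embed_scale`.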
+ """ + if not reverse: + return self.embed_tokens(inputs) + else: + embedding_weights = self.embed_tokens.embedding.embeddings + logits = keras.ops.matmul( + inputs, keras.ops.transpose(embedding_weights) + ) + logits = logits / self.embed_tokens.embed_scale + return logits + def compute_output_shape(self, input_shape): if isinstance(input_shape, (list, tuple)) and isinstance( input_shape[0], (list, tuple) @@ -292,7 +312,16 @@ def compute_output_shape(self, input_shape): hidden_size = self.embed_tokens.embedding_dim return input_ids_shape + (hidden_size,) - def call(self, input_ids, attention_mask, inputs_embeds, per_layer_inputs): + def call( + self, + input_ids, + attention_mask, + inputs_embeds, + per_layer_inputs, + cache=None, + cache_update_index=0, + cache_update_mask=None, + ): position_ids = keras.ops.expand_dims( keras.ops.arange(0, keras.ops.shape(input_ids)[1]), 0 ) @@ -317,17 +346,37 @@ def call(self, input_ids, attention_mask, inputs_embeds, per_layer_inputs): current_hidden_state = altup_proj * target_magnitude / new_magnitude temp_hidden_states.append(current_hidden_state) hidden_states = keras.ops.stack(temp_hidden_states, axis=0) - for i, decoder_layer in enumerate(self.layers): - per_layer_input = per_layer_inputs[:, :, i, :] - hidden_states = decoder_layer( - ( - hidden_states, - (cos_global, sin_global), - (cos_local, sin_local), - per_layer_input, - attention_mask, + if cache is not None: + caches = [] + for i, decoder_layer in enumerate(self.transformer_layers): + per_layer_input = per_layer_inputs[:, :, i, :] + current_cache = cache[:, i, ...] + hidden_states, new_cache = decoder_layer( + ( + hidden_states, + (cos_global, sin_global), + (cos_local, sin_local), + per_layer_input, + attention_mask, + ), + cache=current_cache, + cache_update_index=cache_update_index, + cache_update_mask=cache_update_mask, + ) + caches.append(new_cache) + cache = keras.ops.stack(caches, axis=1) + else: + for i, decoder_layer in enumerate(self.transformer_layers): + per_layer_input = per_layer_inputs[:, :, i, :] + hidden_states = decoder_layer( + ( + hidden_states, + (cos_global, sin_global), + (cos_local, sin_local), + per_layer_input, + attention_mask, + ) ) - ) target_magnitude = keras.ops.sqrt( keras.ops.mean(hidden_states[0] ** 2, axis=-1, keepdims=True) ) @@ -346,7 +395,10 @@ def call(self, input_ids, attention_mask, inputs_embeds, per_layer_inputs): temp_hidden_states.append(current_hidden_state) hidden_states = keras.ops.stack(temp_hidden_states) hidden_states = keras.ops.mean(hidden_states, axis=0) - return self.norm(hidden_states) + normalized = self.final_normalization(hidden_states) + if cache is not None: + return normalized, cache + return normalized def get_config(self): config = super().get_config() diff --git a/keras_hub/src/models/gemma3n/gemma3n_tokenizer.py b/keras_hub/src/models/gemma3n/gemma3n_tokenizer.py new file mode 100644 index 0000000000..191fde8579 --- /dev/null +++ b/keras_hub/src/models/gemma3n/gemma3n_tokenizer.py @@ -0,0 +1,95 @@ +from keras_hub.src.api_export import keras_hub_export +from keras_hub.src.models.gemma3n.gemma3n_backbone import Gemma3nBackbone +from keras_hub.src.tokenizers.sentence_piece_tokenizer import ( + SentencePieceTokenizer, +) + + +@keras_hub_export( + [ + "keras_hub.tokenizers.Gemma3nTokenizer", + "keras_hub.models.Gemma3nTokenizer", + ] +) +class Gemma3nTokenizer(SentencePieceTokenizer): + """Gemma3n tokenizer layer based on SentencePiece. 
+ + This tokenizer class will tokenize raw strings into integer sequences and + is based on `keras_hub.tokenizers.SentencePieceTokenizer`. Unlike the + underlying tokenizer, it will check for all special tokens needed by + Gemma3n models and provides a `from_preset()` method to automatically + download a matching vocabulary for a Gemma3n preset. + + If input is a batch of strings `(rank > 0)`, the layer will output a + `tf.RaggedTensor` where the last dimension of the output is ragged. + + If input is a scalar string `(rank == 0)`, the layer will output a dense + `tf.Tensor` with static shape `[None]`. + + Args: + proto: Either a `string` path to a SentencePiece proto file, or a + `bytes` object with a serialized SentencePiece proto. See the + [SentencePiece repository](https://github.com/google/sentencepiece) + for more details on the format. + + Examples: + + ```python + # Unbatched input. + tokenizer = keras_hub.models.Gemma3nTokenizer.from_preset( + "gemma3n_instruct_1b" + ) + tokenizer("The quick brown fox jumped.") + + # Batched input. + tokenizer(["The quick brown fox jumped.", "The fox slept."]) + + # Detokenization. + tokenizer.detokenize(tokenizer("The quick brown fox jumped.")) + + # Custom vocabulary. + bytes_io = io.BytesIO() + ds = tf.data.Dataset.from_tensor_slices(["The quick brown fox jumped."]) + sentencepiece.SentencePieceTrainer.train( + sentence_iterator=ds.as_numpy_iterator(), + model_writer=bytes_io, + vocab_size=8, + model_type="WORD", + pad_id=0, + bos_id=1, + eos_id=2, + unk_id=3, + pad_piece="", + bos_piece="", + eos_piece="", + unk_piece="", + ) + tokenizer = keras_hub.models.Gemma3nTokenizer( + proto=bytes_io.getvalue(), + ) + tokenizer("The quick brown fox jumped.") + ``` + """ + + backbone_cls = Gemma3nBackbone + + def __init__(self, proto, **kwargs): + # Add special tokens. + self._add_special_token("", "start_token") + self._add_special_token("", "end_token") + self._add_special_token("", "pad_token") + # Image. + self._add_special_token("", "image_placeholder") + # Audio. + self._add_special_token("", "audio_placeholder") + # Multimodal inputs. + self._add_special_token("", "start_of_image_token") + self._add_special_token("", "end_of_image_token") + self._add_special_token("", "start_of_audio_token") + self._add_special_token("", "end_of_audio_token") + # Special tokens for conversation and masking. + self._add_special_token("", "start_of_turn_token") + self._add_special_token("", "end_of_turn_token") + self._add_special_token("", "mask_token") + self._add_special_token("[multimodal]", "multimodal_token") + super().__init__(proto=proto, **kwargs) diff --git a/keras_hub/src/models/gemma3n/gemma3n_tokenizer_test.py b/keras_hub/src/models/gemma3n/gemma3n_tokenizer_test.py new file mode 100644 index 0000000000..b2fa36b404 --- /dev/null +++ b/keras_hub/src/models/gemma3n/gemma3n_tokenizer_test.py @@ -0,0 +1,32 @@ +import os + +from keras_hub.src.models.gemma3n.gemma3n_tokenizer import Gemma3nTokenizer +from keras_hub.src.tests.test_case import TestCase + + +class Gemma3nTokenizerTest(TestCase): + def setUp(self): + self.init_kwargs = { + # Generated using `create_gemma3n_test_proto.py`. 
+ "proto": os.path.join( + self.get_test_data_dir(), "gemma3n_test_vocab.spm" + ) + } + self.input_data = ["the quick brown fox", "the earth is round"] + + def test_tokenizer_basics(self): + self.run_preprocessing_layer_test( + cls=Gemma3nTokenizer, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=[[14, 19, 15, 17], [14, 16, 18, 20]], + ) + + def test_errors_missing_special_tokens(self): + with self.assertRaises(ValueError): + Gemma3nTokenizer( + # Generated using `create_no_special_token_proto.py` + proto=os.path.join( + self.get_test_data_dir(), "no_special_token_vocab.spm" + ) + ) diff --git a/keras_hub/src/tests/mocks/mock_gemma3n_tokenizer.py b/keras_hub/src/tests/mocks/mock_gemma3n_tokenizer.py new file mode 100644 index 0000000000..9c770662b4 --- /dev/null +++ b/keras_hub/src/tests/mocks/mock_gemma3n_tokenizer.py @@ -0,0 +1,159 @@ +import tensorflow as tf + +from keras_hub.src.tokenizers.tokenizer import Tokenizer +from keras_hub.src.utils.tensor_utils import convert_to_ragged_batch +from keras_hub.src.utils.tensor_utils import is_int_dtype +from keras_hub.src.utils.tensor_utils import is_string_dtype +from keras_hub.src.utils.tensor_utils import preprocessing_function + + +class MockGemma3nTokenizer(Tokenizer): + def __init__( + self, + proto=None, + sequence_length=None, + dtype="int32", + add_bos=False, + add_eos=False, + **kwargs, + ): + if not is_int_dtype(dtype) and not is_string_dtype(dtype): + raise ValueError( + "Output dtype must be an integer type or a string. " + f"Received: dtype={dtype}" + ) + super().__init__(dtype=dtype, **kwargs) + self.vocabulary = [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "the", + "brown", + "earth", + "fox", + "is", + "quick", + "round", + "\n\n", + "", + "", + "", + "", + "[multimodal]", + ] + self.string_to_id = tf.lookup.StaticHashTable( + tf.lookup.KeyValueTensorInitializer( + self.vocabulary, list(range(len(self.vocabulary))) + ), + default_value=3, + ) + self.id_to_string = tf.lookup.StaticHashTable( + tf.lookup.KeyValueTensorInitializer( + list(range(len(self.vocabulary))), self.vocabulary + ), + default_value="", + ) + # The usual tokens. + self._add_special_token("", "start_token") + self._add_special_token("", "end_token") + self._add_special_token("", "pad_token") + # Image placeholder token. + self._add_special_token("", "image_placeholder") + # Audio placeholder token. + self._add_special_token("", "audio_placeholder") + # Tokens used in the preprocessor for multimodal inputs. + self._add_special_token("", "start_of_image_token") + self._add_special_token("", "end_of_image_token") + self._add_special_token("", "start_of_audio_token") + self._add_special_token("", "end_of_audio_token") + # Additional special tokens for conversation and masking. 
+ self._add_special_token("", "start_of_turn_token") + self._add_special_token("", "end_of_turn_token") + self._add_special_token("", "mask_token") + self._add_special_token("[multimodal]", "multimodal_token") + self.sequence_length = sequence_length + self.add_bos = add_bos + self.add_eos = add_eos + + def vocabulary_size(self): + return len(self.vocabulary) + + def get_vocabulary(self): + return self.vocabulary + + def id_to_token(self, id): + return self.vocabulary[id] + + def token_to_id(self, token): + return self.vocabulary.index(token) + + @preprocessing_function + def tokenize(self, inputs): + inputs = tf.convert_to_tensor(inputs) + unbatched = inputs.shape.rank == 0 + if unbatched: + inputs = tf.expand_dims(inputs, 0) + # Add spaces around special tokens for proper splitting. + inputs = tf.strings.regex_replace( + inputs, self.start_of_image_token, f" {self.start_of_image_token} " + ) + inputs = tf.strings.regex_replace( + inputs, self.end_of_image_token, f" {self.end_of_image_token} " + ) + inputs = tf.strings.regex_replace( + inputs, self.image_placeholder, f" {self.image_placeholder} " + ) + inputs = tf.strings.regex_replace( + inputs, self.start_of_audio_token, f" {self.start_of_audio_token} " + ) + inputs = tf.strings.regex_replace( + inputs, self.end_of_audio_token, f" {self.end_of_audio_token} " + ) + inputs = tf.strings.regex_replace( + inputs, self.audio_placeholder, f" {self.audio_placeholder} " + ) + inputs = tf.strings.regex_replace(inputs, " ", " ") + sep_inputs = tf.strings.split(inputs, sep=" ") + tokens = self.string_to_id.lookup(sep_inputs) + if self.add_bos: + bos_tensor = tf.fill( + value=self.start_token_id, + dims=tokens.shape.as_list()[0:1] + [1], + ) + tokens = tf.concat((bos_tensor, tokens), axis=-1) + if self.add_eos: + eos_tensor = tf.fill( + value=self.end_token_id, dims=tokens.shape.as_list()[0:1] + [1] + ) + tokens = tf.concat((tokens, eos_tensor), axis=-1) + # Convert to a dense output if input was a scalar. + if unbatched: + tokens = tf.squeeze(tokens, 0) + return tokens + + @preprocessing_function + def detokenize(self, inputs): + inputs, unbatched, _ = convert_to_ragged_batch(inputs) + # tf-text sentencepiece does not handle int64. 
+ inputs = tf.cast(inputs, "int32") + outputs = self.id_to_string.lookup(inputs) + outputs = tf.strings.reduce_join(outputs, axis=-1, separator=" ") + for token in [ + self.start_token, + self.end_token, + self.pad_token, + ]: + outputs = tf.strings.regex_replace(outputs, token, "") + outputs = tf.strings.strip(outputs) + if unbatched: + outputs = tf.squeeze(outputs, 0) + return outputs + + def __call__(self, inputs): + return self.tokenize(inputs) diff --git a/keras_hub/src/tests/test_data/gemma3n_test_vocab.spm b/keras_hub/src/tests/test_data/gemma3n_test_vocab.spm new file mode 100644 index 0000000000000000000000000000000000000000..75920d86bec469f5202c7eed23e298d62cf82a61 GIT binary patch literal 238204
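The `token_embedding(..., reverse=True)` path added to `gemma3n_text_model.py` above ties the output projection to the input embedding: logits are computed against the transpose of the embedding matrix and then divided by `embed_scale`. Below is a minimal sketch of that tied-weights pattern with a plain `keras.layers.Embedding` and made-up sizes; the real layer is the backbone's `embed_tokens`, and the `embed_scale` division is omitted here.

```python
import keras
import numpy as np

# Illustrative sizes only; the real values come from the backbone config.
vocab_size, hidden_size = 32, 8
embedding = keras.layers.Embedding(vocab_size, hidden_size)

token_ids = np.array([[1, 5, 7]])
# Forward direction: token IDs -> hidden states.
hidden = embedding(token_ids)  # shape (1, 3, hidden_size)

# Reverse direction: hidden states -> vocabulary logits via the transposed
# embedding matrix, mirroring `token_embedding(inputs, reverse=True)`.
logits = keras.ops.matmul(hidden, keras.ops.transpose(embedding.embeddings))
print(logits.shape)  # (1, 3, 32)
```

The presence of this reverse path suggests the output projection shares weights with `embed_tokens` rather than using a separate dense head.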
zcvms9A90MKPg|oO1D?g;hW`tCHgR$oBP-5jpO^a=&j_b*4(a#kH$JH@`^BzsUU&)1 zSivgN=3rj<*NM5;u5y7jmy|U);E3A-#8I+$Xw(hn*WdOgZ^vL&jU`D=gm+$-8 zmgKPQ*Xw`v%OHjhs2eKP5moBHqv}n^M&o8lGKQXY{$-uJLwDis@U<=dl8%`{R7cM( zs-xD_Rb(0qTk5|u^?!x>pG@Sb{|nUZMe2WYD%ae%?vI58TRPMKW_`iQZvFMnlR|P# zzrlun1AYDHgcp&fM{5AhZ|ldW|JCJBhbJ~ZZ9U1x@c91EhOcgYHavE$$@pD;_{!?X z!5hib z(pei?@ldQG{N+qTShj5i57&P@e7^a!;dABn;Vd3;t}pgCg;m?u@Mvvg__F8orS(sT zj;2OyU;6i*{@QihHgFM_@OXa+TV#)aXg%qDbYoliug7vjuXQS8{=4?|PGef_1#Gi| z&~Lo2uRb@76umkOU(9EtT^$EZmu4 zoNz!~30ijF7)r@9tbdFhOJ4ZJzX=uOAsohL-=1)UJc>$GpGbALx(I3umI*!=pQ&`x$l`gipU z=-XHqxBezRLgpoOitj=J#sRdgOMa>FiB65=P;C`6>|1thf}GgTcE=>9FukaMfE^vpfuCbj z&yy+Pw6y{jkm%nV)|_V@8@PzcWofML4VUQAKLclX_J%Eb`c8G83mcAC#s}TjT-bQZ;p|%SRNa~fBt^Nr%wNUt+;`U$|4N$ z8HoD_;4ntk__G}L+>a_xJpUfwTU~VgA$~ikr)Spx3F_ez^>DHLRUvoWGQCT6kei`!B`@i0dC96WU7? z^kkQMrbGWgjkw%hp+h>I=t4Ip^W;&)=$Z9(*~$I%I2K%QT7FI;T3>feToN-#Vcodn zIWqe9?;;u56w#WGQNKWBN1qqBgk?nj2hqBa>tEvk|0eMNkJcVowQUXS*uX_x!WOo1 z8JqfrH~-%H6(7((+pT=-P}hG~`PRUuus!SFxes~{nTu%c>Dg-E#Q(9?|+dQ^$z@AXH`)@VnRYd0Klef$#AMXqnv{deg9=yDG7AG`k} z(m0ArRG}I*IELexu9yEXi#c3h|8>PPmKQVkKj!|CIII2RJjqJ^f0#n8Gm)c>_I0A1H8L^=x_JCS=-W?b>jW4PUb-G>dD+8JtBsI?#zObR&j-#PRa_YfQN1 zr0@A8o9ae=cSHU)uRnT5{DrT0PUJZ(BASbKo?OB*R4&AfyHebMH+jfxkZRIa=k%xQ~pii05kL%?FJ&s|FVDymueN_HFEH78d z-;LfsCJW_nOdn7_V7A=(F;CB|Ph?x<%aa-R$yavmzq)Q=j4X8CBJ>{E750+{Fxahs zLf>D!pY1=Rf1*SG1X_$Cq~BH)PJhVw|1Zqr)rVDL|EL@&B{v_>4rOFHDsTvgaRf(E zi7Hg12FGw5wb-19+V4eS{jc zAN=gKp;LGvPLC-E#(b;nfj(p6{e{L44;Vj0W`2LM@k3+UBV@5-PWc~lGfo=!?hx)m zoBtD?HkWexobh$cVh;02A&mu$))>E(&baiVweBbO8^3g(XpZ_6db+LcAiWs+5l3%q zUx+0gM~}wD2Ra=?pTWo_*S2AdbIY|}b}n&=&RfGdauJiMTf=#B3CZNGVb-=eEDNt7 z-Dun&^GKo3HT73IN0s}pm1d)~F-#x9s$t)in$<^+Zt!m!%o4VGwhFJM=-@2at^N$oZ)K;Mx$a zT@byiXzi@1?-w|lAcHABPwU2KBu{2^NQZ)eeoYl(-%s-`%>Qk3hY;i$oC@6 z)-R%O_VIlni?MKfDI5RRaDcx4rhTD=EJYd0QGr9~S>lIA#kXe7divLv&Un_FL`$(24|0|ULo`0J;g{Qxr z6V6C$b3TN#WIH;d@aMeWJFNlot9_wU_`+-Vg)VZPP0$^MjR7V+s~Ei>akS_Im|S#@ zHRHa*bIY!6Ntix^g)!g2dGG$5YbASDb3%jo6wYB0y*cXNqCMd}eNf#yz;0inFC+S= z-7vd7uI?QoSH!)%|7cZs4eQuIX8rGPsu$>&u!U`0#tt(6eeC=J3}Q&zA)cd-*M1lw zv%aHVKrZr-j{+2;2-79nA4~ej7WI#5@6FS%um9Dh{da_aU={zsO1@g^{_E=#7i)iL z2PD+7`=wKi11Le{!=G&56H4iGWaPtNMlZ+0j=l|~FmJyKaXn4^5Xi&mJ;%l+k0Q3> z*_~H^la<2J{J(4eRUWcme8%|0Mg27xxXk|#L-aVVt^a>g8?AXq`{rB-RnmyoJUjhK zeNOZo&+orE|FNB|g#Xd{|C`2~Yov7y#}UnosU_=Bj|)F}MQ9+S{;$R;Y&@zd3j1c8 z$;kh$l{|w-`N5qfulc{V)35ozb6gs^W2?sLV@P5^SrC;G zLx{?VsB9R)%l!*x?3cnhq~CL6I8U~HgMIyaK0WkhtY8&uSVuJW&@Z14U=Ty{eq1?_ zS%0}as(D3CtqUh=eul( z4`@$|kJ=w4(SF7XO3BRs5BLt0(<^WYhtb3KTy!60^dmTm-aO;SsH8_WY4mUUDta|W zj;D09>lX-T z{C~df+oAW0i(!mDc+|ZiNuR+8J(`0MM>GeauipJ*0D~Cnl18iV4K~Khgf9<|MBrfNLRq3o@9UGX;QBSHH zFVg3Z={Hg@U!reep;miWT&jWni*0c|eDE)mJLo;cpZL4VCd4Y$t4HNq^=__k9y0Ul zO5|hZMjXwT*vHuXzB!oMKl%14MBAI#weQLfMfCkB#sNg@ewUCJ%$qMI%TSIAB--`e zlgSzF|EBD4n0^FD5v_SOhimJQq?M_W4cvEF{;zVK7(DK}nq1eI@&%E9Q#Afx>G&#C zqXx%t9JR=-|5PXcANTz8JpV$^A8GnRj_1Fuj{w*I|Iy(2H+lYKH2-S-)AH;a&HcCj zf8^(1C(U{^pb<@I#u=PNJ2ro2?mypv4th@)yT#a0#t*Yn8SpsYt$J;O}zsVI%CovSJy@V2bxfyMs@2Xdwfd0oG4~*N+*SLSj6Oh`;=*a z(C2E^zsIyc=*w8BQvV`#T>V?8{UNT$_EmBXz2eu&4aDl*m-M4`buS8E!f?L(LS~%9 zdHVX<{}^z6gJa^C+<&S3hY_4SudRt_e(;v#)1THR#Wwvi{)hgzmGU3P5v>*TyZ*P) zKM8lF({{Uh{YP0LYsvkO`9==!2|4t+4&N4X>3N9$9hffljkWsLP~g~}hCLzQwgU8S z8zUx*5Un9Fu&iFge&J#a`|hH3Ei(TZJ(m>@h$}%U%21999KvB7!BJGA3e~8=F{D3k z{oQb%u?^$Ye`>7kbFZ+$w}|MQ+|&ow`lq(PTARZD7yjm-hPHQAgww{@>V!AHZr(^F z-@Gd{kd0_UGg@&5E%)6Q&XVouKqoF3kL)6&@$W>Xe}~ay=tmrrjZq(vwm&_x`DaUo z=`*<2@0HT`I3M*tR>=R&@_(oN-!4yR4`lxTDbJ3e#ec-pNICW#7I7X+SjKdv`X951 z`q<{{y&I&lfZv<{FD{8||NlFg^t@7@7r83Ewo&u)e{x?~qi?=PAKjhi1;58PEqviG 
zLf9ZL;u5ydQ|VhI7yjn9aGBgeZ=?KQuN?lK{9o?gN_T}EdM+~m|MQK+3*0NdQ1bfF zo^yM6B=7d{#ns!xms(3gN7(ZxH%my@O8D_L&{ods_Qk9CxVuO4_~c)ae7p{uhj zJW=tc@K)E@@|-6h1z1=;7}B<9H>J<2w9DW9F5aak8&% zmxu10ityyc`>cgl5%$}+7za>-v2%YEMva|i#>txgC?r>mi=ouEGMw699RB=3aVV!( zpl$I$INf|_Xem4pTJca;N%+glCE<{5NAPe~Y54q^H-^u(zagA0Dh-buE(>2QEDLSV z7%$c*`)GA}`0}wgg)eQtF?7%$>%K31^>9Tv>exy&H>vON_@Vnk%ip2iqJ`RAxWQ&SA@A_Md%ieA>CCG7WQj9#419n z!}5J!eG99?Ed0OWw-l_+J?ip@GIrR(EVZk%?J4^+#ho7 zn}>W9AY&hC(?;`Y3WYcO4u&Fff29A(!O&M?{ogucug9;B!4C14kMoxwCP$3PCS6Cg zc2fE$*`e4mZLii>{1rYH^b$;~pJ$>vT3r+Q>d)t>oAa*vtSzY9Mpul%t{HnJ6ZV^E zRJUW2K6Om}Tdw|XX8(8oX8qq8_CJPK+5aoA2&K+ZhQqF-oUFj1D2yYR)`pnTmY6l3 zFo$_#4k=v!|3IAmuU}ytiB|T1m+=Sflu7NCsd{nR0!O7&i7HfM(lsSrdkuX~-8$=9 zkI|1KTGKFH?H()LtF}h1xE}41Iu(Zn#&D7L$F_FHe(j6` z_dmn_$MyezRVbT_z1tG?G}-Eym-nxn5k8A{q(7b&rknW(oz?%*s{f-?|Hm2jg8IZd zZtNBIe~tEsa&sIBOdxrYf6+4kB4qjxW4l6TJ-pkm=8H~gcOm)*SoH6&x&7Y%$JDdY zIq3`f(4)1zQ|MV&*NE>&9Ak)V_vqhWN&2At68-OIhMvMmo%|y%-e9Z`{jRyMoo~{T zwJy5(Cav*N!ifHyVR7j@^TIjDE#f?uu#6R~B3gTOy3xIpv)XByeyvq`Y*8JIG0$cc zi9?>j5%*3`RJ(U_Qg}@o>)60WT*9V#_*>*Q(#F^>_~+JT`ub;gGfvdkx$={9?ug6! zzWiUSeMYu?)AK9R|4PqCvcNUvY8RmG8UF!5dhP19W&FffwBPJkh$8IA=6?#Im^={a z?~uRsv9C|4|MkCIFt4;kd@0IMjtU$?&xXE=Zy0y(%nFBvkD#|bD;y;&G1%%GL%dq~ zQR&+{;u|A#Y^y>wYH$q4QHwhC)v|Bd$paW<(+(xA>z1-k89CC=zGhR8ZL_a;qPhI+ z>r3qG4)$~x`}&M^<=F5ut?cu&k$vv_^xRTN(-+X(r9MQxb46|J2C@-NXhvH!Hl@AT zN`tl{P3YwN--`e8Q6)6e(@!tQX9 zehFLHMxSq_-#0Tr4i zP9~&1L680oGwFO&m(>ZJ;qk7(XQoE$?EGgu!| zj*wCR`pHt?XodV<#Gd&zJN;vO!?|dmJAFsd`T^x^gd6$#KjV5nkQ08S5A@pqOV8VH z&NGVI088{`EHo&WkXrOCu@a>v-Bt3!yZFLHVQkR$}>tEVtDbG2W3m_CAH`@FpV zIUrnuQlvkr{S{aLC)JVq4d%#s;gr7iG!`)0&Hl$Y5|}_VFK#lX{&(Ig_Y?I2MC->c z(kCnQwM)AkQ6E5L`_G~6mF#2oPny1f`FiI;Wcydx_Ye-_2znchU)Xk(J}B*|51^7> zh3KC;!-z{eT5C5m|Gv&X$KC%~ajq%eY9IOtS?!n_9K&(cq7L^tkY>;FH9 zYa_&5XA}Ex$New5|0VZNPOQ0qa#DDTOk6UyFP$bdqZN}m_Q~H9&d@XSAIr^4r?=yG z=RbCc>+zmC$u9IBvYr9i;=7MkxvwVob=dVNhx)}2*SIg?Au?`zUnl$bobLjI7(!hB z9hR?0&}^*!B;t-eb(xRA7tFu+-p7QKm|ir#zQo2R=hn0f=qY3T>22+VuHTH$8>1gb z0uzY-{V|CtB#vksNGFAJSj42bB+k?4TBVI8`Z5;iX{0)w3oGJ!^7zbU?V90oX zX8r#fg!<^IW}@FY2vK__oRK$?mM<1Cx}mI4evFfu|NGIC%8AJp`9Gy>>G$5)DyLG~=&|gO z^`ATgax!j?8NKBv>Nj$>MEb?juaG{O#zF!6<03z1+uJ^({?%5@r}wT~M@f7EeX!QO z9pl$bFTzN*dlMJOPxhaI4gYN)qHx$4s@amQJ-=*8ACtf$jm=b_l(h_F^O3u=`&c^QHLTLBai+SpRvEy zFVR2l2HTYxNoB{fGUU9ni;v!E{wCyf6xEVK0WHWCFv|9@Q{|E?5~KNrbMnAHB*BDZ~~bBA0byFYc+7QPOO2w6-92I!E{c%XR#WxtOTX6cB5Zd1Hs&GWyQBB*?a+ax-_-x~I6bpYkGxiGpBfy)ais6NG1QWEsK@kq`5&{Z@~8TA{<8dubf^4@ z(Zllp(ck#z)u{i4lfsd2`jq_=hxPqSrx8tP#^h1;KU(Q?XQbVn9nR3tVnH}9oFeDj zvqQVMo-97RWG8xajSZ09h|vcYm1|4N1L1xQ3y%m7DSzT*W_|Cd{+54Rf_t~m`q$+h z@`AC~88U@)*nC3>i{yDMVHs^d;a~7;?Rfes*07Gv@9qg3kv{5sB%^=(ULv=!jmy|U z)(?DB$VDFVQGh~3>&Lae#`@sWDiXf%TJ4Pk%9g|5^nZXV{^vFP&&h=SCN4`8lk}-|{_~A_;fQpO zq7qf;9bk$u#Dbr{fFXL=$Z9LUB@bY4I}!% zhYz?$L_P#h6kEg5vv~aC4WX;RdV_~=Hr{eW_)7K7p|kSl@M!%_;mgf`7`{|>Q|P#K zQ+Q-~pK-2z;S0%ILc2C#OaHB*wfNT1hSOtz7}lL*1E;G0IQ)6#ABWE!zA2oI{gd!8 zK0otMWPpFjx@h}D<$oCdGA>^#9`9BC>Z2E)nxN`2o zr_Emxzy1~L1(MPD@ng!auPP7McdL*8A}3t<(X-i?ZQDWCf0l-_XY+G>0zQrx^*bng zqI8<~YiE?(HgPHYl(Hb2zmRMHO#A1~oRCMqHh&?X9<50+ivoHfuIGPk)5ur7!})T= z7hyk&aR4PKMIRfZf5Z5|I$Hxn^vrs_%f<&V%Kje1I1-pp$0ZTX;h#EZ{a-S(K0u@P z0W$0F6{~ym)xmk{|06k}%z4UDfkWs$YW^7x(+6ACe>g%viV=0;Fru}~hEOT43e~8= zF&sxN>JUAvW<=#>y>K*U)PhEO6Z#g_|0~Xc=wH7>JJK%FH(H?IB+vXz`!u7?-0Rb? 
zl|Sica2D;j{{Njb+W(mEl&_GcFJM$Y8&lSflZlJ^-;~Qy**r;~(l3*!vBrXQI?;ue zdE2_|)4+}wo}2Oh#@O=oek@c-8!60p>nju&%^w&eljvPF9!;hYD|KJn`W1J~NfBN| z=D#nE`h%1cakB5S`X8D3IqLhNV)bH)HZmFgd-J?wmavQ!yuAIlDm>kx{_j#pcdP#~ zU$1S6G!`&=QT>ncZFRl6cVbQbpH%;kssEAwZgyCc&N?pQ61K37&7T{F_me21e?9BP z>{#(j*}bbTW&d^7-)HX^*Ii(Z)FQS;g)%1p@3P}rzsepi`P=M~tiR3f%m0V${?32M z9?1H2_F&Q9Wlv;1lbt;NU$WERd@;KPqglVm9;@2S9zVL7ov8n1cCqvhpai8TLpdsN z2$}VN-?&@9qW=A7_JqUYj^HRNak9+!TjBd9tA%TD43TZH`3?PF%GKk-ZSL{(XLqt| z>AlL>$cM6yK3M17qMqJ>j9+!N^0U&t9pnFV_DXht$Cd1Xt}EGt=9HsrV&kOMk~(XEZQ;c+0B%^ls!w%RlJlvPo{*^;xu|JaZ*` zvi7CysiK#%JEXDxCog7qk{3SxVs;nVjTrh7#~6~BL8kuoU7f<_jXT+isw>$k@#m0! zv-TM|+2kCJSF+F3=Sr?*&m!ZWH{_elyON#GaZJIL?0NKbyp+9Uzh$go6}_pKvSTwZ zWv|f(FTIpKfOYx?M%>fz;{Q+D-N$!b*Ngvu?Xyi7FqM=P1re1Lm6UWT=tNRVN=m-T zQ7TG~kx_AsawO^WGuVIu0|pEjFkrx7V;kGpHM@$FoaDqRGBQd|u1QJCcdjJk#8d-40;Zol6jkK6Ng^?tqI@7Mcv`Fvfx-$|@I{!U^APIIg6{d?jWvKI}~^DKD| z^~?vSs28Tmzw8MoJR^Gtd+Q8km@M^dWognsUHVUz{*$FYxqrU&N3v^Dkcy!ghT#~A z(P)~gyo>ALEcYQ>#o3neZXYkwHCLW7a}zo=tA&@U)wQ-qm{X_$@~n2A}KjnYi@lX_(z%29#JGWAow`U%xp>X$tAOOCRx zzN*Vrw%gP%>X^n%`f1P6tbRC1w#-r|kZtVkWXCz{Y1AvVsmhRh%|+C9&Lig|+5>n{ z8EH{|GT0X)+WXfrOFt(=KZiM!TNZMVi#&80Kjf3qUZ4W95Jl+G?)0zMeXsvs%&ouQ zXD#*8OX&t~ObTV(Dp3=KFZHd+t|$JU*i3f6A>WdRa2Q9>iDNj9+WdrYf_#5}*-7Ru zoWeohx}daL9e|A`YWaR-XHtWzyGh<`nMgCmGYhjZ2MuHNp~hQVz#Pqe z9290A^L%v7qsyQ3?%YS~LTe4U?eeY*$xQ4KK8uXbAnsp(z%QBuX_qH+x$htCy?pbE zbnltw-4AO6YV`fdeTc^X6=c;}R&p9m8>? z$Id0O-@DTl?mZAc?P{Whd#vTX`eH+N15~o)-f)5gFfKN)R6ES{S>4i z6+;)mSvo8p< z$k}**|KD8Z=VWa`Dp`8Q_|F(>pK)qASrLt`$^LWl$g|#Y zk$L^BQx~WRE(mkPF&|4dnUAw)wEz6SDaP5;jI+sBv@y4n9Sf}wm}UHX&iL1OwzkCh zzs>lUjP}_#7Mve4#MR>+7m}ICLJo3~hj!_oPZppMMd-GUqL}P*=ESa3#>&iPs6-9+ zL~du@hdJ6)u+MnBnYj(op8EY0-qmqbkRSYiZ@Tx#2j?5ir~kkq*Br(XbmAZNYsc7| zj)@xw(c-?XUB>@tM+d5hdVd^1E$Vtb7aGurTF-VuJpJo`o2*A+j{G7WNf&d=adG1m z^J#RXi5qQ)-3w>9?J6?9B+p_`y>tJ_K9or7NOuygVMusMe#HLy@4i>0JBijXd@%mk z*DcRe_6wCe?9WvPAo~AYitADloxeSl9ERZ-i4VsAu4&2FE;efy5%s}4P<>AM!~yk6 zt$L;Ir13x5aLD*yxY~MqY{fGMV=)d5!-dhe7|-0|xeg+Yc>Vxwiy`seEu?!_oU8TMGKS7QBNC3FAz|Gn1mG1sF?TNJIKuN0;N@#*Tnnd-mUbP@Ut z%~81d@9w{EWA5Mo_n`XwjQ)YPw-wR3J?+Pg|2v)QBg|nOK_`ykIC}0*3Ma^}o9Xz; z?)6FLLG)#C8fVapXzY8IJcm9cob>#C&Zk2%bGdnkefk$o#%$vN21Ty^r@K1C(D?~L+z z%$k`uoh!n>biDaT>_a&!@~r{HepFevH&2-P$iPBmA`3alMN=Cc{T%C`w8JfAt9H1J zY-jHvtMzrAi=+OLKg~7PHCqE%VvSC*HGqh>DN8Bx?{NBD@#UlaVe26;wx55i|F3^* zWH_!aE#zJl`L&J+#bgP}PIzQ=SeY_ACkAuIUkw15B=x=dFM)02vfii1Eh{?I7-dldcOsL*tHRA}aY@VK-n zvp%5N`T(>ucNDwE9{r*2GYrEq5)H@1QDP4|b4zsXS?BLFk41<3v`-L6y10ZJ$89{) zFahc4dSq0XOm^3e2vf+Zn1-G(GE66DU?yf^SK1X}HaQ1#F%NsptCx(iE`hn+Jo~;3 zdg4-JK=!HYXfs8^O7CX1<$s*`Cx{=dW5tj5 zlk)#@`G1=7FU$d9YRS5Ed6R4yt^B)Ju{cUlhUgzsm65$rxn{0mu17QWDeL9RZpB$) z`raKIRI#`5JA}iC`oBlWP8`E=^y>@duV{VD3HFod!qU3v{O_cG{oiT!Gw4OntJcAj z=g^1lx2=Ky?YsFunM^?{hGH0MXPet6M`AR_;QjqSW0|8d|3Qpn9*>SLb%Q$L{rT_e zX77muDbfdZ!=+EM^g*MvY9jmpuQpHm&XK;er7zy!Uzziz0U=Gi6OfKE%F|?W3Z|lT zJpIK4_1+Qbhl&%@5Br&`5S`i66~7`(6Slk6zX+VMFg-HMH|-ZDg_+E=&@)6nCFfu+ z=0$E#Tw(p2{eMv#JXu|)&YaKv{rx`~%;mG(8~y+Pxs%SmOWx{}$54UN9Am&d<6pA8 zQ2VDXi_XQ}-)5XxV||`D79tZ_$U!dhkdG#9WOJqVuf+U!iuQfD_8tA_|0Em#AJhKf z0BZ3+{~s_q6ROC)}n@6*N&u6Pj-Jv ze?S>22jq+%%g^?y^9|C#cqcBgfr{E2q?xc~ou zhyP#nf5HtD$8e0qE^Et1lO^OHjA33q`j3fY$$hEvZ?gP5QvN0PvsaND&8cmeV{UEA z(6GMf!(rY04~MlghB;sHwZt_u|CqRa_IY7jT5>3wa#`3q_p-2M#&C0V&a;?#Mc6dO z**1@aFi!m4_um!9le=nSVdru6QvHCiBiHv$xII*-+@)_hF&xPApR)Rogt~-l>@A!a z+HW2b%8q08(_KdK&O6`#)uM6Xps?Hd zihFYghdqUZe0S^e@AV(Fl-Cj$BYXTG6LW@L9F|PDBrNT`BxLVM4e735KIX!(BK4xM zvgo3ampUS>8a^tl))ta5@&hx0BC>!w~F)@NQG3X=T?ZRUWmaq56D*|k%!dCGZV z%Q*YKrkfA+&QrN{$&WT^hOC5Wf2nUK4^HDg9W&5l&S@qY?f;uaM(2OeCg)%-<{|p` 
z=X^2)3z3P2BIBn*Yd4u&a*dzbj1ib~(SLr~boKU3eYt%7|FNkdk9$4}P>4OV>?4?& z8j6_9rB zE>z{wAD|KyIK}NW&Y%}(aSnY*=u%$skL#yX*i$hS?RDzkO6|Y=z7PMU{ZG~ZmnW9Q z_5JC6h6_IuqcH|!F%C^R>bbo4_Wu;=|FXBSx04-h@Adz+c?VAEpJ?~$&glQ1)c@5t zXyhM_AIFO)4HJ-#h8+2)z?hmjT5}hjeLsbHD*FF_Hb<}5K-ZJ{ z;KtQaKfC)*<81L~qW$8ekVR%!#6m8)^hx^pA;}?+IUmtkJw5jsfA?B@#9oLZ6eIcv zZ+#aXBy)5o+`+lNF?05gq|h-zIiIPHj_PXth|0))Wm2e#WVqdR$^qJN2>ax}C=Zr4 zD+e|5ULXBUm%K+mRC)G2z1kdlCOV#)5_%>2u15AcvOa_EXjY_`N)1h9^NdI@l}hiQ zYVBWYXgix4qW{0hI66_u?%2&)=jkJjdLJgx@I}C@p#K|9)-8-$%5mC$z2D&s=p#KSDn!MYvQ9 z#V`!VNQ}n+@%$V4r=!64*Ou4JvH#C_zc$nS19Sa6`U$cz$`fhU*@v%tJl%!5R7mv&=`a7vO!pKy(Iu^dF4U`TD2WhjRQc|G%lYGZeb62*oHt z87fhOrn%~_9<3wQ4aw?-RQf;izx@A3xDU?fV{TM0)Qh7TZP;aOafmD#Z~TG7 z%tz3P=-&X*KLRTBJEC*D-#?$X=TiOOVbWMxk96_Jggt>Jf4x(^X>9>>7f#_cy1z9b z43q!5zHaX=`~OY<*DK6foI@WHy5(D>AQep+#s`_wnQZA)rq$zZCzbId`d^3Wf9n4a z`aj>Lahkfv-oByYiT?dFjLg0?Jq#yDVl>8JEXH9x(l7x%_9~>4S()B@eh8D9qctv5 z$!U?lcBB14XD_Tt2-DfSogp}boQYXj@+;}{rakjT>EXEdn!;^1=3vk9bo#&aFqgUf zoW0q->0uu8d{hnBuNrz+s1&9G8Qf}%?+OdaOf*#bhdG&p`Wk!vr`nf}T=qP)Gk2gB zZ77{_S16lvmo*A^g>teYQ`~diquKs<GMMG|jO8eU|<2WXnA9W{5Y_ znGIwId-W0f-*MoC{qLw_u1A9~jq@k<=bc?$lVoOP2z$;5FWz?gmXh9RU%ux;_AADP z-1jqAp#qhtJ$Ywn=H9=)zs`17s1m_s{<+X8z#(pXZaDPmyF?Wc=T&-!a8EKxv8br?#yB{F`QN3Uej)YlEs# zAdV80p%OKyM>E>cG~fI``v32D()j;4{p)=E`fc?KnO z-tD`TYbSfz&tm^D?{!Q+V~qD4d7exw?X(|J{yWFN4+*E#!!OzAMJ|cc-(6{cH*+e6 zVi<;FBu1nAiKH-wjLyUxOOC^Mq+tTmF&R@Z6+LxHVH!CdrN;aptiLFulTkJ+v7fmL z(fa)v!p+1i%*Gtd#XL04Q~r$;4jMzYpq05TRoy>c`A78$_8vqux7S)j_g|(SAhY=GnX9i$50!2F zx+_nAlG~EM(x*iGHG1g^^oJ|;hbzdaE{M(q%M~UM`6xgkiqI{si^(qaUI`hU4OB)} zq6YP7MjH;Hw%6P~ImUN8LUy9zw0=cbVmQX!Vja*y9A`d(jskrXw3Xp%bhw6r-VjnK~A zfof@3g9C`pN~|0D?%d}4=l`D;-x>7cEY6`13BUL5zi-x#xak_`{x!{mUvfuu>O6jamGyJin+!W*k5Rzfm5C( z&2(vUshEZ)W02-P;|#LJIHiC6zwvTA`q%%THZH*dVQPDgOO6|toHR}$8#}oj za)y7@f2&u2VY)b@y#e*cIPJz{E#t(E?5`$<8T>kib02E{dyIRHwMT&4F85lpOgVK% zMYMK)7Pr|bDKU1R>%B0CeJ=LT@LtA374y8OF@9~XGuOD!M+O$6A@`6&UHM(+%!`9OP_;dr2lE<|6J6s=g-`M>Z#Iyy7Wga z>e9TMcpJz@*X_zp4V~gyVs1S8|Hm=rJ^o2j!u>dNc>(=To-+cOPonA^{f~Vjl{xf3 z=;Bser0n0PT-}`%8fGMhQ`}CY|NM~o#s$^^oMG=p`*`C5v`$wBQJrs%P@%O!#p)o` z9aDDEfao7xO=pcS&MCv4;yLWPlddD%**nOy;yi~wB>cfQdU|+BCR30h@0N~MzLmS^ z46G0Im%YXhs6wi6Lop1)(er@wrpVD4gYN7Q#*+R053+y#p1NR>zV>+bU7mdc*}wkY z{=RhPJ=4{P_0~T!m+SxR%b-7Do{Fk8bqk`k2T>nr8n@|~fti?v*_eYKeV+DjC7B;^ zrY3v$#m0x^d}LrDGLeNGMC&_q$$EXh=>N`n%=sulw1&JTNB$_1KhngTE`N|^$>yN_ z8>L*kxKDdn8R?)A{re}{E1u@ssBMh)97SXMLiQpQqXf;92ZU%2pnrcIeP{pvI=avI z_t!=L{)_$*9Q`BrK!SE%AEU0u+;^opaB^3^z5grCPh6+`>i?CAbB{13{3@Btk9e*= zeF)}y^y?qy^IPaSkC}VmR$J)4aa`sdg3w0}9;Kl;A9 z4COmh`6e6Lr;1}5YL(OJWWBPJy-Z!t+@O3Mj_Qo-M#x{<*bHrJjeBUDJIMBC>;LA) z!c1XjVK(Mq&q(EOxHAWt``0h0Thq%tA64p-sO(pg(fZ|5ZD4c;`Mx&qPgYcVf8}Do z@=+C)lN(2b4A(70CbE!=rC+x`V8qDKq)avE>3@+e%2q4$2lKlN>7S+r|-T`gLJsaNN57f-B4ZHk9Jcm4l!#ILH=EO@%Mutx2@?qw`ag6ym zs_L%@`%#%}{u?K_okSN-;UDMUPP3muFY48o-@8n>I%)hXeIWLy*h|g-mks-e^#$Xt zFEF>i-MuA+Mzdv{CQ);8f-i_G;HV{8VYi5w0CWswE%SB>*>&XKN1$_T_3Uw zt`9j~H-;r6KN^-U{Aj3f-z}bF`HY*wim@N^-!uE+hF=s`^btD- z!cpgh9yMlY=C@0nhso#hVseO zhHN{n9Ff!kaoeHe&BJeq{Ttab^WxZ51J&K0VG^lJ8vimmK~-o@K2mGO|DC*nyjqxa z@+;&$Nny_}|Ec%D0BRk{q zxyE-H=3nP0gvBRE$Fk=R2s!5dmtZNkWyShbl?h?F`REmS1HwxBhrE)6@HugKJX z=JUeNsrKUJjEn6atzI8T56OLRz4p0yXsm4d#j)uBM2(}pM{8QF^Uk!G$B*wbbgw`a ze#r0N(S=uWg#QI-DpnSJ%SLEA;=kahYZ^b9?3;2sn5>REENe3@uNw% z#5${Qjvc*{e3`ruU&q5Y$2wO==JB!4i^s=~{vwhej~)H?m9fsBT^l=kGxIgvhuj(K z{Q9KW(Mu=AI&Y0+daUyYAB!Em_fF;sv7@(tEY?}dzKs0qEwQ6dyU*R^b>u+u`O&e? 
zYd#V?T7kQ7j~(4KCf51<#Msdh*Tgz6ye4)u`wrLN!Jm68_X=VD>i#d^9_xJK_L%AI z_+O;YWAgYabY6MAYj23{b#%Aq-68z#ABp{Y68r5Fg$X&6c60<$s zT;Q1-W549zqD~zkjhfT1iv3g^ehnQj;l4-Q2gt|e8{HBAdo~7Pu4?l?yTXsgDocih ze`PN}d{r!w|8xAWCVxg!i^az>&%&;ROJh5@T_)a<*gk7W*fxEL^&vyTwd`AFj*cB5 z>(Gc6Tn{R{_zPG!?jx~Zl8^h=-@)qSAz{^g_4J$};c502Q?86X>iKiiZi;=4{05%o z{#|kbsJi0kNtZ15jn76<{_P`L6q3Rq?%5YA{}(%N?3IDW!?CdJM*n8nog9{jC&T&t zSFSlPe2BaZA4Ze9B>%am!rJ@3AJ*N!FkHp$s5NXG-be{eDzY^{ND0?-Tg!g*uj=qR z?sSeHjlzvk?(_>LMrL~NCjF1Qm_LE3cm$;@2Zpj&o(hjKmp`2p`opbfu@hst<0i(IkDeG?F=k?H<;KKt^kVuwZnf5T z)LGwgG#Fo`SpR|O|KUw4%cczXrK4yadqFrMY`elA#j*6%`QhlR;#Ze9HAii|`gON* z8NVvl`Lr^(UO7)18|!>snfa=4bSq7Vv87kNW7@yjyc5+1toN*gp4; z*h#W0a*xvCIqxhj4znML+)i2VOCEdA+;rNr;^@&^Vy7avG0d;t5^EYA!fEz1k()G) zzHOA&jnh(%KA9 zWa$em8kQ6m9~q?oMSpwJx~SuW!qQIm2jqW!i{;MSUXeO5tQWJ&rw;H6h?8YN*ic>;lNPS zn-qSgT-W9e40U6#i`9=FWPR%(?d71*Gl7LP~4}TJgc2d@c!Dy z65;lcdtFybma*?6%ZF=Mb5p#3bUw3x0r#bZ>V%X~lj(fr4DGb*>*l6dJE@%(zR~qf zWOLV;*g|IW3TX!>;+&>^(l%QXT73!QT z$!oCezNBE-8NU&_l<`|($QA!R4h%7u69$CWnBPEy&Q+s-2c2d9PaI@!*-Gz{P4A*p z_zt)2{*}J#Xi`WLPYM=?q;Ni&V?L$i%A{}!^RoMs0##q!_E2@bZ$3`{ODD2&v^kuy z1H$jPjnuB(gKGMmFOc8ED!kyj*OBbnE1+8uzZVbT8))@zi%#AUTYTzhS<8v*T&Wq-Vn<_JUDD{?#ITA zYh&vVr#g4^n%HLcEhnyzZ6XU74hU;p`xk8O8yt3czuhQ>X`XmBED4J@p%p*CR{yE5 z9rkP}9hwrJV}Bk;@iQ#?)idE2o0W~fTd4AmK#@?WO>mudgzGofzQGogOkGogXGap5z{@H3%##xv@iXF?0v%C9Z$ z8GVjtLdQ7&HP@C`c#dC_Rb<_~XYKv6K1*Acag9w^9$pR6DGm_&xrJD{c(&_>5;m)|_XreudMY37dyM6SiP$>NBC}>@#6o-!ox5xr4oU*fU}0NM(WC&0aF$nXqT< zGhr`ET~jvanXqr(GuB5uV|~Ok#{SQQ{p?j_wd-oeJQEI#dnVM9b?MTdY?%4I(0Du} zH1#bE&8IWM!E+g*<;23!%G}1i{a8lmIFX@HEC^ZY3&NtQ3&P?l3qtnf1tDkJg0SS= z6JcrQ0^=WZb?nP$F7O}p?}wGbee&HjGami14u?Z+1w|73>Zyf4Id zractfm9il09=#xxjFbOzzYyD7_=Q+$^A}=e84Ijo|6cf{@AXgkGX5F=g5_9+J=li> z_*Z;+ocZd~!`5gWeNAI2*>kLz4N8kRmamQz3gOrIYumyY3h#%soco7FmpANP7 z4fAi&jX$97-ls$T!%tIc4hUH}1HvLY^u@@|)&IqkHsfDo-etzKxu*t%q zdE_eg)#RF@0U^KA__t(0Sm&DcWWfyo!I)+IOKzGMjeir2e-pwMaw~fg_ig;QlRJ#9 zi|gsw+M=^462flwlH!23ie9w`}tRq)xy`*)33U|maJp1 zpFzJm>)rY7{`v6?`q@mnPqP2~1ODy&JIL)HlOFgp-ooGT4hGy3;)5{?ci#ib_rj>ckwKiVl~!bBdSn~ zpW;RQ7TxHhUI;UVFkI8J&*e;{;SC~!spY)uf;m7_wSa18Rz@{`o`oY^Q?s# z=llNW`~J@9VlU#pjsJFXN2PPb>aERhGZ#tjW-mE@zVCm&?{D4f8S|rO&5!n($0aM+ zE4lCIUqx06UsG>>)AhAv9ee!@^P98GZ<0;(%#UW6AI&sBO184Mac}3}LAK6K3T+ul z)-)xB4zg)VQfQu*6b_Ot3zMuDO|o7zDKwCc!c>n;3N@)o;Xrbd^`uFmVg~)?tfa7? 
ztP-Y_EMwnCmWyY1zWyBcU@vx{7(1~GTc_&}VjH$&6AG~zTd;np{vQAnjqP$I0 z-pF@^#SORxpTpO z@f22L9X4V+c405dQH2h?h~MB3cnv-HGyaBmkf^V40WQHPT#XxW6K=sBh^sHM>goT~ zAB)Ls_MGF^-Ji7Xo?LduSnn+TUtfa!pCJF6SLMEn|7voL@cH!#VXf=ek?Yw_nTLDC zeUCoNceF(daKkm)k|&LUjm4X}FZ!7^BAax^tS`!PmcpV}&I^l|C57x?Tpn`PU*>ER zdVJ%+WzRW-I9wQ(mpX5*VrW-_b+IzRM$oBW0QuZ-NRpW)vev&Bf?3$ zE1BDZ_8+$~UxV+C4e|5GXxCAPm(jgjT=&xVd?du#pY}-|T+UHD70L)+RC+8k1IK$HGmXGm5kS`8?CT z?1g#S#x?IfZ?875T6nVk0Xkdm(H@S%9J&f)yk_Qu=3wu4UCWzE{qMEyFhA!0_obLy z^5^Q+mCT;a!r&b z`pwaI>>k3N&#qq;*`xBhNo}+>GH?}phBeG}=iEeaRicQ|)x zx9DNltG5;Spt9;_p`9Wed}OyLrkB-+k6){I<;s`YR!9u2G-y zU+udWPWFww=k~*%ab+y5z0_WyUnPdbYxT2^aK~l}kWJ#>@z3wEksq`NZ^&17j_A)19i5{*V1Kln#wD9S5~xkc&TpMTOkvv{I* zYjqU%+tNfGx=s2=-y%x?cP`b|4tKxmk>T3QwE4_8kT+p4^DX3UxC1>+u`r3eCo;!9 zzqGqRnA#JbpSfO|)g7YOJVysWHqWLH9c^x(`5?bGVOoXlkS6WS7rFLR;`%&!2`SJyIz`>*hqFiyfCqa?7$E4BYg0G&lB2zWXnrA(HwO`Qdl}NIV>AP|CvVr zPnWiWUT-BmTi%@Hu!?!Lytbz7f7AZYFy}ohQU5nl|JS$4NYww82j(Y+X#Zyswqg6U z#IU2@+JZLcO_IB;XWMwKhxcP?@Kd~i7x5eX7H`N~-Q*we z8hWs3Yb^Ykd<%cWJJ@>m2C$MIc!4^QD)EXGo-z-p|+!Mx*PBe@0J@wm9Yi(Sl% zl$X6^IRgR~pY0(?1{&tDuUEf7EmQ zE4kLcJ=giq=X&jTfl1*FefD3Io7fAF4+xvbT}%%Wa34&wcF=^f1Ns6jPqaRAZ3G3wBW29)U|ma2ow$$k2bmGkIG*sDg; zk)W7)hk9)n|DEPJO7itHx$PzQ6k4ky?S7A6BS~6TlYhlk%!9CvSv4JR!1MS!mf$*m z`^XE)mE?y>U61&OjA zJFTJnr8Pv(P54BMd+yghl&G_m*$s5Rd&$@ES-P*7BpcHR9#V)xyD#19jjg*kfP24qXpaBzBK@)|lv%;lB&p8->69DShV2 z;Z5eZBKy(hi9J^?O^nxH8nTXDYW{CYj7dHC{>*L!(bm3d)U-E~=5!##hB^UkMT5Z00F&D$1`8`w9Jn=<{MkKD|@MLb() zUJ#0~ZR!PKJ9ac*5Q?$$$OT~+cGnLMC2h`TAom`3KK~i>z-R6M@3a4(tYEL?zWv$F$a8f@13oa1$L-P2&1I-O$67!^MLi|aT;V0<$h;=#m34V@WBJDc$CAMM* z4xk5WlAF}wAFO}MIFJ6%`lmGGwshk*a!03g&W~IFq;B0s?(Wt9!5-{I=`nTOx%c*Q z86Q;`6IEh=r~c1jbz|GRXN(@uuZhkWt<%qm&KPZAkIoov%GCcM4^CFcW?COT$9h4} z5S^jgFjjLN_n*cRb}=$)HwGd!#v&W!C}|w!C~h#d9!P9*nLjkoDi*1 z7#Ny$l$-KXoo!`aoos71&+fY0q-{G{b0RfVA4|3VJvAI4>-pDlZ)9&E?-K5_n2jd< zD>h;;eu=D8sVaa}?T0f;kb`V2wU2+vxJ#9-)UZjMTgc5QnwlE6lH2E}8aJede0w?8 z@Lxx+%{3=Ut}jgWPlVL4f&KUL$4A^}9e$2iaK1cx3+}^T*aP_?jKXbr5MRdAcmUtR z6Ig<^*o-o~f}LnWC4Pus<5di}R(*`on1M|=6Pe}nNAVcGiN6SYEBAZxS&YR@%tba9 zU@M--Pf>vr=*B@@p)6g9UgjIfzmhR>0zQcc@CAGoPa+2^u^Z)R!ZG{?XE4AsU5rr} zi;0+wnfReP;rB`6NH!;ni_UW&JOEiPo{cT&7S8ZJK1|r_a0fn%*?0m=u^B-e7vKtv z!=3orK>AbYmc-u~pw3DW-|ZKLkCWrL-;7&v4ZqXcpcl;<{a%~%Jo(?WQ`ZaE^U6gb zh5VhY^>bmDV7&0A{o`+oHiYlW{wnh&*ooJ08IGa^J-7ltMH&7R!F4Odo5wtpOvK&X?#2C>fXnfBala+Z z&CD0zkL<_U^YMslZsGPJ{CDoZ;I8p3cJZON8uy*cR$U3Hy%d(BF_rC zAn&am{bWMq0+N-p#N^xU*`!SG38L&~JE5t}AV3WeCryaikN6=56mcI-Ge zDHLNT^DgY>SAspvdr>-JQYgbd=5kcwTQXlL#~{G9j4qN(X&F|uc-ha47{cBe%2fsMr3tY5&Pxo!bB7_D-EVFGPE&_Oh3r zwLiK~`%jj$S8%W7zn`pPudcVh$@TsF%ct4jG=pw*mi7K*BYRVZF;=EE&}0jHEB7}3 z?PLdgg>P3`B0VdmCt1o~Hb;8TliuWR-@0Uq?=j8yAa}4AC;MK$^)9ltNP3n?PuF*l zP3+A%(mPLjlXWM35AF^88_8<+n!~yJ!5;`E*41jO1ua zp(8}qAaM*fF68z({C)_X6FEw~ztq_4@7ljA@-bt#9~<}mo3YrraoX9N)57)SS-fIx z#&3<;wo|S=o@SrpHDkk8V}v4X!*=X7hu)1##C3x);bLv-1>$VM=Uw|azlS1sJ+$6>CbZr9-|5GD zLPvNtye9r~@pt$x7kHlC59q%;2Vl>`=I_J6Q2L1Vm1_oveP16I%D;6dz2({Vd8W)LZM@$d-tTc-E$+{tF-iyT-^l!HY4J-gWuBX#Z2p{wM!Lz$)g|V`5>=d~YFWCQ(GP1}{~bZdFo53omL_14qxj@8@G*=xz(lxr;yd62y%&6*MH z#U2!62X1&+#b#!LCo_|B!3GDKCCPTz|ne@>!j0R>;eri}E_Z70Tyt z@fQBVuh#W{bKM*A>rvrf#gp8ha?OwMJ@?z<+DY!Q3$F?LlzUwyuGRR9XSm9}26^^I z;S>2^D2_Pp6;Frw|KR#t#B;f@UlXq0y;eqHjF+=|uM3Op-(8ICh4Md^FfYZjna2NE ze*Aj>yEFc;H~u%~UPZ2EUvu2}|D^FhxlTD+f7bZF&-@3uk$n^QLjIe{Ey8cDH~-=K zZRB?L9W%^-6q^4q7T<;4^VJ{NgS{x7Zj6q78AC$3uoctHf7JVyZN4d4-6_u;msd{8 zGh`im{aJabPo5&1*qgZ@|0zU+@ILr61GLUj^O_wN>dsuz>we{w1NvLiQyT;*{2u^YmQ z4CQ~G^3Si;wSUB#F^Sf2HKQ;|l{;~PA@{g_Olz(jNx*>c| z*d0U7L6Xt`_Ff~uB5g`imH*_#FiCnmimzZ_it|BHA+Ad7XRbo^Li#@(V6H_Szj`z< 
zH==0*{U1I!pYYVU(B@w4!_0rWPg~w7XBKD+i$;ZZH0MWW)@v)tgN62r&#-44jc7m( zs!@vrs2u4`d_3kEzlk#DQj|LzcppmaKi@rb6kVn^ee9@EJb6^u!M=;!IdxPha?br$ zY%jLntIV8y&Zw{nTd)}g+}88km~2c%=9lTin;T!(JW3yagt3D)X`Q!+)mb5|nrl94 zmNjeBtyP<1&Du0`*_GzBm6fH+)Dm@GR;tMwEIw>42$p*0a&+J~_>}zoIXsFL_zf_8Jr(Tcb6pnUiUz8>XAd{}qFb?cI7J zDSthV6{te=FPAJtXUZ-{cC$3Y66U2?#xM8yh_GDxtsqzGcjRFeR%1=RIl^%x%nh1< z6n1^CwO~0P3mX<*8a9rGH+x_y+zkR^E|6F2d{V!(UT3;acOi+wf^) ziWztq(=Z)hFdlvDe0?OG!Q1!~re9!B43;7dk6<1$unW(l9X~`h8j*y!GLkh#{WsJ1 zNA^_TA4?Yc{xj5nW!7I#R+i?N|Cpkz%~t;jx9ad6VYO@5r1}2o)_;=gI(>iNq2Q#s zVR9q;rnA0(pY!9~>wH{}>u@t}$0zV9_%(a{pYTOIif`jdWMUDPV>Jq~4STTIw<#xo zjC5QHzX6Z0$3^J=uyjQa&f+xwgx_QMCE6(bjk&1vyzp0YJEij9Nk>V<2Vnr#7t-fo z!)f{)q;gBbMOc@6Ubv7Pg)z7R*WsfWkBP_@cg}Hh-6zfqx3b62m=kATbkdwS8T~I{ z7WZ~-@>OILdovEEXq!=Y(s{jOoEJE5K=>s8YWAAL&hs^oUrYX%|MisEFG%5DmidXW zFO9xv9DNdd3HDG_?8T1J^i2t$2sniELYNS`a^b*g;X8Nxj`mZedFKl7n zDr}K!wiVESQIzbM=XvJRe=Vf{%JjUmJ?|XP%Y85R(iz@oI{g>QnJcE!e@&zRnnM3I znf@!C{%ZpL*AeAkeyda7>*c)$vXN{$t^A*PcdXPRAGUTW|EKh+xJBoKWf>#JjRO~B z5p#~VAv?wYZ;Wr37(3>2TgGn%mNVzgHwMly_RTZ~UTEx_Yz*w2fOS~QT;SZD^~RnX zg^k9*h2q#Gt}VvQn~gz>MjHbgH*e>@jeGG4W8jm|-|BZd;em=H)-sfX`wZYL@ z;$?ltz~`LJ!@iF_I-4l^henmSqO(M6#(q9lJ?`_d2Ys`ot|K(y&d*!)Zzehbf#T3 zzp63zf{ZrKA8T*OID14=?G;IR_e{M~dWn9$z<7H~(&-5%&48i0`75PvRt!> zTzvL|kWGh?gGiUL6w8o{e8_13Bxrr=f-%M`d zzm+Uv-$rilqpKi`*>{q=`0pl5*!PfoT_3$e8T&r6oPPyb$-bYg%9I9V4f_GImVX^t z&)z^bdfz5#(Tsz`r7c>S+t9AAh}!?Sa+Ebi*`KBkAhTzw1IQ)pOUY%K>Hu;%`wDU; z|2%RP`)YEH>+`jbYq1XNQGgBDh)pQOW^BP$6k!{-W5*$Dl&}-Kup1@VV-v+*&sRFr zKRz5Z)BhiH&s>r3>_K(g{-V@SB}{dhu_6wfHOJnj{^MVd_s=(Os9H%pwV;^89oq@J_!$RxsrVkG5578+w zZ=PrWvvzuuH57%tbP4R+XAHIuWN;{&V@*YkwH4T*eJ?Jy?&qYn6`j^tlvrz#W~^>c zbvb+K*}>Mq4i5Y6qpm^)Dsf<*e!#TB&bb^MYS1ucuz8=s#%zP_LmnIsa%(}8bsWuP zJ38?H@bxC}aTRyI@6TuOoj9@YU>L%9@-*WGhmgU5Yz%l3n>awg0Vg=Y5RzdM2N>dj zSwft|nJ|&M)!JM8zIV5}wYK(_dhdXwNFrFqI0j_L1YxygPsTIhzTxIE3HW{M$jN>8 zeLnAfq;LJ})T!-Xb?Tg|I(4e!kbtZg`R*GWb03uxa^B~=Z^%8*{s)kcTR{fve_6!# zr}e))$4huAQ3_@2R;nB-Ht_vFR6#Y=T;TctFwcLU;q_-O?f-N5()t&k<1Lds|DXHl za(~3n{s&ul{txooG(5}wq!}P=N5)6hNnfCgzCbs!=VA6g*z-{ZS9lZa7F!l(T-w=*L9J{?g;QRFd;U%ttX|5srcf|1~g z>}8H-KkXXnzl(N`e;>zfNDuy7+}sDMxi27_UEC+irADYbuu}C>FVezK?EWFaXu-F7nRfB6B?# z>F-@sdB#PRLFKB8>^pancl{UH^Wq}!`Y);l>Y)zzMq~r{@z=iQBJ+3`6$hHLS zfbST*N7*CI&HOiGet$FLe`E*8oe{?W&|SdzAA0K<|3e@4ei(S4`7ap4J`5u(KIHlT zp+=cQ7=tk5{0KxL2Jx=tHO5KeFu&3Lajwa~fNFRQN}&9*Ki5vKz1N`%zR0z;l51cW zG*bVikbo4&2Tj{Q~qyo~)#+=k&p?Bnnx@%6zEaQjD&ze8EwNt_orW*fn*AL4co zZnr}jZcCi|1O7)i$4`l6{g&h3!S`_cC^Eo1k5t=k$;2qBR`GoN8W;r zAU}gV$@BgjltT-6AQPSc0Rr$E{5!C9V-{ODW?e~Hd>jV2eg?UYhPa-Fkt1A#A>=5> zW5_VqU<4WEIEIXK{s`fw;1PTLHDYfnzx4e9vULyR z{|MuM@NO7hUjI_QyhcB;dq~@euQSc_pT0ylvgZTxM;YZo8L<6K)<8AS|7P?<7IJL% zR{kGc|D#-pVS(`5+zTKFbMNELzNN_F1_RpOxqKSvlYe z`K-cfpH;;9{6U{pQ17!!=M`3{Qn?o7t~$9Wz@*&O|#Gd!TO0M}GuHiEoVfMjl_O5b;IVvF}j^`yfGtc#?!o z5iU+#3CQ^azc;#(=QU+!A>W4&puCEA@ocEs`V+pXdyGA+KKl{Pz0zjA@-r8EZ@8@g ziU0S`@f@vtUH^q_{J&n;MdaYkXLQ+l9xGLC)=<;$+3S#fY|L>^o0WGx?@IZg=f87Y zyqa;;h^60xqs`KlxS8K(s^FWbTE6p(@XcTj9|`^TbBwQkVkv|3e+B;H*R>Jp=3|WC zUc>wUgM7R{EF7C zR$<0!-p8%xecWo+w6UMwD)!T3U%g^*X0DcleZ#VMtmek|G39Mut=tW(<=V2EF~Vv! 
z9$ZZye>Huz)%0~&tMvu;^E<+Re!Q=3g38^iRdLs9_Q+bzS_$S8_LIkb%qOt_UKR92 zRx`KF7;kVj&$`v>-}9mRx>mC$Yqfg8SH4=_YWC2tU#+(6)#`Auwgl;SuU0$6aF60A z(#&r$Kx!`E`YpfD5c)q@>ww>3;u;FB)}Uv#2DX2wG2|%eghHz|0@*KqOuxO2d;d9{Rm?K$g5|4^=A71)hXpeQFcm&Yd^~T*GE_oEki1uY_ZDDCsjTeV*FoWRleJ5Rh>+# z8vix(N!I^WSapwASoJSdSPhXRWx)P#A29z>&7N_8$@~8c%zs?P{0IBPc@KX?J~roU z-~5qF^B=FPgL#(!-`_{z`~VNl!RiwlYCfT1*Dp0vaDv}%IHA$G*EI&=^RFuc(f40h z4C2@mkc_;p6lCprL)qYX_zgKB2fOR=Ys!7$HRU1m_r9ir{jaI;@EeSS-(da28?1kL zgY^$@Fb;l=^^dQy{_!~|2Qq*nbT^YJT3px(|rGVS{)yprmuUN^@ykW4YlJ^k_VtWmuX>BMtZ&iY-=e+0r6TNw@FDlNfz9lPedbrn1sCLibM9AiocxtCxsTie zx5GWKADRf?c<3$e2XE=y96tlK``=hI3IX!6?5WT8*5j|_jsM-_UgXp6YTTwS1P}rJwG2{Ebukn&$7O) z`CC`0>9H%-nt!!go?5H=XVrOIgcPkXe6;yc#ya-w^+8$j?An*DJyT zkZ9)(Izu1pq(=`e@5lnFv|RARCzn2%3l>_{xiy&jVRYfl<$9{a=#E| z{v)c=$D=CqgjK#d%Jo01N@UfBsH$HaRSmNCy;0SjpJhGjs2U;-RwMB=MWSkcFUoq# zs9Il)s%>plo>kSBcTbe@PmR@1dj3$271$9~N5Lrbe+^a_`RjgUP(A({t9MPK70isP z4}bkDqkLOaV-3DoV+|pPGol*V7v}e6q8gnX)!6Z87 z8B^-IF(p5Uu_j?mab)cMn4(u5Qv?})A)+zlDCvaeV;Z>-V~iKm5cwKxj%lDerheS} z&cqb_V2pc1Og+#&Gp4TNF?Bu?Q3vS+KB%?)7iz6`q;F45-n(M*ybx2{?glHXzFpay z^DReLyPWGf#darIu0*?XFZ3(#s(dTI*<}@Y+Es|ZqUv@PZ^)FyBZ#DSL6P6HD$J|d1pKCe%jT#xm|4s z0`jEWS^xg3e8~3scKKIyDuC?R-l@*(UR9T?Q{6{$tezL!)qA8}!R_tpBcA>h?HbtE z&b(idHMFXo_h0$e2_jl5W3g^m|kqsTGx6+Y0x`#-<9C1u6t{fa~4#V#e0U&Afyx`47b1mxHekaK%L zIr{?o2i$Y_1oX;%dDgeEKL&+euQC50Q1R}7+@5ZgAWN&eRmOAeJDjhm&$TLvr>Z`n z>XQN091o~=K0v=ez`iO4Rzp{SXG5XYbUvWw{e@P`TtKY{yVZ6i*Ya!$$ooQ2K3AdD zz9rA{uMMzPCZLYZ0d)=*T3wz(s~g$#LO{LLdoU4DpTArEg8|xqK!fjg^KD%~!xsV? zc|V}gy8(@o&R9C2uqU8MFu?qOK(SDO^{?$pR0nwH^qNx0+DM0LLLI7hcTjieu&&6e znD3=e)1k6-p4GagLoGXc#P&Z~O+o5#zC#ULimdvbMOGa$?}HBI9_5;u=}?ZVgMMU( z9IiYo`$!K1`V%TW+{0Rd4!OBTi{I-|(Q`#sA?X!xjpkqI&=`#F&$B{ZJ-oZ24?|i* zxDBr9(Eu{#DYBA}cPK#{eU?1L)^zCm$jFWk=CnKXBkTd<_2c&^*xBAC%X^f(AoHQW zphJC+pJ0BJXD!!5PgjS!{T=kT(GRz*@-}4`lv<9NHoku;wsH#EToIirv|kd$3I<_$zg_scc`dRld8}sz6pYx2fv5m%e=)^IyHp+qS9h zxi;1B>s14?ac>*n|F)?a_m(|v%zw4<{-;fzqiyoewaIs~jrE^i`QK|(VBIg(f$Tiq zrmoChbt8LL_A(dvO9iWY)psz<>c?$hRU7jkZ5rC$#`@hB6_U`Mt!c*w*2MmOsr+}6mUEGx9n!}re~jXmz+`RBGG z^KL7OjP3L&{+v&V4?IeygM9z&;r{34S=1*7(z(UU{D)VrOs{gU^D@@+D*u321&6#U zJmO{jpI60{xqH7?CG?$3k!AQTKi;PbWM#UK{_pFmKHSH*r4FkWx4JH`>eF5|%zM>% z+)Mx0%lbdJ)e`YCRxGjF)_Kvt#PX7!uf9+12OXAwr&obPU95j9u{txHR@Vlvy7AX@ zl~=u<5-T`ZV)Y^W$=3k&J$Tg1`lmjAOU}#v&&#v2kN&ThwWVH#XS|AFkIs2n|LbM! 
z<5eQzRg!$A{N(wnepUE=D!;DODy#Ofhe)YaGvBG|L7%G9POGurrv}{XgFe-jJFM1u zAAQ79t9fUs)r54O@0VkzPuXYsd7k^|Gx?P3a$2q<{mMDXnBk0%wIf_Z=Y7g2ozmw@ zt&)&WZmzvz+=p==TKR?skptA@*p7aU;vQPl&-30#pT(!>VW$--=vR328?0IMF^^3h z?(ivo*e4(7y>tDHA$)4v%Qf?^PXXNgJiptKJ##*FpY-wW%%{#tAJ>eJd#6u9^y^Ld z6sCRcS1I44L7uP0R_9Egx}f`f zpL(E|YdPpCw)$MfR)3<{YJ0Jdy!FZ3%>BK-*lOpa6n}NG703=U#to|Oa8UKn6<~(#a7e7pqim&ZJ%19>;>*GcbWT3kg-EhRXh4r4K?ffsH;BZzZka)c0{bgeOIXn z4)Bbw;=We0C8*NtXy4qovJYP+dwt)LtCaJ8+;XjqSh?>k$6y@c%rr3CWN= z>t5y`i;zrLW+jooU~ckN5Sc%(Iv~zK;cNxJ|>C;VbY6 zlrhgTPh3}B!`$d*-rwH9*yl%lTPS`T_7wnAUUU&l7)pgG<} z*stQZ6dk_`uY4+O9lbNm?^7WEfGo@5cPyCW8gN-hAIRf(A{^Gy`yJLRZOCHeP1*cj zMK-@}fy~La9wCnBpdS7ieDDOg;4$*wy@U4_FiYB}S?Bl4*COn%AGORn-V+=T9PVe{ zuEZMJUt$d(DzQcm_A3OVYkT>AvzPrFdKH1Q_&vrr?gq-?^YBIZ3OIJ3k~7%H3#wja zBXhg@l=lMnF{En`_pPI>+uJe0ee4u{a_(CmzFUA|+}$rS2NCI4OQ>JX?tV3KUuj(D zv>N8=vkvyFE?sKXZuhCi)vxMRPOA!km6=Yf;=O+6eX^{wV82Q~=vT?nez|A*nHTD3 z&&_@nx=O8rBeeIErjR&1kj{17FWx(?Y-AQ~BEj z^l?QCB32|jVuhKf89UUg(FDKgQ1Ee$;CC3mL&(9quGT=1IlQ@u)pwM2Q7pLYMfR`| zuREicXU5g)T*Gf8J-Mm(L@6t!%rDy>ZGLKqxJYf}QhAsEbq*XGwR;5tJ+-vz@!m6MiE6=Ru z`NzC3FFR}Yg{|6P!m2xvwCc+fR>Q%t)rf5JT*iCe%hfX2qt=4UnbTw5_l3*lMf$3f zRy)#v@Nxy3*D}|)R-K8p^ncf~KR@%p2YS@YKs*>sT7CYc)nC6>1MY}5cww!E(rY#R z!CLx%^#AAAYV>{Pfin|Un0kpobUtjwScDayW8QaV!b)azD)m9q%G!Ckvgw~X$cqzl zHe4px`M8z4F2e6$q^$g|%T)k{YcEsLT-+*N(B4u@DU#{+S*y_P=FMfl_zH&eB|1RhK|7EO!i(5nU+@E2D z>mhXEa*e)wnZ~xnt?P|UyJ6kO;FT)e5VeZVq^SS6<$f_`mF$RGrO2{!-tUC2P(^l^DkE2@ zY9H%9f>%)gtOL0#Zq+@<`;Yo7m=n5^{eQ1y-N%*m|F2XF@wN_jF)zXXFU@hwTOGH2 zU0168Ow97L&@B+UQXRbL?kotXtNcplKKXWIcgpIm=lKGCgHfxW7vclFYZ=_0vWB+M z|2Z1BMlM{b(2hcD^x-Qt2I1poJt_L1k6N*G+={cHE&)l_r=(U<4=WN@>$^#-X+_Lx z=6Yz@$2v`3z}Ic=RXs1`YgjW;dpv1Xd9F}3RAk1i%AEw5yH_r-_qnR)y4%;myyF$L{a&RY-o<+v+Der1 zNQ`vClxc)=9K8^=#>mG=C}D*Pu3(;%bzZcq0T1tep6g`n&v%sRD`@*yD0uP;)=2W6 z=sL=chga8R!V2)<>Byj5H*_+tyi(o`d?T~+O0~Tovpg%3mcz424{{&6)%zk&1Da2=5T-c1^NVUtF-Y~nrNCPm-7 zhJODw`U-v`Ysu5@E&4I``2H=*MP{wptnBKI^ba=5>DkEjzgaHG#hw@0$oPLV{ol>1 z!Ee!u&5WZq)BoSB64yq)|JHq~s|rrOXp)irOUe|M7__S~dKXxe@g-~VT- zXEK?C%9LkMCgYz>)&*suBFy;j{Du4PThwd#c+^!e8_zj&?wnlwD2_3{?1*GBB^tFF~2k+(oc{d)a9 zG7?#@FCb(4uGM#uPs3N>>oBnUTITB3^M3wXjdDEVTF>vMt=DLJy~a+iSAO}m^h>T) zq32o^K{2?YWb=B)*Vig5c&)Oxua{#L-_)*HuYVy9SHZQ+&#qVAy7i2&*E3hSUJYli zWqft5nqIg;&Dn3L<=_p>uiU`=+_kL9S+5GxsC;-m>r>bB`!wrS^Z0t+#jWRs=T6># z@04TDE;%9R;aze;F7`agKe9^>(#(Y)LkSdu7yiICS(dSj?_YMQ^Pa1e#O)<`8Qy{w z^mD!99o}Q?^YtCR^L$DQFz4OzeflDN8}9$f$F$;Kxt3rJe%HcPunIDugLdBoo3U?! 
zkKu0*axXj#mvejo`8eE3U$Y;-N3d5zIs7%}n~{Hu>_Yk>2nl!*hVh?u=yqivxm^y= z?X-XP{{UBTTDk1kkmsk5&VCLB^od?#{awrr;pOMivAB_ypMlYKKf4W z+v)#YIH|xR)9Tnit_@4_~0%L9oVHo-sIf5w(l?|Nm6ip(TDh zW*2ivyA+^3`}gf)ZfTc%yLZWZ^c{Kj@8a9*UAm9-AAoW4@Ezoj;3tp+RqzYkr(hfT zYvOnSMqmQogjeC0@GJN&{3ont4EmRF4SbFJ>ut!-6aHr8r{HsN5Bvk%jr(KBufhFr zjI^FYejk1U&%z1Zs*v~L_h(2ellu$u*SMdCH=vvTL?8S&&iy;*VwCe8pXMIG@eS}v zc%1NO@%wq~x4<_ctAKT&;HYN}0OVkIK`!si@*w}rgbLiOD|PYiteJJDNH@nN$kG*4 zD&sv`IaEL;R6#Y=KrPh0_X=alIW-Wc{rw+lG(!u=tSS$60=Yd6?C?k@I_dxX8?2H8U{#NKhN zf2c*~uU*6Yku{8u*6@wc8Wm@<=L3|?98>A`HQb2#z5*QJgdEPfAQ$o=e4hM4^nLON zaqJ05t|Nai@bHudyN=P%LqGH!L541{zXft^j{C#mQ|fw`@`0Y6Q|jehkTU3_jQWvn zgz-#re>uwiW#yFG3FC*r4CM=1T%XTyT|CLP@I`nYehB{zKZPRrKIbcuzv0?xMIPbU zhx`wY`;jsD1^f%{SAi3L!*M?HcgTzIAMi2yd!K+!a3gF3H-4^0-V0xb|C2bsiLAi= z4&+av7JCyojU7ticlc{W_CN;=!`yk=G17zIW5^!lS>&HF_cMU~YUC#%ggpwkV!s=H zPh9sRFCrgBJ^|0ePvOr#O`6CQ{)dqh$m`%3_8H_XoPa<7H2T3A>~F$5umpd>Jk*u& zw{Z2RNgv+D{U0g!ZN{DB|H<+1LEQg*xNX9%A2|#war-Fz0{gYN{{{A6V?PUj$MIis zd=dMN<{WMQ&*5tL8{BV1Zo~aE$UESxa4W~3;@lSa4EC%wzgD(+-_N>8Co+e1lCHzF z{}*Wgw9EX=Uo-#lYZdOM{X;Rhq2w;we|lPF5C2-_leGV%wEq<^G5<~bhnh1#)1SWo zJa9%0@6!IE={W5l?0xD_(*C#5{k47V?fnW)^H#znt9HVP4;co0x$PGOA@5Rsk@D2QY3x0$9 zpE1vRFMh7Rg*E`6fKM`Z`dj2CxE5}N{e;_wyc_O?Tj9&_DBJ-L!8hQW@OAt?2H%7) z6ZcPXb3zgP2)9z?LF89C_Ygdd{dxEX_I}QdK@6r~9FD;+-~_w@zlLw(cM16$coh5Z zkuMO($C!Km3wQ#zPaqq)UpR2jgC}u&2JXhanB!KY4`MI{HMpJQ{=||To#6NwoQ4ay zEg>5@-v-+#v)kcr@N;}G@`vDrJoqV8LM?Q2-iLgibbFBf9RCpeS?q(<$?uUPxFzAA zasMf7+JW92JF!nAv)1wa2gh}^f5`dZKjoq=<ppqq0&V1H-Bao-WzKMY_Wgdxri!wB{ejP79lH-xcAAj-KI#IYwJ zSw5~*^*DXr48BLs;Cti@)?>WHc;z&EyJYarD?^QL);2wyp=zk<%1|woVK0Y@JsGNm z;&mBvL&jxYuI;6>=UHp^@W1XU~L>!Ikg{_$}^a zoju(6{S1f#+2A*z`TYz?g4S#OE7{A8N;`&1 z`)n1iRkmxD?OJ8KcHr6x!&+~SkCMt9am*DP=E^bM2HRGGt;DvKVk@<6W!TDWoAFyd zH-K58^=BRR#%Rz6ZQ%cQZPX@Rqs{uHuH}caK9zYL+++;fw$<3S8RKSSY&QlQY=0{A zR%6^|j2#%p+g20Gcr`waN8`_UGro+c-SBz11MY;o;0tg!+yi^yi*PU82Yca5@MX9k z_Q6-+tMCBqhX>&y_!>M6Uxx$mZFm$OgU8{Y;0gE+9E9(}lkgNAf~Vmbcov?6@4@r% zeK-t1fFHt-;0XK}=5w-a(_x!V+jQAxu5ISoW`S)M*=DhAme^*gZI;<)xouWpE`&{- z3tZ$37pR}IsDfFqt|a7b;c}eH)gZR&u=7aDC#iz%yr^O;cpHeho2ZxNHX3r-XBgI(=Gy#~RQrx!urY(>vKY+jP^CBce=YUa!m>lz9{XMo<#QY|6*9J>bKf&$Lfu+9xvY z6I+$Jl@nX-6I<;QTkR9ul(~%)+w2qDY^R%*c{5Ho+fFyzPTQ5q3+>G9w$pao=@w<) zg3~Rw(=E2st;)O=r(12OTWzP?lzAIYx7kj&*-kr@xdW#iw$l#V2^E!zVH87rtt6d0N?Lydkv$om=u+=Vrt=qNLR(|U(+G?x3^;T`Q72SH9w%Y1#-Jz|vQd`Lw zv0)O*PTyqDPGy_6T`$Ue+fDqNQ`<86w=G+>jk7mv+jh}U+;*$zC2iZGZA62k9hot4 zw7r`s?VR4Mo39u3#l*|anYwwaZr-Mwaonz(Z_&-S>gL;Y^A6o?M__Aovk7WzwO!k< z7xl_?{dUgZVw<K0yL7v5-=N!X*XOib zyZ3ANJNeaz^m%>$Io+WyVLX-s>y_w(9&hxXp7y+p8=SoeNKdmq%^hqQOLOJCBLzNjyKS6_NY zU)KG){~q0c|6X`d_kTtAKX8`_uuosnSDx2b_0>D|)xG*EmLKQ=J#g23dVn||*sJ|| zP!I0m-v-0h{UJSchaUR69{RSvrmyYce~%v4*YyqkgC5a8>YMtO4(PxQhi=e;8y|(| zb>QWszO8Q`_?{lsqc=VShxO=zr}QX?kLl4v{BMPAdQ6XfUXLBnV^8U^L;P4)_6clGokJ^j3%(K8R~Sv_kC?%CaX_6vHJa0h%r z=ro?C@obGdH15=Rj>cUY&((OI#`85^pz%VD7iqj$<8F6~uf~HK@6&j{#s@S$ zsPQ3<4{Llx;~|ZYYCNLxsK#R&k83=k@suXAG?A?dhbEkw$kBvL6S1q9qCyjunyAu5wI*sbQLBkMP1I|mK@*LdXwpQpCR#Mns);sDcr@YF zgijOgn(%8PpotDmbZVkY6WyBV(L}E%f|}^lM875mG%=`&5lw_NF{+6%O++*i)kI7a zaZMz|BK}FICUZ3D(qyhC^E8>S$s$b_YtpUB5>1wBvP_fZnyl1hl_sk-$veVYP1b3$ zUXu-)Y}90vCYv?cqRCcGwrSF%Nv|e-nrzpkUz44h?9ybnCVMp5tI41y`!w0F$pK9c zYH~=E!+(UezHK1~HQ z)uE|QO?7FiTT?xn>eWPlH=_*ZEYr00$wVJNe zbiJknyJ!EwPxxy)2Nvy&9rL9qZyxO{F(`f_5U+nn(5X|k7j~m8PLptW(GAgq?uvO zjA$mLnXqOenu+PT$?uGY)Vdbw3ENAz<1 zX)(xQgu?&{qf)cEn$6d2fo2OeTddg<&6aAmOta;ht(z)}P3U-`ju-2AnU0t1c%_cl>UfimH|uzd_#)@{m`-HtghMCFbfR1*Ds-Y! 
zCu($}Rwo*CqERQBbfQ@&T6CgSC%ii0(}{MS@asfCCpvVZTPJ#SqE{z^I?<;S{W>wA z6N5T2q!YtBF`^S8oe1khL?`{PJ9Kj7q~91l#t0f?R40=dZ`j|1o#)xCQ(WE!Z z`8NmUuc!1z1^?!tl7Azs_&2ip^@uUB)fl7B81=7@7z0~_F&g=|rHQABe{;~RH(HEr zGls_)UcKQnvVGaWZ@}0(jM2%zkzJRJH?nQ}8xDkRH@0s6&G{j8dDEjey?WE9H{125 zUvCEZf3w3Fy~YR{qt6(F#u&nwa~UJo7zUNf=|NqUbb9cN#BU zdb68<6AOm%Wdvu9;C#1<8e`c*aM=SXbd!SZxo_FConE)eXSYeO+detxvV$Q^BnT65 z&Si%{nCMNGx=oh4mtAq|;opSoF|qZS*m_KCJ;rN~@!Dg&_L$)K>NRm=7&jw~>s-!y zW7tK8e_JgK+vjrf94-4ap^S}4Y#lM|Lz5!fnpll6K9=p+(4x;I-Dg7C#b6hLT?GAl zbAW&5PfYFy-yELHpUc5wJDe^yS&~s{n12%ohKU7Xq90zi+c_CF>0m6o+mR2OIEPKv zFznFFN^!L8h_b`Bf0JP@kP+hv!#*@|j~H#1h4EpM7%`EK*sjYpgnjvl*hWljBPMu= ze-i`7C1Jao7(yn75ak^);joxMwpedh0axvM&gIM%VwuY)i#eA(o~|$+?Lc;#q2-Vy zH_E@s{HRF`!<;a}oHA)*m~f*e=~3He`!MHeo?|A|m~qCqWWRJ|`!IgTj4jN+IT7LC zB!^)iE@zhGj6Vwenj_=G_>E$l%cAXKl-R~xxe=wd zv1~6hb~6o!u#F1MW%Ez;7(rw7*#>FMWnVH(yxF9Hfz83c@#&b$p>z@W#8`GRc{4}0 zM-#+hY)<~)>@Wt&IE_!INyf>gia`jBKMLdB1az8!oX_Dth~YDa-x$P`W8%TMBur3_ z>;NY69Qzuy1reDe6O~C3!}fg1usttl*4+5)>&jG&%z2kd$ut6E7@tNE2V(iOouI8Z z#lUnYZgT@%bu)`9u341Ai?sM4DH+R{U_&CTjY33R47?>ACK$%DoxB$=mwlnhQ=!RIp~+LBi5BCJ!h|$=qV*bs2n+2P zOzZ@tBncT2-~G=Q&F9Y=~PmuvvfLJryV-&5>H2-ji>WSkjLZc0-Y|>>0v_8upS@mN1oQQFkT`*y4bTM5(4w~^?zh83dOOz3c=UG6{dN?`IAIc5Zzn?t$J@y< zMDemHUlujXqH$RazMUXY0)g^~Bw0z8*};<)wwGknvhXa6z_RFB7DImuGqfCZ_)iYQ z#$i4ypm}GLnDvHiaDWqXzy-OG2l-F{g-`^=;D!IC9;bp!Fc zh~Gv0F5>6&lzGA^eV#=P-W5_zmMPjQ=pYgpE6CgvnEw zbi&kOn6$#wVVLy7)M1!3!_;AzIt-(8nDoQsIgH+6>M=~3VbTm!_F;4gQ?KDhAP-^k z5TM6RKZ5=d z^pBu_1pOoEA3^^J`bW?|g8mWokDz}9{UhifLH`K)N6@1pTAvA4UHt`bW_}ivCgbkD`AR{iEm~ zMgJ)JN6|lu{!#RgqJI?qqv#(+|0w!L(Laj*QS^_Ze-!BDVb~qnJ&nS9E(KCvkQS^+WXB0i7 z=ov-ND0)WGGm4&3^o*fr3_WA$8AHz)ddAQ*hMqC>jG<=?J!9w@L(dp`#?Ui{o-y={ zp=S&|W9S(}&lq~f&@+afG4za~XAC`K)N_n_j#1As>N!R|$EfER`p3{ehW;`1kD-4I z{bT4KL;o21$Iw59{xS5Ap??hhW9T14{}}qm&_9O$G4zk2e+>O&=pRG>82ZQ1KaQSp z^o*lt96jUc8As1JddAT+j-GM!jH4%ymH9Y&#?dp5o^kYyqh}mFW;K)(d~CD4yg ze&-YDmq0(ZQJ7DlUjqFS=$Amh1o|b=FM)mu^h=;$0{wUz&+{mrPoPr*eG=%BKo9dQ zO_KK{c~6q}BzaGg_at?dq^^?akVJMDsYNpwk~OA=j@=#oU2 zB)TNgB}ttn(I-irCDAEKoh8vL$u*fowE+(yCga$(UA_yd=fpA)LRl=ljxd4#}qoI&@qL6DfCOB zUkcq)=#@g36uP9)B}M*INMP5_nHAOyCIy^~c2l~4sk{9h9Pn{D|Z^iEcYvqso9N0;8oB{+}y zcX-UdlS?$Y4bTXEdMD2fB~Yq&@*R)^c~AhwK)8Iu<+nosdicL2{x{q5LEk(1M4E4e zZ7ayuI|W2pK$HbUSr!Wf$Ur6{u7vuwR6!rq)3khFH z_#)yg@&NG``JfxfV-fy}@K=ReHTbD5(mMg-3FPXXp5|9bCFJ2}bF;w#`A`6bPz1%` zh7u@+GAM@%s4{%@EE|F&S?usfTeSF+EnU?7M_bQUI-m;%U=W631V&*DA`pex z*=!WfjzdCc9r(em89(c!z|29ljP8_qh5j1LF-F$egi>>cFP5ip!}bm54Nd^l5^9lNq3v#BZN@eN zA%jWO&VYSj*NUCAQIIRe!Ik0|vrFPq)rD~|X>sW|Vg|e1OjR!1^065$&9#(-l+e}i z$6T6fcRIicIpBg^$TQd}nrvUv&`H%h3+&Q0DmV+zI!h2mPz;oieOz+(Qjt4LaVfQn z(!^b6uoa?ydpMvCFJHj_E4!C2Xycu><*YO;d#=J-b(YJVE79b~M8L7BuVovh?4+IA zP2i8$naMCs!e^&y6sfk)7_T*kv*eT_b#yGpQi~hS*jZ=db)@W2CiXf`FP9I^&{=Pn zo+(Qjq-kUnFTz|>CePTY9;(IIxEQD;XQRQsN{pc1O{~i{lb0rgorcL{v%xMkt`^(D zu2!Q9O~Tn?P8q$qJeLpb*jmn-Qa2@zjZ3(H*+y2lWYEu~(`r}_d&z;T$#ygCaJgoHh`6ITxuZC_qi}yRS?GXH=mL{8 z>5}bB^~r&8Hz6tSDk5CAQD9V+aW>iMUCtoFln*U!Ic8HFMrlgM#E6aN>ZDORX^M8* zBS_ki)6|HmADWFb1|}7nM+K!@fqMmRl@t|gO3yZO*J$K?3k{`>*ga_M;n3lHFeYDMjJ?44dV%ru0^{ce#>@+hmlqf>FEB=4V0^s5*m!~QD{Bw6z!-6XvEc$^ zLslU|6`&Jixdp~)3yi%M7-uaoc3NP31L+E^3`Z6iQY_GwUZBgnK=*cm?(72H$c1DK zQcBZhNN4pZ%>y*eV=_$}NYgUYTx#i@Zom)II%$^++)xGW;0OFLb(3c5hUJeCgfXRa z@t;e)x%j8Amd>q)8mI-*$R&I({+XIdGX;~*B}`rc6hjlVKpSu_pSbd!kOzd%ZwAuK zC%^eIr3-R_bP9%mc<5NA3md@)!!V+B5n+pPFCs5R#9c)CMZ{ec0pck}w_@U9nRL3C zG>Xx`I1I#JjQ;Ly$N~JhiN{?Il!F_8Zu09UKkfk_JvZ^P({0*KxDv{sWK`)=@?V+{ zg@BHwtw8=tDZ^6Ap|l?cf$(MIqYS@g=u}2sl%a1Kc_|~W<-}LcxpK;!=9aGT0%=u} 
zMpc>8)%dTj1oWsT{u=UJgYGqyc@6Q@kX8+8)Zo9Ca;+u4TH>f9&3bgIC*FF>tC2Ej zqzoHLvx$0Z;<$-CH=$!okg_c_mzMn z9}tHR|32!fo%GvDyPa}rC(rGK=a!IeCr|BMTkRc-LDXocc>Sp2M|D5h^i%YHy!eT} zgXlX*s*~(?5~K_Fu7J|rR0DUCbT3)yO)4Fvc!H?Grh4fi#<3TQ=L8@<%FoP7q5Jkbnb9#iL4iRUF^g^f{ z;(REs^e8HiMwK2TpJN^%jxoZIk)JWbk5P?dC$2;`7Ct-c_~AuGOrfP(XX6y zajGt8?%Rv?XhAz%Y(Vpdav(Pi95?0yu{9byDK$|#EID0l!ar?vv569DqSTwvu9=c; zCfa6l)=Y^ulS~UaXu)6WFi`q!*xOp6+mMczF2oyHRjx%3YI~_pF9EznM1x*zuhOF5 zp~V1M?;t=Y_D&M$=3Ec5hs1j+mR|JeB@;pX2T_$4ycnc9gJVE3^pRj6)!0WS`p85d z8RPZMV!s;*(@#9Ky2SzP10!1G*}pj0p~WE-9ZG3&m;xHc?=ZzTOxnz`FOCpq1SLoC zKSKUP19iz&0DW4@ZPF5x7fX3%TH;b);!(M#17%P}zN;vwYVuo6dexk-A?=z9Ezum7>d>XG8Hl6a0i;=vzV(z{1Lf0z&W)tW zLy2!6paXEHAuKgz19|5GwbYEh&G_RXwbX*2R{XaTKNtLxhw}AMmfm70*OCu^KI}g7 z*4_=24KF;+^}ocEX33APe)7e3D|}-Cq~Ry;e)19^&43$-Gf)cTB@hO}b`UR5rKJwi z>ZFXi$Ws??UHI!Fzg?u$P55rgw1<555VnW9qung^;in(}{gi1x=?@SePn@Me@;^vE zhbXTh+=h|EdFPnGJC~0?+Qqs2R-I$U?Hn_1=jimEW0vh4*Z#Re;w~(QW*7wGC`{;Fksks& zSB!fx=@gIZoSVG5D}mz@(ksDVDfy#=c8(6(xzZ-+0MejicCL)`<(x0C)wxR2uEed9 z_$$d{RgKP7x9eQ38z>VxQ|Ic4zm9n8(7PV@`Z6HSdgA2TK3CtPbKKm|HDGV_>ReL^ zpno&&&FIoX94*Av>e0D2%7B~nIXYP9Jmk$o*?P#g&jF=Co_sBUUms=RBV0S_`3dJI zUw+D%nZk1c$|pcxJIGrH;X8@1o4C3u^KSCVwRoa@$T{bX0?^2|y4gG(;hpm!_W1UXb@_4!wQjjhqH4|Y=+0k%KRNw# z@2}Cn-aq~G=#S}j`#+EC9{<`We|?|)IXr24((&xy_w3KntN4GsivQ=?U+)+H)34S4 zy!!Kc_1_J@HvjYHuj8A)j(%Hqj1{2%Y;|MA`T$N!vq{`b>|Uw=P*Tln$$$@8C2A72(d%bvAQAHHDu z@GZcH&$Sdf9r(xB-A^BW9scm)=EM8v4-cihkrhO`0vrj(KnBO{>Y#Ak$>&czdwEW zf&Ihp?;ofBJ$_LCIQ7Rleen2mJM)kG>HEWfZ$2*kb-B3o%szcwe)0Hgc=_K?AJ_hx z`5EBj)*tiMzu)`tkJ-7$A7kdv({khEZpY()fArVoar4uM-@8Aa{q=wK$9wbF_5JTp zAKpuUcz^og$J-C@FF*eF$MT-?<8ObC-Wz^+C-~u=;D_InKfG`I@b^&o@T2XAcWfWt zp?&y0`ooX6AOG>^>fP9f|E_#^FZJO))Q5LYAKu@5cz^TZUCf7fFdyE*jJ?ZGWB=jP zn7w}*cPpO=InQU$7tddwubyw7@17r?pPpZyiD&AW^UQk|Jd2(s&$8$L{jpX(Yo2w_ zhG)~W<=OV^cy>K|o_)`O=g{NN>0ig56VIvV%yaI!@c486*OlkmbK|-7%sh9VdykHP z{mb*{dGb7aUOca!H;+c2V#dGz&-1tE-=6VMS#sQ*#_qy9(zetj6umks}SACLMU^*f0+9`!%! zf7Jh||53l+>&BygpRmTGexI_2Pg&zpzn=leqy9(zkNO|=Kk9$f?^D=#)c>g8r?K&< z-|v~@QNO><%XrlPsNb(s<5B;ke!rg%e+!)PsQ*#_qy8uTPx_zqKk0wc|D^v(|C4?{ zgN`TtPx_zqKk4_$Z#?OL(*LCYNx$Ex$CLgi{ZIOz^gro;(*LCYN&l1nC;fii98db6 z^grqMYv_2=|D^v(|C9bF{ZIOz^!xR7Jn8pq?0C}ur2k3(ll~|DPx_zqKk0wc|D^v( z|C9bF{ZIOz^!wZ~p7cNI_cO)tv-R)^cKCIB_;q_c>wnh&tp8cRU&n`E$H%jNv#McM zHT*0-{473v|23ZVKkI+i@8^@@*Zc9T|5?AERmQXaXZ_FmpY=cMf7bu3|5^XD{%8Hq z`k(dt*=9WJ_iM#?*6-(>;pd$3tp8d6v;JrOzIPkX`k(bb>wnh&tp8d6v;JrO&-$PB z`x$9G>wnh&tp8cRUx&uC{%8Guei|?OZFk0t{uli(`d{?F==b~Ac+vl&|3&|c{uli( z`d{?>*=xM$f6@P<|3&|cexFf>9ng5u?;C>gqTla>!!Bsp1r48F#*6+J{V)3c{5D?n zzv#Ca8ZY`^^uOqT(f^|VMgNQb7yU2#{k%6`^uOqT(eL}x;q%aV(f^|VMgNQb7yU2# zU-ZA|f6?zV(|FPUs^4d);pfKTTa59l|5g91{#X64`d{^%(+zXFVNN$*^}p(W)$d!6 z@v8q-|EvC2{jd6cCpupBzv_S0|Ek~5rQ=oqtNvI0ulis0dzU$0_4}E1_&H&`>VMVm z_lohV-{-pFbKUUm%6QfPs^7OP!bJuh zzL^=X`d{_G>VMP!rvFX3`GzrvFXwnk(uK!*C zyMCX+hi}4$8Rjs<9A=o~UH`j&-;NF6j*WNy@A`d9HhfDq-u1uhf7kEx`*_#?uK!*C zyM8}A4nI2%-=>Xs{qOpH&Kd9e-}S%if7kDG|M2t1c-Q}~-#2f=H*e!zzwhmb@9l@r zM#Fda!zORo`@V;Q!`_&@zNdBY}e*yIhHyy5$sVUss(@`g>`@XhJ4$s4{l9e$=9 zHhIG)Z`kAwo4nzx)M1l1e4RRM@`g>`@N?(z9ni4J8{W4Jo4jF@H@tTl-n$H&yz#ew zo4jF@H*E5TcQL~zZ`kAwo4jF@H*E5TP2RA{8{X3lo4jF@H~dUHZ1RRp-uPSp-}-&u zH2&8Aw|+nG4)1h^t=_QJ8-MHferNa@c=)<^*y;^iy9*y;`MfQGH! 
z@Rjhe)f={Y!&k$@R&Utq4POxtTfO1E*|60czT+Bxt{%2}!&YzD>J3}H;T_Vj)f={Y z!_VBqR&Utq4O_k8JF#J_H+Z`kS$TfO0{>0zrke6Kca^@gq9u+$lY#-enD2yZ`kS$TfO1y?_sMqZ1sk%-mujhK0Azm>;Jd@ zf9v--V)z^}{;l7JZ+K}o{-fW9Z}{4L_~Hy-d_$|zG2HZ{-ggt`h9L0HhshQ zjKlYg!=`W8^bMbZhfUx3kA9oJ;q&mY=^HkE!=`W8^bMQ7VbeEk`i4#4u<08%eZ!`2 z_+Dq&^bMb(hfUwG=^H*D4V%7U(>HwQGi>^XP2aHT8#aBzXQyG)H*EUGZ~edZ|JHBY zH+-fVwtd64Z`k&Y-}-;+_gZ22?r8YDHN0LJHh#m#Z~WG8<2QcmxAPmn_1pT5-}>$S z#&7*Lf5VH);g!Vj8GqRP4V%AV^EYh%hRxrw`5RtU44c2wZ~r%b>$d?MzSA1M(;C0^+X9Z?`hV;Ht=~)1VHY^;0*77TunQc% z`x;)H4!gi%7dY$!hws9M*CfMhlHqgZ@VRo>1rEEwVHY^;0>^*#+XW8anGL(Z;dRQe z3mkTV!*^)IcNoKG(P0-je3vnNCLRCpGpSwRunQb^fx|9v*aZ%|z+o3S>;i|^F~cr! z*aZ%|z~R-*@IrUk1rFbb47SoaM%V8FMx-A;P74E z_^*C1gU28JfArf74zGKL&EWW>-*?@^ZgAKQjz9Ws2ge`%_JhMKq47umAN{t3!z_YZ8Yo(hh5>YD;$6H|Iz%IR5CjHyrkc!`^V% z8xDKJVQ)C>4TtaEht1*mqyLZoKl=aZ|D)dyas1J5hd8{d8n%eT7ID}j4zI0-?=Ht5 z{q_UnkAC0bk3WAt<$qjr^qpqB6wm+s<@xIQ=K1dV;rZ$L<(YVACV;du}|po|)&) zbMJZZ{LAy`dGb7aUOca!H_yB0e?9-_`P=hv&wo6>J^%IS|E&MB{?Gb9>vzQ_pY{7r zH=p%?*8f?*@7Q8$7i+$J)^E|5&-$(U@>#!SUq0)%?#pNW7Jm7x-^wqa^;`Povwmy8 zeAaLAm(TirZ=BEiE&uXa|7ZOcfcdQ73NWAb`))6v^?S#X&-(rCc=B1l@0;WM=2!^E zcYm=GjFn)#nvA7jyovN z7yWjL`J(@e{xAB!=>MYM+%R^9`J(@e{xA9s24gT7=7R zF2->&j*IHitmAAzm&i9b5-H0!d1oliEvfHDqr<~)&Et$TYX`z;(O=tR^hF}Tji^M<|^I| z=Bs|@Dqr<~)&EsLf0eKLzv?&435OL9D;!ohtk^r{tNyS0d8~ZZ|5g82{cKjg>i??$ ztNyS0zv}<0|EqrQVPfbLL!W%p|4sil{a)S2d&+#%&vnIX{P?as-}Hae@0EUhZyvAp z^G*LZ{onL|)BjDsu~5G0|E8Y@3lA0^EZ_8h)BjEXH~ruAf75Ro5Kb(dSU9m58pY5k zoLG#FVm1(Cqxhaa21nt8#%>{A>Z|zGQ>bVMo%$N z4~rJ#rx-uQ_$lA@d%qCF^%$;Iwu zhyEY>f9U_A|A&6>Lc+;~lZ&BNe(3+9-}{jm-p9M^cux}Vuj5@we(2}u@=qo?;|IqK9V1DTTq5p^eANsu`%n$wE6Xu8hANsv7 z%n$uP^#9P$>cxB{-XZ3Pej~D&lH`Z}ANqgj|Dpefe(xCbL;nx`KlGcM#5>2>4aVp! z-aY20{-62{&|-=bpddf>|J3ijWPa-ZssE>b@9qN{1T@G`{Xg~p)c;ez@mjpY%uoG4 z^?R3@pZX2j@>9R}n)#{!r~aS%f9i)7@>4&s5Ie@$1jP6)b^);q$WQ%0^_#`yr~aS% zf9n6K|EGTMtYSM5+kyPl@BLNW295DtYzgvH|4;ot_1hKXmwwVAzx4moPdwz8ej~fs z9ORdN`XRsco8RP@e&f6R(*H}pO+tR@|E1qFC%^Rn(*H~UFa5vt+brale!GSI(r><# zU;2OP|E2$zetU-e(*H~UFa5vt|I+_U|1bT&^#9WTOFw}T?}OugaDM40Fyg&%e(5JM zLSn=_yBG(?yW)6PoL~BX>GvKl{`U0wrT>?H`-zYmndqPBCpJQCWTJnfpWKLnVJ7;$ zZ_Y&jME^wpME^v;chH&WpXi_Hx5vmt|3v>p|3v>pzmZ}l`n|8tME^wpME^wpM85%J zCi*A(C;BJ)C;BJ)C;BJ)?L0z|gdmB%M<)8c`_4qa_unyo%tZf0|3v>p|3v>p|3v>p zzjx%B=%47H>YwVL>YwVL>YwVL>NldyRKJN;ruwJ)r~0S*r~0S*r~0S*r~0S*O|UZ6 zKh^JjX{P$8`ltG*`ltG*`i(L()la~LfC&K;0>+g}kACm(Gu1!UKh;mhgp3Ip6EY@K z{Zsu@{Zsu@{Zsu@{f3^I>YwVL>Nom~(PyUmZDTUkZz7hd{yF`2!kN=Qr{BBX7>C9< zG$v%3(?6$wPXC<#IsJ3`=k(9%pVL35e@_3L{yF_drJ2)jSeiNgbNUTTGpBz}zg7Ua-r+-fWoc=lebNc7>&*`^!&YXUm zhs^0WYR#PfIsL}1nbU6;mpT20u9??w?3#J~^ZJcmWAvJN{qy?g^&7y(`|ix^pVvRH ze_sE*e)GB*$Hu-Q_7yRbjk#Us_0Q{{*KdCv!`aO1H=d2V#F^JWuYX?uy#9Ipwi%h% zKd*mY|Ga*)y%^hOUjMxQdHwVH=k?F)H^R-l{(1ezxS7{)_mp}4^ZMuY``ekv4l3q< znb&Wun|b{PyIIh`pxDS`WN*t>R;4v zv>cPjm_%k#|Dt{)=Ge)^P9}@`ZDq2ke^LLU{zd(Z`WN*t>R;4vYoA5^i~1M!+udhT z|Dt|7n=I0i>nq<=~OlKv(AHX&KkzodUj|C0VC{Y(1IOk?1kCH+hKm-H{` zx4+Ah{w4iO`j_-C>0i>nq<=~OlKv(AOZu1eFX>;>zodUj|C0VC{Y(0n^e^c*B#$9^ zmh>;_U(#>elqLO3`j_=D>tEKttbbYmvi@cL_JCQ|ZwHlS{mc57^)KsR*1xQOS^u(r zv)L@`U)H~@e_8*s{$>5k`j_=D>$fk=vi@cL%len~FY8~{zpQ^*|FZsN{mc3d-(!!M zW&PYnmh~^|U)H~@e_6k+VwUwU>tEKttbbX*32&D5FY8~{zpQ^*|FVA5-Yn~1(Z8a9 zMgNNa75#RPS<%0ue?`9yWLEUA=r`8QivAV-EBaUTujpUVzoLId|BC(<{VV!yC$pm8 zj5sU$SM;ywH|EcZ{uTWz`VITDqJKsIivAV-EBaUTujpUVzoLId|BC(<{VV!c^sne& z(Z8a9MgNNa75yvvSM)PFS<%nwWJNzfkQMza`d9U@>R;8rs^82xtNK^&)e@*|I{x$t;`q%WY>0i^orhiTU zn*KHY98_!sv!>rpHf#FV^snh()4!&FO~3tY*7UFGU(>&)-<~#W`q%WY>9?)Tn*KHY z%u?3$ujyaYzoy^rHf#EAZ)1BK^YpCgU(;`gn>GDw`q%W^ z|GNHl{p4RF@= 
zuj^me&s}9*|GNHl{R~#t^{?w+*KaSJb^Yu5*Y&UKU)R5`-{d~)`q%ZZ>tENuu76$s zy8d-yLA+vR0l|GNHl{pU`Q=-<$9 z-IZ|esee;HwvbKzoBB8PZ|dLFzo~yy|EB&;{hRtX^>6Cm)W4~J zQ~##^P5qntH}!Ao-_*aUe^dXa{!RUx`Zx7&>bD6GB9Tq~oBB8PZ|dLFzo~yy|EB&; z{hRtX^>6Cm)W4~JQ~##^P5qntH}!Ao-_*aUe^dXa{!RUx`Zx7&>fh48rGHERmi{gM zTl#H!v!#Da|CatO{agCC^l$0k(!ZtOjy+rY?SI2`WJ~{+{w@7m`nU9N>EF`7rQa?z zcJJBJZ~GnwBwPBo^l$0k(!ZsDOaGSsE&W^ixAbr6-_pOO-?lVc`nU9N>EF`7rGHC5 z!4*1xUa20z>SxAkx9-`2mae_KDCl5PFAtl8FY z&zf!h+xqRC1CnK1|F-^Z{oDGt^>6Fn*1xTPTmQCxo9Jxo-`2mae_Q{y{%!r+`nUCO z>)+OII~_nR_S4zcZ$q7J{oDGt^>6Fn*1xTPTmQCxh$h?mS%Pfqx4F%>{%!r+`t5JS z7-U=jw*DRcJNkF@@95vrzoXwqH#_=mwzH#uNB@rg9sN7{cl7V*-_Z~3WJmvw{vG{0 z`gip2=-<)5qkl*Lj()HwJNkF@@95vrzoUOg|Bn70{X6=1^zZ23(Qh-J9sN7{cl7V* zXC$(te@Fk0{vG{0`k|rh=(jD7ZEvm{$2gM`gis3>fhDBtAAJjuKr#9yZU$a@9N*xzpH;&|E~UB{k!^i_3!H6 z)xWEMSO2blyXV+FXIKBO{$2gdN9>@ptAAJjuKr#9yZTv>?COW3va5er|E_*MB)j@; zr?aacn#!*JUH!ZIclGb;-_^gXAE*jXl0E%<`uFtj>EF}8rysToY?VFzd;0hE@9DSS z&z}B0{d@ZN^zZ54)4!)5q70ywJ^g$7_w>VB+0(zLpI-^Tl0E$ZSN8PVd1p`mp8h@k zw#s1yvZsGf|DOIm{d@ZN^zZ2hG_$AQK0JH+_w?`S-_yURe^39O{yqJB`uFtj>EF}8 zr+;7nzW#mv`}+6w@9W>!zpvlsJp20h_3!K7*T1hHHp{;Lef>7++1C%BWncfkejD}d z>)+SEuYX_vzW#mv`}*zIv#)<&|Gs`(_U!B5*T1iSU;n=Tef>7>+1JlXWMBWj{(b#+ z+}YQ^uYX_vzW#mv`}+6w@9W>!&sSt$KM?2df8x= z|AGDk{RjFPqa5fz(0`!+K>vaM1N{g35A+}CKhS@mAH2(f{sa97`VaIU=s(bZp#MPs zf&K&i2l@~6ALu{Of1sad%7Oj^{RjFF^dIOy(0`!+K>vaM1N{g35A+}CKhS@m|3LqN z{saANNe=WM=s(bZp#MPsq5ebthx!loAL>8Uf2jXZ|Dpav{fGKFgBOa(fsQ*wuvk>Mb%vKKdAL>8Uf2jXZ|Dpav z{fGJw^&jd#)PJb|Q2(L+L;Z*Pxrac?In;ls|4{#-{zLtT`VaLV>Oa(fsQ*y^k$z4h zNBWQSAL(Z%a-{!A|B?P9{YUyaiX7>OpmU`ENdJ-kBmGDEkMtkuKhl4sAGQp4kt6*_ z`j7M<=|9qcq@VH0k^Uq7NBWQSAL&2Rf299N|B-%fBS-p=^dIRz(to7ieT5w92eJcX z=ScsN{v-WI`j7M<=|9qcq@VrBk^Uq7NBWQSGe$Yqf2{vl|FQmK{m1%`^&jg$)_<)3 zSU-=HWBqJhj`bhwKi1EW=2-u+{$u^e`j7P=>p#{HisxAWvHoNI3`&mmvrReHf2^Nz z%CY`q{m1%`^&jg$)_<)3SpTv9WBteakM$qxKh}S&|5*RA{$u^e`j7P=>p#|itp8a5 zvHoNI$NG=;pXldabE5x5|A~GEHYfT|^q=TI(SM@APV}GXKhb}p|3v?Z z{uBKt`cL$q=s(eaqW?tyiT)G)C;CtHpXfi)f1>|H|B3z+{U`cQ^m95n(SM@|Ec~{ z{ipg*^`GiL)z41mRR5{|Q~jsOa+gs-HK?ss2;_ zr~28Woa*P;!XV{T|Ec~{{ipg*^`GiL)qkr0RR5{|Q~js8*4CGY*ss2;_ zr}`Zn2p^YI{fu0=&oE9o)qkr0RR5X&GyM(|rWLbN%P~&-I_{Ki7Y*|6KpM{&W53`p@my=laj}yVQ_# z{S0l+^`GlM*MF}6T>rU#wl?Sb&-I_{KiBWNL(cW{i(zhauK!&Bx&Cwg=laj}pX)!@ zf3E*r|GEBi{pb2!hse49bN%P~&-I_{Ki7Y*|6KpM{&W3|WVqg3=x2L#q5nevg?^XT z!cXQx|Aqbw{TKRq%3SEb(9Z#f1I~s13;h@RFZ5sNztGRmc7-~ssB>{rT$C(m-;XDU+TZqf2p5Q&87ZJ{g?VL z^@{!9Ir`Y-ig>c7-~ssB>{rT$C(9Ahr^ zGqJhS@3uj%^k3<}(toA@O8=GqEB#mcuk>H(ztVrD|4RRr{ww`g`rSszmHsRJSNgB? 
z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=P4kp9T;OAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5+rkB9u*4R<~s>3{EhJkrnq zx~~j<;Ocd<&!0@^U3wE!DD@LHF-XHSvsGbz(1cH4?dq9ayy@#205R-4>+Ga z`JK=H^3P|d5zl8wYR+eyaL;GE1P$*$Ip0~cG5pqG7VLcI>F0dsM7!Y^28VXecTNeM z7wh-u#boGtG3#_*Ow|~CFE1uN4F7EKw(`7q4n8lAVV@U=z0Ql%L+9lOKk647cF&8Q zPUpqemGff%$9XkjeO}E(o>x;d=hd~sdG*J4UYP*r)lU5LYG3$ywZX&Si@#Z3KX2A5 z4SzOR88m#{GRJ=2Obnbiul(oD%f)%~Jbd1qTRm?Mgq^qF`xmX6OiYpmy+MU?Z+?D_d-`s{pj GE%q0!3k#M2 literal 0 HcmV?d00001 diff --git a/tools/checkpoint_conversion/convert_gemma3n_checkpoints.py b/tools/checkpoint_conversion/convert_gemma3n_checkpoints.py index b9ae8df019..a3e29bdfaf 100644 --- a/tools/checkpoint_conversion/convert_gemma3n_checkpoints.py +++ b/tools/checkpoint_conversion/convert_gemma3n_checkpoints.py @@ -243,7 +243,7 @@ def _port_weights(self, layer_or_variable, hf_key, transpose_dims=None): layer_or_variable.set_weights([weights, current_weights[1]]) else: print( - f"❓ Unexpected number of weights in layer " + f"❌ Unexpected number of weights in layer " f"{layer_or_variable.name}" ) @@ -436,7 +436,7 @@ def _port_language_model(self, keras_model): self._port_weights( lm.embed_tokens.embedding, f"{hf_prefix}.embed_tokens.weight" ) - self._port_rms_norm(lm.norm, f"{hf_prefix}.norm") + self._port_rms_norm(lm.final_normalization, f"{hf_prefix}.norm") self._port_weights( lm.embed_tokens_per_layer.embedding, f"{hf_prefix}.embed_tokens_per_layer.weight", @@ -464,7 +464,7 @@ def _port_language_model(self, keras_model): transpose_dims=(1, 0), ) - for i, layer in enumerate(lm.layers): + for i, layer in enumerate(lm.transformer_layers): layer_prefix = f"{hf_prefix}.layers.{i}" # Attention @@ -789,27 +789,30 @@ def validate_output(keras_model, hf_model, hf_processor): hf_output = hf_output.detach().cpu().float().numpy() print(f" -> HF model output shape: {hf_output.shape}") keras_inputs = {k: v.numpy() for k, v in hf_inputs.items()} - keras_inputs["token_ids"] = keras_inputs.pop("input_ids") - if "token_type_ids" in keras_inputs: - del keras_inputs["token_type_ids"] - keras_inputs["pixel_values"] = np.transpose( - keras_inputs["pixel_values"], (0, 2, 3, 1) - ) - if keras_inputs["pixel_values"].ndim == 4: - keras_inputs["pixel_values"] = np.expand_dims( - keras_inputs["pixel_values"], axis=1 + backbone_keras_inputs = {} + backbone_keras_inputs["token_ids"] = keras_inputs.pop("input_ids") + backbone_keras_inputs["padding_mask"] = keras_inputs.pop( + "attention_mask" + ).astype(bool) + # Images. 
+ pixel_values = keras_inputs.pop("pixel_values") + pixel_values_transposed = np.transpose(pixel_values, (0, 2, 3, 1)) + if pixel_values_transposed.ndim == 4: + pixel_values_transposed = np.expand_dims( + pixel_values_transposed, axis=1 ) - input_shape = keras_inputs["token_ids"].shape - seq_len = input_shape[1] - attention_mask_2d = keras_inputs["attention_mask"] - attention_mask_4d = attention_mask_2d[:, None, None, :] - causal_mask = np.tril(np.ones((seq_len, seq_len), dtype=bool))[ - None, None, :, : - ] - final_mask = causal_mask & attention_mask_4d - keras_inputs["attention_mask"] = final_mask + backbone_keras_inputs["images"] = pixel_values_transposed + # Audio. + input_features = keras_inputs.pop("input_features") + input_features_mask = keras_inputs.pop("input_features_mask") + if input_features.ndim == 3: + input_features = np.expand_dims(input_features, axis=1) + if input_features_mask.ndim == 2: + input_features_mask = np.expand_dims(input_features_mask, axis=1) + backbone_keras_inputs["input_features"] = input_features + backbone_keras_inputs["input_features_mask"] = input_features_mask print(" -> Running Keras model forward pass...") - keras_output = keras_model.predict(keras_inputs) + keras_output = keras_model.predict(backbone_keras_inputs) print(f" -> Keras model output shape: {keras_output.shape}") mean_diff = np.mean(np.abs(keras_output - hf_output)) print(f"🔶 Mean absolute difference: {mean_diff}") @@ -829,7 +832,7 @@ def main(_): ): print( " -> Loading cached Hugging Face model and processor from " - "{cache_dir}" + f"{cache_dir}" ) try: hf_model = Gemma3nForConditionalGeneration.from_pretrained( @@ -864,7 +867,7 @@ def main(_): validate_output(keras_model, hf_model, hf_processor) print(f"💾 Saving Keras preset to ./{save_path}") keras_model.save_to_preset(f"./{save_path}") - print("🏁 Conversion complete.") + print("🎉 Conversion complete.") del hf_model gc.collect() diff --git a/tools/sentencepiece_testing/create_gemma3n_test_proto.py b/tools/sentencepiece_testing/create_gemma3n_test_proto.py new file mode 100644 index 0000000000..ea3e317873 --- /dev/null +++ b/tools/sentencepiece_testing/create_gemma3n_test_proto.py @@ -0,0 +1,34 @@ +from tools.sentencepiece_testing.utils import train_sentencepiece + + +def main(): + train_sentencepiece( + ["the quick brown fox", "the earth is round"], + "gemma3n_test_vocab.spm", + vocab_size=21, + model_type="WORD", + pad_id=0, + bos_id=1, + eos_id=2, + unk_id=3, + pad_piece="", + bos_piece="", + eos_piece="", + unk_piece="", + control_symbols=[ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "[multimodal]", + ], + ) + + +if __name__ == "__main__": + main() From e3c257d6c33f21235eaed992b00f085bef5b771d Mon Sep 17 00:00:00 2001 From: harshaljanjani Date: Tue, 4 Nov 2025 18:43:31 +0530 Subject: [PATCH 06/10] exports: I like the CI green. 
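Note on the validate_output() changes in the conversion script above: the Hugging Face processor outputs are now remapped onto the Keras backbone's input names ("token_ids", "padding_mask", "images", "input_features", "input_features_mask") instead of hand-building a 4D causal attention mask. A minimal standalone sketch of that adaptation follows; the shapes and dummy arrays (batch of 1, a 224x224 image, 128 mel bins) are illustrative assumptions, not values taken from the script.

import numpy as np

# Stand-in for the dict returned by the HF processor.
hf_inputs = {
    "input_ids": np.ones((1, 16), dtype="int32"),
    "attention_mask": np.ones((1, 16), dtype="int32"),
    "pixel_values": np.zeros((1, 3, 224, 224), dtype="float32"),  # NCHW
    "input_features": np.zeros((1, 10, 128), dtype="float32"),  # (batch, frames, mels)
    "input_features_mask": np.ones((1, 10), dtype=bool),
}
backbone_inputs = {
    "token_ids": hf_inputs.pop("input_ids"),
    "padding_mask": hf_inputs.pop("attention_mask").astype(bool),
}
# Images: NCHW -> NHWC, plus a per-sample "number of images" axis.
pixel_values = np.transpose(hf_inputs.pop("pixel_values"), (0, 2, 3, 1))
if pixel_values.ndim == 4:
    pixel_values = np.expand_dims(pixel_values, axis=1)
backbone_inputs["images"] = pixel_values
# Audio: add a per-sample "number of audio clips" axis.
backbone_inputs["input_features"] = np.expand_dims(
    hf_inputs.pop("input_features"), axis=1
)
backbone_inputs["input_features_mask"] = np.expand_dims(
    hf_inputs.pop("input_features_mask"), axis=1
)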
--- keras_hub/api/layers/__init__.py | 6 ++++++ keras_hub/api/models/__init__.py | 9 +++++++++ keras_hub/api/tokenizers/__init__.py | 3 +++ keras_hub/src/models/gemma3n/gemma3n_audio_converter.py | 2 ++ 4 files changed, 20 insertions(+) diff --git a/keras_hub/api/layers/__init__.py b/keras_hub/api/layers/__init__.py index aacee7818e..142b43ab56 100644 --- a/keras_hub/api/layers/__init__.py +++ b/keras_hub/api/layers/__init__.py @@ -99,6 +99,12 @@ from keras_hub.src.models.gemma3.gemma3_image_converter import ( Gemma3ImageConverter as Gemma3ImageConverter, ) +from keras_hub.src.models.gemma3n.gemma3n_audio_converter import ( + Gemma3nAudioConverter as Gemma3nAudioConverter, +) +from keras_hub.src.models.gemma3n.gemma3n_image_converter import ( + Gemma3nImageConverter as Gemma3nImageConverter, +) from keras_hub.src.models.hgnetv2.hgnetv2_image_converter import ( HGNetV2ImageConverter as HGNetV2ImageConverter, ) diff --git a/keras_hub/api/models/__init__.py b/keras_hub/api/models/__init__.py index ca495b4433..8d5a749450 100644 --- a/keras_hub/api/models/__init__.py +++ b/keras_hub/api/models/__init__.py @@ -315,6 +315,15 @@ from keras_hub.src.models.gemma3n.gemma3n_backbone import ( Gemma3nBackbone as Gemma3nBackbone, ) +from keras_hub.src.models.gemma3n.gemma3n_causal_lm import ( + Gemma3nCausalLM as Gemma3nCausalLM, +) +from keras_hub.src.models.gemma3n.gemma3n_causal_lm_preprocessor import ( + Gemma3nCausalLMPreprocessor as Gemma3nCausalLMPreprocessor, +) +from keras_hub.src.models.gemma3n.gemma3n_tokenizer import ( + Gemma3nTokenizer as Gemma3nTokenizer, +) from keras_hub.src.models.gpt2.gpt2_backbone import GPT2Backbone as GPT2Backbone from keras_hub.src.models.gpt2.gpt2_causal_lm import ( GPT2CausalLM as GPT2CausalLM, diff --git a/keras_hub/api/tokenizers/__init__.py b/keras_hub/api/tokenizers/__init__.py index b155d0e6e1..d4692cb8c8 100644 --- a/keras_hub/api/tokenizers/__init__.py +++ b/keras_hub/api/tokenizers/__init__.py @@ -41,6 +41,9 @@ from keras_hub.src.models.gemma3.gemma3_tokenizer import ( Gemma3Tokenizer as Gemma3Tokenizer, ) +from keras_hub.src.models.gemma3n.gemma3n_tokenizer import ( + Gemma3nTokenizer as Gemma3nTokenizer, +) from keras_hub.src.models.gpt2.gpt2_tokenizer import ( GPT2Tokenizer as GPT2Tokenizer, ) diff --git a/keras_hub/src/models/gemma3n/gemma3n_audio_converter.py b/keras_hub/src/models/gemma3n/gemma3n_audio_converter.py index aa15e22dee..7f94271bf0 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_audio_converter.py +++ b/keras_hub/src/models/gemma3n/gemma3n_audio_converter.py @@ -7,8 +7,10 @@ import tensorflow as tf except ImportError: tf = None +from keras_hub.src.api_export import keras_hub_export +@keras_hub_export("keras_hub.layers.Gemma3nAudioConverter") class Gemma3nAudioConverter(keras.layers.Layer): """Converts raw audio waveforms into log-mel spectrograms. 
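The export decorator and __init__ entries above are what make the Gemma3n classes reachable from the public keras_hub namespaces. A quick smoke check, assuming an installed build that already contains this patch (no constructor calls are shown because the arguments are model-specific):

import keras_hub

print(keras_hub.layers.Gemma3nAudioConverter)
print(keras_hub.layers.Gemma3nImageConverter)
print(keras_hub.models.Gemma3nBackbone)
print(keras_hub.models.Gemma3nCausalLM)
print(keras_hub.models.Gemma3nCausalLMPreprocessor)
print(keras_hub.tokenizers.Gemma3nTokenizer)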
From c8fc40f767e3041c97987ba1b2ceffc5f6ecfaa0 Mon Sep 17 00:00:00 2001 From: harshaljanjani Date: Tue, 4 Nov 2025 19:51:00 +0530 Subject: [PATCH 07/10] skipif: Keras version compatibility in CausalLM tests --- keras_hub/src/models/gemma3n/gemma3n_causal_lm_test.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/keras_hub/src/models/gemma3n/gemma3n_causal_lm_test.py b/keras_hub/src/models/gemma3n/gemma3n_causal_lm_test.py index c8beeaae3a..c720f00628 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_causal_lm_test.py +++ b/keras_hub/src/models/gemma3n/gemma3n_causal_lm_test.py @@ -3,8 +3,10 @@ import keras import numpy as np +import pytest from absl.testing import parameterized from keras import ops +from packaging import version from keras_hub.src.models.gemma3n.gemma3n_audio_converter import ( Gemma3nAudioConverter, @@ -35,6 +37,10 @@ from keras_hub.src.utils.keras_utils import running_on_gpu +@pytest.mark.skipif( + version.parse(keras.__version__) > version.parse("3.8.0"), + reason=("Some facets of Gemma3nCausalLM are unsupported in keras > 3.8.0"), +) class Gemma3nCausalLMTest(TestCase, parameterized.TestCase): def setUp(self): self.tokenizer = MockGemma3nTokenizer() From ddd29bb42d1a2b4f7dafab1ba98ca3d8046ca055 Mon Sep 17 00:00:00 2001 From: harshaljanjani Date: Wed, 5 Nov 2025 19:08:54 +0530 Subject: [PATCH 08/10] refactor: Gemma3nCausalLM and update tests --- .../src/models/gemma3n/gemma3n_causal_lm.py | 35 +---- .../gemma3n/gemma3n_causal_lm_preprocessor.py | 120 ++++++++++-------- .../gemma3n_causal_lm_preprocessor_test.py | 97 +++++++------- .../models/gemma3n/gemma3n_causal_lm_test.py | 6 - 4 files changed, 114 insertions(+), 144 deletions(-) diff --git a/keras_hub/src/models/gemma3n/gemma3n_causal_lm.py b/keras_hub/src/models/gemma3n/gemma3n_causal_lm.py index 3807588f21..3e41e5eb9e 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_causal_lm.py +++ b/keras_hub/src/models/gemma3n/gemma3n_causal_lm.py @@ -100,39 +100,8 @@ def __init__( self.backbone = backbone # === Functional Model === - backbone_inputs = backbone.input.copy() - inputs = backbone_inputs.copy() - hidden_state = backbone(backbone_inputs) - is_text_only = "images" not in inputs and "input_features" not in inputs - if not is_text_only: - if "images" not in inputs: - inputs["images"] = keras.Input( - shape=(None, None, None, 3), name="images", dtype="float32" - ) - inputs["vision_indices"] = keras.Input( - shape=(None,), dtype="int32", name="vision_indices" - ) - inputs["vision_mask"] = keras.Input( - shape=(None,), dtype="bool", name="vision_mask" - ) - if "input_features" not in inputs: - inputs["input_features"] = keras.Input( - shape=(None, None, None), - name="input_features", - dtype="float32", - ) - inputs["input_features_mask"] = keras.Input( - shape=(None, None), name="input_features_mask", dtype="bool" - ) - inputs["audios"] = keras.Input( - shape=(None, None), dtype="float32", name="audios" - ) - inputs["audio_indices"] = keras.Input( - shape=(None,), dtype="int32", name="audio_indices" - ) - inputs["audio_mask"] = keras.Input( - shape=(None,), dtype="bool", name="audio_mask" - ) + inputs = backbone.input + hidden_state = backbone(inputs) outputs = backbone.language_model.token_embedding( hidden_state, reverse=True ) diff --git a/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor.py b/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor.py index 80cde1a647..109f92e62a 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor.py +++ 
b/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor.py @@ -408,58 +408,75 @@ def _format_output( audio_mask = audio_mask[..., :-1] response_mask = response_mask[..., :-1] padding_mask = padding_mask[..., :-1] - batch_size = tf.shape(vision_mask)[0] - if text_only_input or self.image_converter is None: - vision_indices = tf.zeros( - shape=[batch_size, 0], - dtype=tf.int32, - ) - else: - vision_indices = self._get_vision_indices(vision_mask=vision_mask) - if text_only_input or self.audio_converter is None: - audio_indices = tf.zeros( - shape=[batch_size, 0], - dtype=tf.int32, - ) + if return_labels: + x = { + "token_ids": ( + token_ids if batched else tf.squeeze(token_ids, axis=0) + ), + "padding_mask": ( + padding_mask + if batched + else tf.squeeze(padding_mask, axis=0) + ), + } + if self.image_converter is not None: + x["images"] = images if batched else tf.squeeze(images, axis=0) + if self.audio_converter is not None: + x["input_features"] = ( + input_features + if batched + else tf.squeeze(input_features, axis=0) + ) + x["input_features_mask"] = ( + input_features_mask + if batched + else tf.squeeze(input_features_mask, axis=0) + ) else: - audio_indices = self._get_audio_indices(audio_mask=audio_mask) - x = { - # Image - "images": images if batched else tf.squeeze(images, axis=0), - # Audio - "audios": audios if batched else tf.squeeze(audios, axis=0), - "input_features": ( - input_features - if batched - else tf.squeeze(input_features, axis=0) - ), - "input_features_mask": ( - input_features_mask - if batched - else tf.squeeze(input_features_mask, axis=0) - ), - # Text - "token_ids": ( - token_ids if batched else tf.squeeze(token_ids, axis=0) - ), - "vision_indices": ( - vision_indices - if batched - else tf.squeeze(vision_indices, axis=0) - ), - "audio_indices": ( - audio_indices if batched else tf.squeeze(audio_indices, axis=0) - ), - "vision_mask": ( - vision_mask if batched else tf.squeeze(vision_mask, axis=0) - ), - "audio_mask": ( - audio_mask if batched else tf.squeeze(audio_mask, axis=0) - ), - "padding_mask": ( - padding_mask if batched else tf.squeeze(padding_mask, axis=0) - ), - } + x = { + "token_ids": ( + token_ids if batched else tf.squeeze(token_ids, axis=0) + ), + "padding_mask": ( + padding_mask + if batched + else tf.squeeze(padding_mask, axis=0) + ), + } + if self.image_converter is not None: + vision_indices = self._get_vision_indices( + vision_mask=vision_mask + ) + x["images"] = images if batched else tf.squeeze(images, axis=0) + x["vision_indices"] = ( + vision_indices + if batched + else tf.squeeze(vision_indices, axis=0) + ) + x["vision_mask"] = ( + vision_mask if batched else tf.squeeze(vision_mask, axis=0) + ) + if self.audio_converter is not None: + audio_indices = self._get_audio_indices(audio_mask=audio_mask) + x["audios"] = audios if batched else tf.squeeze(audios, axis=0) + x["input_features"] = ( + input_features + if batched + else tf.squeeze(input_features, axis=0) + ) + x["input_features_mask"] = ( + input_features_mask + if batched + else tf.squeeze(input_features_mask, axis=0) + ) + x["audio_indices"] = ( + audio_indices + if batched + else tf.squeeze(audio_indices, axis=0) + ) + x["audio_mask"] = ( + audio_mask if batched else tf.squeeze(audio_mask, axis=0) + ) if return_labels: if not batched: y = tf.squeeze(y, axis=0) @@ -675,7 +692,6 @@ def call( x["padding_mask"] = tf.squeeze(x["padding_mask"], axis=0) y = tf.squeeze(y, axis=0) sample_weight = tf.squeeze(sample_weight, axis=0) - return keras.utils.pack_x_y_sample_weight(x, y, 
sample_weight) # === Multimodal processing === diff --git a/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor_test.py b/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor_test.py index bee9d63217..2b2d4781d8 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor_test.py +++ b/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor_test.py @@ -118,40 +118,34 @@ def test_vision_preprocessor_basics(self): input_data=input_data, return_output=True, ) - expected_output = [ - { - "vision_indices": [list(range(7, 12)) + [0] * 5], - "audio_indices": [[]], - "vision_mask": [[0] * 7 + [1] * 5 + [0] * 8], - "audio_mask": [[0] * 20], - "token_ids": [ - [1, 9, 14, 10, 12, 16, 4] - + [8] * 5 - + [5, 16, 15, 2] - + [0] * 4 - ], - "padding_mask": [[1] * 16 + [0] * 4], - }, - [ - [9, 14, 10, 12, 16, 4] + [8] * 5 + [5, 16, 15, 2] + [0] * 5 - ], # Labels shifted. - [[0] * 13 + [1] * 2 + [0] * 5], # Zero out unlabeled examples. - ] + expected_x = { + "token_ids": [ + [1, 9, 14, 10, 12, 16, 4] + [8] * 5 + [5, 16, 15, 2] + [0] * 4 + ], + "padding_mask": [[1] * 16 + [0] * 4], + } + expected_y = [ + [9, 14, 10, 12, 16, 4] + [8] * 5 + [5, 16, 15, 2] + [0] * 5 + ] # Labels shifted. + expected_sw = [ + [0] * 13 + [1] * 2 + [0] * 5 + ] # Zero out unlabeled examples. # Check shape for images. + self.assertIn("images", output[0]) self.assertAllEqual(output[0]["images"].shape, [1, 2, 4, 4, 3]) - # Check shape for audios (should be empty). - self.assertAllEqual(output[0]["audios"].shape, [1, 0, 0]) - self.assertAllEqual(output[0]["input_features"].shape, [1, 0, 0, 128]) - self.assertAllEqual(output[0]["input_features_mask"].shape, [1, 0, 0]) - # For everything else, check the actual values. - del output[0]["images"] - del output[0]["audios"] - del output[0]["input_features"] - del output[0]["input_features_mask"] - for key in expected_output[0].keys(): - self.assertAllEqual(output[0][key], expected_output[0][key]) - self.assertAllEqual(output[1], expected_output[1]) - self.assertAllEqual(output[2], expected_output[2]) + self.assertNotIn("audios", output[0]) + self.assertNotIn("input_features", output[0]) + self.assertNotIn("input_features_mask", output[0]) + self.assertNotIn("vision_indices", output[0]) + self.assertNotIn("audio_indices", output[0]) + self.assertNotIn("vision_mask", output[0]) + self.assertNotIn("audio_mask", output[0]) + self.assertAllEqual(output[0]["token_ids"], expected_x["token_ids"]) + self.assertAllEqual( + output[0]["padding_mask"], expected_x["padding_mask"] + ) + self.assertAllEqual(output[1], expected_y) + self.assertAllEqual(output[2], expected_sw) def test_audio_preprocessor_basics(self): input_data = { @@ -163,21 +157,20 @@ def test_audio_preprocessor_basics(self): output = preprocessor(input_data) # Check that we have the right keys. self.assertIn("token_ids", output[0]) - self.assertIn("vision_indices", output[0]) - self.assertIn("audio_indices", output[0]) - self.assertIn("vision_mask", output[0]) - self.assertIn("audio_mask", output[0]) self.assertIn("padding_mask", output[0]) - self.assertIn("images", output[0]) - self.assertIn("audios", output[0]) self.assertIn("input_features", output[0]) self.assertIn("input_features_mask", output[0]) - # Check shapes for images (should be empty). - self.assertAllEqual(output[0]["images"].shape[0:2], [1, 0]) - # Check shapes for audios (should have data). 
- self.assertAllEqual(output[0]["audios"].shape[0:2], [1, 2]) + self.assertNotIn("images", output[0]) + self.assertNotIn("audios", output[0]) + self.assertNotIn("vision_indices", output[0]) + self.assertNotIn("audio_indices", output[0]) + self.assertNotIn("vision_mask", output[0]) + self.assertNotIn("audio_mask", output[0]) self.assertEqual(output[0]["input_features"].shape[0], 1) self.assertEqual(output[0]["input_features_mask"].shape[0], 1) + self.assertAllEqual(output[0]["input_features"].shape[0:2], [1, 2]) + self.assertGreater(output[0]["input_features"].shape[2], 0) + self.assertGreater(output[0]["input_features_mask"].shape[2], 0) def test_multimodal_preprocessor_basics(self): input_data = { @@ -192,26 +185,24 @@ def test_multimodal_preprocessor_basics(self): output = preprocessor(input_data) # Check that we have all the right keys. self.assertIn("token_ids", output[0]) - self.assertIn("vision_indices", output[0]) - self.assertIn("audio_indices", output[0]) - self.assertIn("vision_mask", output[0]) - self.assertIn("audio_mask", output[0]) self.assertIn("padding_mask", output[0]) self.assertIn("images", output[0]) - self.assertIn("audios", output[0]) self.assertIn("input_features", output[0]) self.assertIn("input_features_mask", output[0]) + self.assertNotIn("vision_indices", output[0]) + self.assertNotIn("audio_indices", output[0]) + self.assertNotIn("vision_mask", output[0]) + self.assertNotIn("audio_mask", output[0]) + self.assertNotIn("audios", output[0]) # Check shapes for images. self.assertAllEqual(output[0]["images"].shape, [1, 2, 4, 4, 3]) # Check shapes for audios. - self.assertAllEqual(output[0]["audios"].shape[0:2], [1, 2]) self.assertEqual(output[0]["input_features"].shape[0], 1) self.assertEqual(output[0]["input_features_mask"].shape[0], 1) - # Check that both vision and audio masks have some True values. 
- vision_mask_sum = np.sum(np.array(output[0]["vision_mask"])) - audio_mask_sum = np.sum(np.array(output[0]["audio_mask"])) - self.assertGreater(vision_mask_sum, 0) - self.assertGreater(audio_mask_sum, 0) + self.assertAllEqual(output[0]["images"].shape[0:2], [1, 2]) + self.assertAllEqual(output[0]["input_features"].shape[0:2], [1, 2]) + self.assertGreater(output[0]["input_features"].shape[2], 0) + self.assertGreater(output[0]["input_features_mask"].shape[2], 0) def test_text_no_start_end_token(self): input_data = { diff --git a/keras_hub/src/models/gemma3n/gemma3n_causal_lm_test.py b/keras_hub/src/models/gemma3n/gemma3n_causal_lm_test.py index c720f00628..c8beeaae3a 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_causal_lm_test.py +++ b/keras_hub/src/models/gemma3n/gemma3n_causal_lm_test.py @@ -3,10 +3,8 @@ import keras import numpy as np -import pytest from absl.testing import parameterized from keras import ops -from packaging import version from keras_hub.src.models.gemma3n.gemma3n_audio_converter import ( Gemma3nAudioConverter, @@ -37,10 +35,6 @@ from keras_hub.src.utils.keras_utils import running_on_gpu -@pytest.mark.skipif( - version.parse(keras.__version__) > version.parse("3.8.0"), - reason=("Some facets of Gemma3nCausalLM are unsupported in keras > 3.8.0"), -) class Gemma3nCausalLMTest(TestCase, parameterized.TestCase): def setUp(self): self.tokenizer = MockGemma3nTokenizer() From 093bff8488dfff8743560a88267d8e39458062d6 Mon Sep 17 00:00:00 2001 From: harshaljanjani Date: Fri, 7 Nov 2025 14:33:02 +0530 Subject: [PATCH 09/10] feat: Complete TODO for CausalLM --- .../src/models/gemma3n/gemma3n_backbone.py | 54 +++++++-- .../src/models/gemma3n/gemma3n_causal_lm.py | 26 ++++- .../gemma3n/gemma3n_causal_lm_preprocessor.py | 107 +++++++----------- .../gemma3n_causal_lm_preprocessor_test.py | 16 +-- 4 files changed, 115 insertions(+), 88 deletions(-) diff --git a/keras_hub/src/models/gemma3n/gemma3n_backbone.py b/keras_hub/src/models/gemma3n/gemma3n_backbone.py index 3c519a39db..1941a2b74f 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_backbone.py +++ b/keras_hub/src/models/gemma3n/gemma3n_backbone.py @@ -190,6 +190,26 @@ def __init__( def build(self, input_shape): super().build(input_shape) + def compute_output_spec(self, inputs): + input_ids_spec = inputs["token_ids"] + batch_size = input_ids_spec.shape[0] + seq_len = input_ids_spec.shape[1] + inputs_embeds_spec = keras.KerasTensor( + shape=(batch_size, seq_len, self.text_hidden_size), + dtype=input_ids_spec.dtype + if hasattr(input_ids_spec.dtype, "name") + else "float32", + ) + num_layers = self.language_model.num_hidden_layers + per_layer_hidden_size = self.language_model.hidden_size_per_layer_input + per_layer_inputs_spec = keras.KerasTensor( + shape=(batch_size, seq_len, num_layers, per_layer_hidden_size), + dtype=input_ids_spec.dtype + if hasattr(input_ids_spec.dtype, "name") + else "float32", + ) + return inputs_embeds_spec, per_layer_inputs_spec + def call(self, inputs): input_ids = inputs["token_ids"] pixel_values = inputs.get("pixel_values") @@ -640,6 +660,9 @@ def __init__( self.vision_encoder = MobileNetV5Backbone.from_config( local_vision_encoder_config ) + if not self.vision_encoder.built: + input_shape = (None,) + tuple(self.vision_encoder.image_shape) + self.vision_encoder.build(input_shape) self.audio_encoder = None if audio_encoder_config: audio_encoder_sig = inspect.signature(Gemma3nAudioEncoder.__init__) @@ -660,6 +683,14 @@ def __init__( self.audio_encoder = Gemma3nAudioEncoder( dtype=dtype, **filtered_kwargs 
) + if not self.audio_encoder.built: + mel_shape = ( + None, + None, + self.audio_encoder.input_feat_size, + ) + mask_shape = (None, None) + self.audio_encoder.build((mel_shape, mask_shape)) self.language_model = Gemma3nTextModel( pad_token_id=pad_token_id, vocab_size=text_vocab_size, @@ -702,6 +733,8 @@ def __init__( dtype=dtype, name="vision_embedder", ) + if not self.embed_vision.built: + self.embed_vision.build((None, None)) self.embed_audio = None if self.audio_encoder: self.embed_audio = Gemma3nMultimodalEmbedder( @@ -713,6 +746,8 @@ def __init__( dtype=dtype, name="audio_embedder", ) + if not self.embed_audio.built: + self.embed_audio.build((None, None)) self.embedding_processor = Gemma3nMultimodalEmbeddingProcessor( language_model=self.language_model, vision_encoder=self.vision_encoder, @@ -739,7 +774,8 @@ def __init__( processor_inputs = { "token_ids": token_ids_input, } - model_inputs = { + model_inputs_list = [token_ids_input, padding_mask_input] + model_inputs_dict = { "token_ids": token_ids_input, "padding_mask": padding_mask_input, } @@ -753,7 +789,8 @@ def __init__( name="images", ) processor_inputs["pixel_values"] = images_input - model_inputs["images"] = images_input + model_inputs_list.append(images_input) + model_inputs_dict["images"] = images_input if self.audio_encoder: input_features_input = keras.Input( shape=(None, None, self.audio_encoder.input_feat_size), @@ -765,8 +802,10 @@ def __init__( ) processor_inputs["input_features"] = input_features_input processor_inputs["input_features_mask"] = input_features_mask_input - model_inputs["input_features"] = input_features_input - model_inputs["input_features_mask"] = input_features_mask_input + model_inputs_list.append(input_features_input) + model_inputs_list.append(input_features_mask_input) + model_inputs_dict["input_features"] = input_features_input + model_inputs_dict["input_features_mask"] = input_features_mask_input final_embeds, per_layer_inputs = self.embedding_processor( processor_inputs ) @@ -783,13 +822,14 @@ def __init__( per_layer_inputs, ) super().__init__( - inputs=model_inputs, + inputs=model_inputs_list, outputs=sequence_output, dtype=dtype, **kwargs, ) # === Config === + self._model_inputs_dict = model_inputs_dict self.text_vocab_size = text_vocab_size self.text_hidden_size = text_hidden_size self.num_hidden_layers = num_hidden_layers @@ -876,7 +916,3 @@ def get_config(self): } ) return config - - @classmethod - def from_config(cls, config): - return cls(**config) diff --git a/keras_hub/src/models/gemma3n/gemma3n_causal_lm.py b/keras_hub/src/models/gemma3n/gemma3n_causal_lm.py index 3e41e5eb9e..7b6288aab5 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_causal_lm.py +++ b/keras_hub/src/models/gemma3n/gemma3n_causal_lm.py @@ -100,7 +100,25 @@ def __init__( self.backbone = backbone # === Functional Model === - inputs = backbone.input + inputs = backbone._model_inputs_dict.copy() + if "images" in inputs: + if "vision_indices" not in inputs: + inputs["vision_indices"] = keras.Input( + shape=(None,), dtype="int32", name="vision_indices" + ) + if "vision_mask" not in inputs: + inputs["vision_mask"] = keras.Input( + shape=(None,), dtype="bool", name="vision_mask" + ) + if "input_features" in inputs: + if "audio_indices" not in inputs: + inputs["audio_indices"] = keras.Input( + shape=(None,), dtype="int32", name="audio_indices" + ) + if "audio_mask" not in inputs: + inputs["audio_mask"] = keras.Input( + shape=(None,), dtype="bool", name="audio_mask" + ) hidden_state = backbone(inputs) outputs = 
backbone.language_model.token_embedding( hidden_state, reverse=True @@ -222,15 +240,17 @@ def call_with_cache( the final hidden representation of the input tokens, and `cache` is the decoding cache. """ - # TODO: Make design decisions for `vision_indices`, `audio_indices`, - # `vision_mask` and `audio_mask`. # Build inputs dict for embedding processor. processor_inputs = {"token_ids": token_ids} if pixel_values is not None: processor_inputs["pixel_values"] = pixel_values + processor_inputs["vision_indices"] = vision_indices + processor_inputs["vision_mask"] = vision_mask if input_features is not None: processor_inputs["input_features"] = input_features processor_inputs["input_features_mask"] = input_features_mask + processor_inputs["audio_indices"] = audio_indices + processor_inputs["audio_mask"] = audio_mask # Get embeddings and per-layer inputs. inputs_embeds, per_layer_inputs = self.backbone.embedding_processor( processor_inputs diff --git a/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor.py b/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor.py index 109f92e62a..60638829b4 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor.py +++ b/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor.py @@ -408,75 +408,46 @@ def _format_output( audio_mask = audio_mask[..., :-1] response_mask = response_mask[..., :-1] padding_mask = padding_mask[..., :-1] - if return_labels: - x = { - "token_ids": ( - token_ids if batched else tf.squeeze(token_ids, axis=0) - ), - "padding_mask": ( - padding_mask - if batched - else tf.squeeze(padding_mask, axis=0) - ), - } - if self.image_converter is not None: - x["images"] = images if batched else tf.squeeze(images, axis=0) - if self.audio_converter is not None: - x["input_features"] = ( - input_features - if batched - else tf.squeeze(input_features, axis=0) - ) - x["input_features_mask"] = ( - input_features_mask - if batched - else tf.squeeze(input_features_mask, axis=0) - ) - else: - x = { - "token_ids": ( - token_ids if batched else tf.squeeze(token_ids, axis=0) - ), - "padding_mask": ( - padding_mask - if batched - else tf.squeeze(padding_mask, axis=0) - ), - } - if self.image_converter is not None: - vision_indices = self._get_vision_indices( - vision_mask=vision_mask - ) - x["images"] = images if batched else tf.squeeze(images, axis=0) - x["vision_indices"] = ( - vision_indices - if batched - else tf.squeeze(vision_indices, axis=0) - ) - x["vision_mask"] = ( - vision_mask if batched else tf.squeeze(vision_mask, axis=0) - ) - if self.audio_converter is not None: - audio_indices = self._get_audio_indices(audio_mask=audio_mask) + x = { + "token_ids": token_ids + if batched + else tf.squeeze(token_ids, axis=0), + "padding_mask": padding_mask + if batched + else tf.squeeze(padding_mask, axis=0), + } + if self.image_converter is not None: + vision_indices = self._get_vision_indices(vision_mask=vision_mask) + x["images"] = images if batched else tf.squeeze(images, axis=0) + x["vision_indices"] = ( + vision_indices + if batched + else tf.squeeze(vision_indices, axis=0) + ) + x["vision_mask"] = ( + vision_mask if batched else tf.squeeze(vision_mask, axis=0) + ) + if self.audio_converter is not None: + audio_indices = self._get_audio_indices(audio_mask=audio_mask) + x["input_features"] = ( + input_features + if batched + else tf.squeeze(input_features, axis=0) + ) + x["input_features_mask"] = ( + input_features_mask + if batched + else tf.squeeze(input_features_mask, axis=0) + ) + x["audio_indices"] = ( + audio_indices 
if batched else tf.squeeze(audio_indices, axis=0) + ) + x["audio_mask"] = ( + audio_mask if batched else tf.squeeze(audio_mask, axis=0) + ) + # For generation only. + if not return_labels: x["audios"] = audios if batched else tf.squeeze(audios, axis=0) - x["input_features"] = ( - input_features - if batched - else tf.squeeze(input_features, axis=0) - ) - x["input_features_mask"] = ( - input_features_mask - if batched - else tf.squeeze(input_features_mask, axis=0) - ) - x["audio_indices"] = ( - audio_indices - if batched - else tf.squeeze(audio_indices, axis=0) - ) - x["audio_mask"] = ( - audio_mask if batched else tf.squeeze(audio_mask, axis=0) - ) if return_labels: if not batched: y = tf.squeeze(y, axis=0) diff --git a/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor_test.py b/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor_test.py index 2b2d4781d8..2ba99fe9cf 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor_test.py +++ b/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor_test.py @@ -136,9 +136,9 @@ def test_vision_preprocessor_basics(self): self.assertNotIn("audios", output[0]) self.assertNotIn("input_features", output[0]) self.assertNotIn("input_features_mask", output[0]) - self.assertNotIn("vision_indices", output[0]) + self.assertIn("vision_indices", output[0]) self.assertNotIn("audio_indices", output[0]) - self.assertNotIn("vision_mask", output[0]) + self.assertIn("vision_mask", output[0]) self.assertNotIn("audio_mask", output[0]) self.assertAllEqual(output[0]["token_ids"], expected_x["token_ids"]) self.assertAllEqual( @@ -163,9 +163,9 @@ def test_audio_preprocessor_basics(self): self.assertNotIn("images", output[0]) self.assertNotIn("audios", output[0]) self.assertNotIn("vision_indices", output[0]) - self.assertNotIn("audio_indices", output[0]) + self.assertIn("audio_indices", output[0]) self.assertNotIn("vision_mask", output[0]) - self.assertNotIn("audio_mask", output[0]) + self.assertIn("audio_mask", output[0]) self.assertEqual(output[0]["input_features"].shape[0], 1) self.assertEqual(output[0]["input_features_mask"].shape[0], 1) self.assertAllEqual(output[0]["input_features"].shape[0:2], [1, 2]) @@ -189,10 +189,10 @@ def test_multimodal_preprocessor_basics(self): self.assertIn("images", output[0]) self.assertIn("input_features", output[0]) self.assertIn("input_features_mask", output[0]) - self.assertNotIn("vision_indices", output[0]) - self.assertNotIn("audio_indices", output[0]) - self.assertNotIn("vision_mask", output[0]) - self.assertNotIn("audio_mask", output[0]) + self.assertIn("vision_indices", output[0]) + self.assertIn("audio_indices", output[0]) + self.assertIn("vision_mask", output[0]) + self.assertIn("audio_mask", output[0]) self.assertNotIn("audios", output[0]) # Check shapes for images. 
self.assertAllEqual(output[0]["images"].shape, [1, 2, 4, 4, 3]) From 7aa2d5ed5ae4d0ef04a186633d4ff9bb781b4353 Mon Sep 17 00:00:00 2001 From: harshaljanjani Date: Sat, 8 Nov 2025 10:18:04 +0530 Subject: [PATCH 10/10] nit: Minor changes --- .../src/models/gemma3n/gemma3n_attention.py | 3 ++- .../gemma3n/gemma3n_causal_lm_preprocessor.py | 18 ++++++++++++++---- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/keras_hub/src/models/gemma3n/gemma3n_attention.py b/keras_hub/src/models/gemma3n/gemma3n_attention.py index 0d02074bd3..9b49163ac6 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_attention.py +++ b/keras_hub/src/models/gemma3n/gemma3n_attention.py @@ -674,7 +674,8 @@ def call(self, hidden_states, mask): logits = keras.ops.tanh(logits) logits = logits * softcap compute_dtype = logits.dtype - if "float16" in str(compute_dtype): + dtype_str = str(compute_dtype) + if "float16" in dtype_str or "bfloat16" in dtype_str: min_val = np.finfo(np.float16).min else: min_val = np.finfo(np.float32).min diff --git a/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor.py b/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor.py index 60638829b4..95a5b7fd0c 100644 --- a/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor.py +++ b/keras_hub/src/models/gemma3n/gemma3n_causal_lm_preprocessor.py @@ -709,7 +709,7 @@ def call( dtype="float32", ) input_features = tf.ones( - shape=[batch_size, 0, 0, 128], + shape=[batch_size, 0, 0, self.audio_converter.feature_size], dtype="float32", ) input_features_mask = tf.ones( @@ -724,12 +724,17 @@ def call( audio_mask = token_ids == self.tokenizer.audio_placeholder_id else: # No audio converter. + feature_size = ( + self.audio_converter.feature_size + if self.audio_converter is not None + else 128 + ) audios = tf.ones( shape=[batch_size, 0, 0], dtype="float32", ) input_features = tf.ones( - shape=[batch_size, 0, 0, 128], + shape=[batch_size, 0, 0, feature_size], dtype="float32", ) input_features_mask = tf.ones( @@ -893,7 +898,7 @@ def generate_preprocess( dtype="float32", ) input_features = tf.ones( - shape=[batch_size, 0, 0, 128], + shape=[batch_size, 0, 0, self.audio_converter.feature_size], dtype="float32", ) input_features_mask = tf.ones( @@ -908,12 +913,17 @@ def generate_preprocess( audio_mask = token_ids == self.tokenizer.audio_placeholder_id else: # No audio converter. + feature_size = ( + self.audio_converter.feature_size + if self.audio_converter is not None + else 128 + ) audios = tf.ones( shape=[batch_size, 0, 0], dtype="float32", ) input_features = tf.ones( - shape=[batch_size, 0, 0, 128], + shape=[batch_size, 0, 0, feature_size], dtype="float32", ) input_features_mask = tf.ones(