# coding=utf-8
# Copyright (c) InternLM. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch InternLM2 model."""
import math
import queue
import threading
import warnings
import copy
from typing import List, Optional, Tuple, Union

from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from PIL import Image

import torch
import torch.utils.checkpoint
from einops import rearrange
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    SequenceClassifierOutputWithPast,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from transformers import StoppingCriteria, StoppingCriteriaList

try:
    from transformers.generation.streamers import BaseStreamer
except:  # noqa # pylint: disable=bare-except
    BaseStreamer = None

from .configuration_internlm import InternLMConfig as InternLM2Config
from .build_mlp import build_vision_tower, build_vision_projector, PLoRA

logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "InternLM2Config"


class StoppingCriteriaSub(StoppingCriteria):
    def __init__(self, stops=[], encounters=1):
        super().__init__()
        self.stops = stops

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor):
        for stop in self.stops:
            if torch.all((stop == input_ids[0][-len(stop):])).item():
                return True
        return False


def text_gen(inst, tokenizer, model, stopping_criteria, temp=1.0, rept=1.005, sample=True):
    d = f"{inst}"
    input_ids = tokenizer(d, return_tensors="pt")["input_ids"]
    eos_token_id = [
        tokenizer.eos_token_id,
        tokenizer.convert_tokens_to_ids(["[UNUSED_TOKEN_145]"])[0],
    ]
    with torch.no_grad():
        generate = model.generate(
            input_ids.cuda(),
            do_sample=sample,
            temperature=temp,
            repetition_penalty=rept,
            max_new_tokens=1000,
            top_p=0.8,
            top_k=50,
            eos_token_id=eos_token_id,
            stopping_criteria=stopping_criteria,
        )
    res = tokenizer.decode(generate[0].tolist(), skip_special_tokens=True)
    return res


# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
    input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
    """
    Make causal mask used for bi-directional self-attention.
""" bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) # Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class InternLM2RMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ InternLM2RMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): input_dtype = hidden_states.dtype hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) return self.weight * hidden_states.to(input_dtype) class InternLM2RotaryEmbedding(nn.Module): def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): super().__init__() self.dim = dim self.max_position_embeddings = max_position_embeddings self.base = base inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) # Build here to make `torch.jit.trace` work. self._set_cos_sin_cache( seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype() ) def _set_cos_sin_cache(self, seq_len, device, dtype): self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype) freqs = torch.einsum("i,j->ij", t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False) self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False) def forward(self, x, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] if seq_len > self.max_seq_len_cached: self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype) return ( self.cos_cached[:seq_len].to(dtype=x.dtype), self.sin_cached[:seq_len].to(dtype=x.dtype), ) class InternLM2LinearScalingRotaryEmbedding(InternLM2RotaryEmbedding): """InternLM2RotaryEmbedding extended with linear scaling. 
class InternLM2LinearScalingRotaryEmbedding(InternLM2RotaryEmbedding):
    """InternLM2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        self.scaling_factor = scaling_factor
        super().__init__(dim, max_position_embeddings, base, device)

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
        t = t / self.scaling_factor

        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)


class InternLM2DynamicNTKScalingRotaryEmbedding(InternLM2RotaryEmbedding):
    """InternLM2RotaryEmbedding extended with Dynamic NTK scaling.
    Credits to the Reddit users /u/bloc97 and /u/emozilla.
    """

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        self.scaling_factor = scaling_factor
        super().__init__(dim, max_position_embeddings, base, device)

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len

        if seq_len > self.max_position_embeddings:
            base = self.base * (
                (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
            ) ** (self.dim / (self.dim - 2))
            inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
            self.register_buffer("inv_freq", inv_freq, persistent=False)

        t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)

        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
    # The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
    cos = cos.squeeze(1).squeeze(0)  # [seq_len, dim]
    sin = sin.squeeze(1).squeeze(0)  # [seq_len, dim]
    cos = cos.unsqueeze(0).unsqueeze(0).expand(len(position_ids), -1, -1, -1)
    sin = sin.unsqueeze(0).unsqueeze(0).expand(len(position_ids), -1, -1, -1)
    if q.size(2) == 1:
        q_embed = (q * cos[:, :, -1:, :]) + (rotate_half(q) * sin[:, :, -1:, :])
    else:
        q_embed = (q * cos) + (rotate_half(q) * sin)

    if k.size(2) == 1:
        k_embed = (k * cos[:, :, -1:, :]) + (rotate_half(k) * sin[:, :, -1:, :])
    else:
        k_embed = (k * cos) + (rotate_half(k) * sin)

    return q_embed, k_embed


class InternLM2MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        # self.w1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        # self.w3 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        # self.w2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)

        self.w1 = PLoRA(self.hidden_size, self.intermediate_size, bias=False,
                        lora_r=256, lora_alpha=256, lora_len=256)
        self.w3 = PLoRA(self.hidden_size, self.intermediate_size, bias=False,
                        lora_r=256, lora_alpha=256, lora_len=256)
        self.w2 = PLoRA(self.intermediate_size, self.hidden_size, bias=False,
                        lora_r=256, lora_alpha=256, lora_len=256)

        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x, im_mask):
        down_proj = self.w2(self.act_fn(self.w1(x, im_mask)) * self.w3(x, im_mask), im_mask)

        return down_proj


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
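
# NOTE: illustrative sketch, not part of the original model code. `repeat_kv` is how
# the grouped-query attention below expands the key/value heads to match the query
# heads; the head counts here are made-up example numbers.
def _repeat_kv_example():  # pragma: no cover - documentation helper
    kv = torch.randn(2, 8, 16, 64)     # (batch, num_key_value_heads, seq_len, head_dim)
    expanded = repeat_kv(kv, n_rep=4)   # -> (batch, 8 * 4, seq_len, head_dim)
    assert expanded.shape == (2, 32, 16, 64)
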
class InternLM2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: InternLM2Config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.is_causal = True

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )

        # self.wqkv = nn.Linear(...)
        self.wqkv = PLoRA(
            self.hidden_size,
            (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
            bias=config.bias,
            lora_r=256,
            lora_alpha=256,
            lora_len=256,
        )
        # self.wo = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
        self.wo = PLoRA(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias,
                        lora_r=256, lora_alpha=256, lora_len=256)
        self._init_rope()

    def _init_rope(self):
        if self.config.rope_scaling is None:
            self.rotary_emb = InternLM2RotaryEmbedding(
                self.head_dim,
                max_position_embeddings=self.max_position_embeddings,
                base=self.config.rope_theta,
            )
        else:
            scaling_type = self.config.rope_scaling["type"]
            scaling_factor = self.config.rope_scaling["factor"]
            if scaling_type == "dynamic":
                self.rotary_emb = InternLM2DynamicNTKScalingRotaryEmbedding(
                    self.head_dim,
                    max_position_embeddings=self.max_position_embeddings,
                    base=self.config.rope_theta,
                    scaling_factor=scaling_factor,
                )
            else:
                raise ValueError("Currently we only support rotary embedding's type being 'dynamic'.")
        return self.rotary_emb

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        im_mask: Optional[Tuple[torch.Tensor]] = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. "
                "Please make sure to use `attention_mask` instead."
            )

        bsz, q_len, _ = hidden_states.size()

        qkv_states = self.wqkv(hidden_states, im_mask)

        qkv_states = rearrange(
            qkv_states,
            "b q (h gs d) -> b q h gs d",
            gs=2 + self.num_key_value_groups,
            d=self.head_dim,
        )

        query_states = qkv_states[..., : self.num_key_value_groups, :]
        query_states = rearrange(query_states, "b q h gs d -> b q (h gs) d")
        key_states = qkv_states[..., -2, :]
        value_states = qkv_states[..., -1, :]

        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            # reuse k, v, self_attention
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)

        past_key_value = (key_states, value_states) if use_cache else None

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.wo(attn_output, im_mask)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value


class InternLM2FlashAttention2(InternLM2Attention):
    """
    InternLM2 flash attention module. This module inherits from `InternLM2Attention` as the weights of the module
    stay untouched. The only required change would be on the forward pass, where it needs to correctly call the
    public API of flash attention and deal with padding tokens in case the input contains any of them.
    """

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        im_mask: Optional[Tuple[torch.Tensor]] = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        # InternLM2FlashAttention2 attention does not support output_attentions
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. "
                "Please make sure to use `attention_mask` instead."
            )

            # overwrite attention_mask with padding_mask
            attention_mask = kwargs.pop("padding_mask")

        output_attentions = False

        bsz, q_len, _ = hidden_states.size()

        qkv_states = self.wqkv(hidden_states, im_mask)

        qkv_states = rearrange(
            qkv_states,
            "b q (h gs d) -> b q h gs d",
            gs=2 + self.num_key_value_groups,
            d=self.head_dim,
            q=q_len,
        )

        query_states = qkv_states[..., : self.num_key_value_groups, :]
        query_states = rearrange(query_states, "b q h gs d -> b q (h gs) d")
        key_states = qkv_states[..., -2, :]
        value_states = qkv_states[..., -1, :]

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]

        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            # reuse k, v, self_attention
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)

        past_key_value = (key_states, value_states) if use_cache else None

        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        dropout_rate = 0.0 if not self.training else self.attention_dropout

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
        # therefore the input hidden states gets silently casted in float32. Hence, we need
        # cast them back in the correct dtype just to be sure everything works as expected.
        # This might slowdown training & inference so it is recommended to not cast the LayerNorms
        # in fp32. (InternLM2RMSNorm handles it correctly)
        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            # Handle the case where the model is quantized
            if hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.wqkv.weight.dtype

            logger.warning_once(
                f"The input hidden states seems to be silently casted in float32, this might be related to"
                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back "
                f"the input in {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        attn_output = self._flash_attention_forward(
            query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
        )

        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
        attn_output = self.wo(attn_output, im_mask)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value


class InternLM2DecoderLayer(nn.Module):
    def __init__(self, config: InternLM2Config):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.attention = (
            InternLM2Attention(config=config)
            if not getattr(config, "_flash_attn_2_enabled", False)
            else InternLM2FlashAttention2(config=config)
        )
        self.feed_forward = InternLM2MLP(config)
        self.attention_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.ffn_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        im_mask: Optional[Tuple[torch.Tensor]] = None,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or
                `(batch_size, 1, query_sequence_length, key_sequence_length)` if default attention is used.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. "
" "Please make sure use `attention_mask` instead.`" ) residual = hidden_states hidden_states = self.attention_norm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.attention( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, im_mask=im_mask, **kwargs, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.ffn_norm(hidden_states) hidden_states = self.feed_forward(hidden_states, im_mask) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs InternLM2_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`InternLM2Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ @add_start_docstrings( "The bare InternLM2 Model outputting raw hidden-states without any specific head on top.", InternLM2_START_DOCSTRING, ) class InternLM2PreTrainedModel(PreTrainedModel): config_class = InternLM2Config base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["InternLM2DecoderLayer"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn_2 = True def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() InternLM2_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. 
            See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default
            strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or
            when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, decoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
            cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
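
# NOTE: illustrative sketch, not part of the original model code. It demonstrates the
# additive mask convention that `InternLM2Model._prepare_decoder_attention_mask` below
# builds from `_make_causal_mask` and `_expand_mask`: visible positions carry 0 and
# masked positions carry the dtype's minimum value.
def _attention_mask_example():  # pragma: no cover - documentation helper
    causal = _make_causal_mask((1, 4), torch.float32, torch.device("cpu"))
    padding = _expand_mask(torch.tensor([[1, 1, 1, 0]]), torch.float32, tgt_len=4)
    combined = causal + padding
    assert combined.shape == (1, 1, 4, 4)  # [bsz, 1, tgt_seq_len, src_seq_len]
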
@add_start_docstrings(
    "The bare InternLM2 Model outputting raw hidden-states without any specific head on top.",
    InternLM2_START_DOCSTRING,
)
class InternLM2Model(InternLM2PreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`InternLM2DecoderLayer`]

    Args:
        config: InternLM2Config
    """

    _auto_class = "AutoModel"

    def __init__(self, config: InternLM2Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([InternLM2DecoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.tok_embeddings

    def set_input_embeddings(self, value):
        self.tok_embeddings = value

    # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(
                input_shape,
                inputs_embeds.dtype,
                device=inputs_embeds.device,
                past_key_values_length=past_key_values_length,
            )

        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
                inputs_embeds.device
            )
            combined_attention_mask = (
                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
            )

        return combined_attention_mask

    @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        im_mask = kwargs.get('im_mask', None)

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape[:2]
        elif inputs_embeds is not None:
            batch_size, seq_length = inputs_embeds.shape[:2]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        seq_length_with_past = seq_length
        past_key_values_length = 0
        if past_key_values is not None:
            past_key_values_length = past_key_values[0][0].shape[2]
            seq_length_with_past = seq_length_with_past + past_key_values_length

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0)

        if inputs_embeds is None:
            inputs_embeds = self.tok_embeddings(input_ids)
            im_mask = torch.zeros(inputs_embeds.shape[:2]).to(inputs_embeds.device).bool()

        # embed positions
        if attention_mask is None:
            attention_mask = torch.ones(
                (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
            )
        attention_mask = self._prepare_decoder_attention_mask(
            attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
        )

        # embed positions
        hidden_states = inputs_embeds

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = () if use_cache else None

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, output_attentions, None, im_mask)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hidden_states,
                    attention_mask,
                    position_ids,
                    None,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    im_mask=im_mask,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class InternLM2ForCausalLM(InternLM2PreTrainedModel):
    _auto_class = "AutoModelForCausalLM"

    _tied_weights_keys = ["output.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = InternLM2Model(config)
        self.vocab_size = config.vocab_size
        self.output = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.debug_flag = 1
        self.tokenizer = None

        self.max_length = config.max_length
        print(f'Set max length to {self.max_length}')
        self.debug_flag = 1
        # Initialize weights and apply final processing
        self.post_init()
        # self.vit = build_vision_tower()
        # self.vision_proj = build_vision_projector()

        # self.im_size = 224
        # self.vis_processor = transforms.Compose([
        #     transforms.Resize((224, 224),
        #                       interpolation=InterpolationMode.BICUBIC),
        #     transforms.ToTensor(),
        #     transforms.Normalize((0.48145466, 0.4578275, 0.40821073),
        #                          (0.26862954, 0.26130258, 0.27577711)),
        # ])

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, InternLM2Model):
            module.gradient_checkpointing = value
            # if value:
            #     self.vit.vision_tower.vision_model.encoder.gradient_checkpointing = value

    def get_input_embeddings(self):
        return self.model.tok_embeddings

    def set_input_embeddings(self, value):
        self.model.tok_embeddings = value

    def get_output_embeddings(self):
        return self.output
    def set_output_embeddings(self, new_embeddings):
        self.output = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def encode_text(self, t, add_special_tokens=False):
        t = t.replace('<|User|>:', '[UNUSED_TOKEN_146]user\n')
        t = t.replace('<|Bot|>:', '[UNUSED_TOKEN_146]assistant\n')
        t = t.replace('', '[UNUSED_TOKEN_145]')
        t = t.replace('', '[UNUSED_TOKEN_145]')
        t = t.replace('[UNUSED_TOKEN_0]', '[UNUSED_TOKEN_145]')
        t = t.replace('[UNUSED_TOKEN_1]', '[UNUSED_TOKEN_145]')
        text = t
        token = self.tokenizer(
            text, return_tensors='pt', add_special_tokens=add_special_tokens).input_ids.to(self.device)
        embs = self.model.tok_embeddings(token)
        return embs

    # def encode_img(self, image):
    #     if image is None:
    #         return None
    #     if isinstance(image, str):
    #         image = Image.open(image).convert("RGB")
    #         image = self.vis_processor(image).unsqueeze(0).to(self.device)
    #     else:
    #         assert isinstance(image, torch.Tensor)
    #     img_embeds, atts_img, img_target = self.img2emb(image)
    #     return img_embeds

    # def img2emb(self, image):
    #     img_embeds = self.vision_proj(
    #         self.vit(image.to(self.device)))
    #     atts_img = torch.ones(img_embeds.size()[:-1], dtype=torch.long).to(img_embeds.device)
    #     img_target = torch.ones(img_embeds.size()[:2], dtype=torch.long).to(img_embeds.device) * -100
    #     return img_embeds, atts_img, img_target

    def prompt_wrap(self, img_embeds, prompt):
        batch_size = img_embeds.shape[0]
        p_before, p_after = prompt.split('')
        p_before_tokens = self.tokenizer(
            p_before, return_tensors="pt", add_special_tokens=True).to(img_embeds.device)

        p_before_embeds = self.model.tok_embeddings(p_before_tokens.input_ids).expand(batch_size, -1, -1)
        wrapped_img_embeds = torch.cat([p_before_embeds, img_embeds], dim=1)

        wrapped_atts_img = torch.ones(wrapped_img_embeds.size()[:-1], dtype=torch.long).to(img_embeds.device)

        wrapped_target = torch.ones(
            batch_size, wrapped_img_embeds.shape[1], dtype=torch.long).to(img_embeds.device) * -100

        return wrapped_img_embeds, wrapped_atts_img, wrapped_target

    def text2emb(self, text, add_special=False):
        # import pdb; pdb.set_trace()
        new_text = []
        for t in text:
            t = t.replace('<|User|>:', '[UNUSED_TOKEN_146]user\n')
            t = t.replace('<|Bot|>:', '[UNUSED_TOKEN_146]assistant\n')
            t = t.replace('', '[UNUSED_TOKEN_145]')
            t = t.replace('', '[UNUSED_TOKEN_145]')
            new_text.append(t)
        text = new_text

        to_regress_tokens = self.tokenizer(
            text,
            return_tensors="pt",
            padding="longest",
            truncation=True,
            max_length=self.max_length,
            add_special_tokens=add_special
        ).to(self.device)

        # targets = self.mask_human_targets(to_regress_tokens.input_ids)
        # targets = targets.to(self.device)

        targets = to_regress_tokens.input_ids.masked_fill(
            to_regress_tokens.input_ids == self.tokenizer.pad_token_id, -100
        ).to(self.device)

        return to_regress_tokens, targets

    def mask_human_targets(self, input_ids, pure=False):
        target_batch = []
        for bs in range(input_ids.shape[0]):
            cur_idx = 0
            ids = input_ids[bs]
            targets = copy.deepcopy(ids)
            end_count = 0
            last_eoa = 0
            for i, temp_id in enumerate(ids):
                if temp_id == 92542:
                    if end_count % 2 == 0:
                        targets[last_eoa: i + 6] = -100
                    else:
                        last_eoa = i + 1
                    end_count += 1
                elif temp_id == 2:
                    ### eos and following pad
                    targets[i + 1:] = -100  #### loss on eos, but not on pad
                    break
            if temp_id != 2 and end_count % 2 == 0:
                ### truncation, end at last question
                targets[last_eoa + 1:] = -100  #### mask all after the last answer
            target_batch.append(targets.unsqueeze(0))
            if self.debug_flag and 0:
                print('#### Warning! System meta is not supported now')
                targets_vis = targets.clone()
                targets_vis[targets_vis == -100] = 92399
                targets_vis_tokens = ''.join(
                    self.tokenizer.convert_ids_to_tokens(targets_vis)).replace('[UNUSED_TOKEN_2]', " ")
                print(''.join(self.tokenizer.convert_ids_to_tokens(ids)))
                print('-----------')
                print([targets_vis_tokens])
                print('-----------------------------')

        target_batch = torch.cat(target_batch, dim=0)
        return target_batch

    @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, InternLM2ForCausalLM

        >>> model = InternLM2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
        >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
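        # NOTE: commentary added for readability, not in the original code. Two paths
        # follow: if a `samples` dict is supplied (training), the text -- and, for
        # multimodal batches, the image features -- are embedded and concatenated here
        # and `labels` are derived from the wrapped targets; otherwise this behaves as
        # a plain causal-LM forward pass and `im_mask` only marks which positions of
        # `inputs_embeds` hold image embeddings.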
        samples = kwargs.get('samples', None)
        if samples:
            if self.debug_flag:
                self.debug_flag += 1
                if self.debug_flag > 5:
                    self.debug_flag = 0
            has_img = 'image' in samples.keys()
            # import pdb; pdb.set_trace()

            ### encode text
            # sp_token = samples["sp_token"]
            text = samples['text_input']
            text = ['<|User|>:' + t for t in text]

            to_regress_tokens, targets = self.text2emb(text, add_special=True)
            to_regress_embeds = self.model.tok_embeddings(to_regress_tokens.input_ids)
            attention_mask = to_regress_tokens.attention_mask

            if has_img:
                ### encode image
                image = samples["image"][0]
                bs = to_regress_embeds.shape[0]
                assert image.shape[0] == bs

                ### combine text and image
                if samples['data_type'][0] != 'nlp':
                    img_embeds, atts_img, img_target = self.img2emb(image)
                    to_regress_embeds = torch.cat(
                        [to_regress_embeds[:, :1], img_embeds, to_regress_embeds[:, 1:]], dim=1)
                    attention_mask = torch.cat(
                        [attention_mask[:, :1], atts_img, attention_mask[:, 1:]], dim=1)
                    targets = torch.cat([targets[:, :1], img_target, targets[:, 1:]], dim=1)
                    im_len = img_embeds.shape[1]
                    im_mask = torch.zeros(to_regress_embeds.shape[:2]).cuda()
                    im_mask[:, 1:1 + im_len] = 1
                    temp_max_length = self.max_length
                else:
                    img_embeds, atts_img, img_target = self.img2emb(
                        torch.zeros(1, 3, self.im_size, self.im_size).to(image.device).to(image.dtype))
                    to_regress_embeds += img_embeds.sum() * 0
                    im_mask = torch.zeros(to_regress_embeds.shape[:2]).cuda()
                    temp_max_length = self.max_length

            temp_max_length = self.max_length
            inputs_embeds = to_regress_embeds[:, :temp_max_length]
            attention_mask = attention_mask[:, :temp_max_length]
            targets = targets[:, :temp_max_length]
            # im_mask = im_mask[:, :temp_max_length].bool()
            labels = targets

            if self.debug_flag:
                print(targets.shape, inputs_embeds.shape, attention_mask.shape)
                le = len(samples['text_input'])
                data_type = samples['data_type'][0]
                print(f'DataType: {data_type}. Has Image: {has_img}. '
                      f'Current max length: {self.max_length}, BatchSize is {le}')
                if has_img:
                    print(img_embeds.shape)
        else:
            self.debug_flag = 0
            im_mask = kwargs.get('im_mask', None)
            if im_mask is None and inputs_embeds is not None:
                im_mask = torch.zeros(inputs_embeds.shape[:2]).to(inputs_embeds.device)
                im_mask[:, 1:1 + 256] = 1
                im_mask = im_mask.bool()

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        logits = self.output(hidden_states)
        logits = logits.float()

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss(reduction="none")
            B, N = shift_logits.shape[:2]
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            mask = shift_labels >= 0
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)
            loss = (loss.view(B, N).sum(dim=1) / mask.view(B, N).sum(dim=1)).mean()

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, im_mask=None, **kwargs
    ):
        if past_key_values is not None:
            past_length = past_key_values[0][0].shape[2]

            # Some generation methods already pass only the last input ID
            if input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                # Default to old behavior: keep only final ID
                remove_prefix_length = input_ids.shape[1] - 1

            input_ids = input_ids[:, remove_prefix_length:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1]:]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
                "im_mask": im_mask,
            }
        )
        return model_inputs

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past

    def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = [], meta_instruction=""):
        prompt = ""
        if meta_instruction:
            prompt += f"""[UNUSED_TOKEN_146]system\n{meta_instruction}[UNUSED_TOKEN_145]\n"""
        else:
            prompt += ""
        for record in history:
            prompt += f"""[UNUSED_TOKEN_146]user\n{record[0]}[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\n{record[1]}[UNUSED_TOKEN_145]\n"""
        prompt += f"""[UNUSED_TOKEN_146]user\n{query}[UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\n"""
        return tokenizer([prompt], return_tensors="pt")

    def inference(self, question, tokenizer):
        print(question)
        question = f'[UNUSED_TOKEN_146]user\n{question}[UNUSED_TOKEN_145]\n'
        stop_words_ids = [
            torch.tensor([2]).cuda(),      # ''
            torch.tensor([92542]).cuda(),  # '[UNUSED_TOKEN_145]'
        ]
        stopping_criteria = StoppingCriteriaList(
            [StoppingCriteriaSub(stops=stop_words_ids)])

        result = []
        for i in range(3):
            print(f'------attempt {i}------')
            d = f"{question}"
            input_ids = tokenizer(d, return_tensors="pt")["input_ids"]
            eos_token_id = [
                tokenizer.eos_token_id,
                tokenizer.convert_tokens_to_ids(["[UNUSED_TOKEN_145]"])[0],
            ]
            with torch.no_grad():
                generate = self.generate(
                    input_ids.cuda(),
                    do_sample=True,
                    temperature=1.0,
                    repetition_penalty=1.005,
                    max_new_tokens=1000,
                    top_p=0.8,
                    top_k=50,
                    eos_token_id=eos_token_id,
                    stopping_criteria=stopping_criteria,
                )
            response = tokenizer.decode(generate[0].tolist(), skip_special_tokens=True)
            response.split('[UNUSED_TOKEN_146]assistant')[1]
            print(response[len('[UNUSED_TOKEN_146]assistant\n'):-len('[UNUSED_TOKEN_145]\n')])
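
# ---------------------------------------------------------------------------
# NOTE: illustrative usage sketch, not part of the original modeling file. The
# checkpoint path is a placeholder and the prompt format simply mirrors the
# `text_gen` / `inference` helpers above; it assumes a CUDA device and a
# checkpoint whose weights match this PLoRA-based architecture.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import AutoTokenizer

    ckpt = "path/to/internlm2-checkpoint"  # hypothetical local or hub path
    tokenizer = AutoTokenizer.from_pretrained(ckpt, trust_remote_code=True)
    model = InternLM2ForCausalLM.from_pretrained(ckpt).cuda().eval()
    model.tokenizer = tokenizer

    # Stop on the end-of-answer token, the same criterion `inference` builds internally.
    stops = StoppingCriteriaList([StoppingCriteriaSub(stops=[torch.tensor([92542]).cuda()])])
    prompt = "[UNUSED_TOKEN_146]user\nHello![UNUSED_TOKEN_145]\n[UNUSED_TOKEN_146]assistant\n"
    print(text_gen(prompt, tokenizer, model, stops))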