from transformers.configuration_utils import PretrainedConfig
from transformers.models.auto import CONFIG_MAPPING, AutoConfig
from transformers.utils import logging


logger = logging.get_logger(__name__)


class LlavaOnevisionConfig(PretrainedConfig):
r""" |
|
|
This is the configuration class to store the configuration of a [`LlavaOnevisionForConditionalGeneration`]. It is used to instantiate an |
|
|
Llava-NeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration |
|
|
with the defaults will yield a similar configuration to that of the [llava-hf/llava-onevision-qwen2-7b-ov-hf](https://huggingface.co/llava-hf/llava-onevision-qwen2-7b-ov-hf) |
|
|
model. |
|
|
|
|
|
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the |
|
|
documentation from [`PretrainedConfig`] for more information. |
|
|
|
|
|
Args: |
|
|
vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `SiglipVisionConfig`): |
|
|
The config object or dictionary of the vision backbone. |
|
|
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `Qwen2Config`): |
|
|
The config object or dictionary of the text backbone. |
|
|
image_token_index (`int`, *optional*, defaults to 151646): |
|
|
The image token index to encode the image prompt. |
|
|
video_token_index (`int`, *optional*, defaults to 151647): |
|
|
The video token index to encode the video prompt. |
|
|
projector_hidden_act (`str`, *optional*, defaults to `"gelu"`): |
|
|
The activation function used by the multimodal projector. |
|
|
vision_feature_select_strategy (`str`, *optional*, defaults to `"full"`): |
|
|
The feature selection strategy used to select the vision feature from the vision backbone. |
|
|
Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features. |
|
|
If `"full"`, the full vision features are used. |
|
|
vision_feature_layer (`Union[int, List[int]]`, *optional*, defaults to -1): |
|
|
The index of the layer to select the vision feature. If multiple indices are provided, |
|
|
the vision feature of the corresponding indices will be concatenated to form the |
|
|
vision features. |
|
|
vision_aspect_ratio (`str`, *optional*, defaults to `"anyres_max_9"`): |
|
|
Aspect ratio used when processong image features. The default value is "anyres_max_9". |
|
|
image_grid_pinpoints (`List`, *optional*): |
|
|
A list of possible resolutions to use for processing high resolution images. Each item in the list should be a tuple or list |
|
|
of the form `(height, width)`. |
|
|
tie_word_embeddings (`bool`, *optional*, defaults to `False`): |
|
|
Whether the model's input and output word embeddings should be tied. |
|
|
multimodal_projector_bias (`bool`, *optional*, defaults to `True`): |
|
|
Whether to use bias in the multimodal projector. |
|
|
|
|
|
Example: |
|
|
|
|
|
```python |
|
|
>>> from transformers import LlavaOnevisionForConditionalGeneration, LlavaOnevisionConfig, SiglipVisionConfig, Qwen2Config |
|
|
|
|
|
>>> # Initializing a CLIP-vision config |
|
|
>>> vision_config = SiglipVisionConfig() |
|
|
|
|
|
>>> # Initializing a Llama config |
|
|
>>> text_config = Qwen2Config() |
|
|
|
|
|
>>> # Initializing a Llava-Next llava-hf/llava-onevision-qwen2-7b-ov-hf style configuration |
|
|
>>> configuration = LlavaOnevisionConfig(vision_config, text_config) |
|
|
|
|
|
>>> # Initializing a model from the llava-hf/llava-onevision-qwen2-7b-ov-hf style configuration |
|
|
>>> model = LlavaOnevisionForConditionalGeneration(configuration) |
|
|
|
|
|
>>> # Accessing the model configuration |
|
|
>>> configuration = model.config |
|
|
```""" |
|
|
|
|
|
model_type = "sdar_v" |
|
|
attribute_map = { |
|
|
"image_token_id": "image_token_index", |
|
|
"video_token_id": "video_token_index", |
|
|
} |
|
|
sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig} |
|
|
|
|
|
    def __init__(
        self,
        vision_config=None,
        text_config=None,
        image_token_index=151646,
        video_token_index=151647,
        projector_hidden_act="gelu",
        vision_feature_select_strategy="full",
        vision_feature_layer=-1,
        vision_aspect_ratio="anyres_max_9",
        image_grid_pinpoints=None,
        tie_word_embeddings=False,
        multimodal_projector_bias=True,
        **kwargs,
    ):
        self.image_token_index = image_token_index
        self.video_token_index = video_token_index
        self.projector_hidden_act = projector_hidden_act
        self.multimodal_projector_bias = multimodal_projector_bias

        if vision_feature_select_strategy not in ["default", "full"]:
            raise ValueError(
                "vision_feature_select_strategy should be one of 'default', 'full'. "
                f"Got: {vision_feature_select_strategy}"
            )

        self.vision_feature_select_strategy = vision_feature_select_strategy
        self.vision_feature_layer = vision_feature_layer
        self.vision_aspect_ratio = vision_aspect_ratio

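        # The default pinpoints below enumerate every (height, width) pair whose sides are
        # multiples of 384 up to 2304, i.e. all 1-6 tile splits per side of the 384px vision input.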
        image_grid_pinpoints = (
            image_grid_pinpoints
            if image_grid_pinpoints is not None
            else [
                [384, 384], [384, 768], [384, 1152], [384, 1536], [384, 1920], [384, 2304],
                [768, 384], [768, 768], [768, 1152], [768, 1536], [768, 1920], [768, 2304],
                [1152, 384], [1152, 768], [1152, 1152], [1152, 1536], [1152, 1920], [1152, 2304],
                [1536, 384], [1536, 768], [1536, 1152], [1536, 1536], [1536, 1920], [1536, 2304],
                [1920, 384], [1920, 768], [1920, 1152], [1920, 1536], [1920, 1920], [1920, 2304],
                [2304, 384], [2304, 768], [2304, 1152], [2304, 1536], [2304, 1920], [2304, 2304],
            ]
        )
        self.image_grid_pinpoints = image_grid_pinpoints

        if isinstance(vision_config, dict):
            vision_config["model_type"] = vision_config.get("model_type", "siglip_vision_model")
            vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
        elif vision_config is None:
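            # Assumption: these defaults describe a SigLIP-style vision tower in the spirit of
            # "siglip-so400m-patch14-384" (384x384 input, patch size 14, pooling head disabled).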
            vision_config = CONFIG_MAPPING["siglip_vision_model"](
                hidden_size=1152,
                intermediate_size=4304,
                patch_size=14,
                image_size=384,
                num_hidden_layers=26,
                num_attention_heads=14,
                vision_use_head=False,
            )

        self.vision_config = vision_config

        if isinstance(text_config, dict):
            text_config["model_type"] = text_config.get("model_type", "qwen2")
            try:
                text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
            except KeyError:
                # Model types not registered in transformers' CONFIG_MAPPING (e.g. the local
                # SDAR text backbone) fall back to the bundled SDAR configuration class.
                from .configuration_sdar import SDARConfig

                text_config = SDARConfig(**text_config)
        elif text_config is None:
            text_config = CONFIG_MAPPING["qwen2"]()

        self.text_config = text_config

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


__all__ = ["LlavaOnevisionConfig"]
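

# A minimal usage sketch, not part of the library API (the directory name below is hypothetical;
# this assumes the module ships next to configuration_sdar.py as custom model code). The config
# round-trips through the standard `PretrainedConfig` save/load methods:
#
#     config = LlavaOnevisionConfig()
#     config.save_pretrained("./sdar_v_checkpoint")
#     reloaded = LlavaOnevisionConfig.from_pretrained("./sdar_v_checkpoint")
#     assert reloaded.model_type == "sdar_v"
#     assert reloaded.vision_config.model_type == "siglip_vision_model"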