---
library_name: transformers
base_model:
- mistralai/Devstral-2-123B-Instruct-2512
---

This tiny model is intended for debugging. It is randomly initialized using a configuration adapted from [mistralai/Devstral-2-123B-Instruct-2512](https://huggingface.co/mistralai/Devstral-2-123B-Instruct-2512).

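Because the weights are random, the model is only useful for exercising code paths, not for producing meaningful text. A quick way to confirm how small it actually is (a sketch, using the same repo id and classes as the usage example below):

```python
from transformers import Ministral3ForCausalLM

# Load the tiny checkpoint and count its parameters. With hidden_size=8 and a
# 131072-token vocabulary, the embedding table dominates and the total is only
# around one million parameters.
model = Ministral3ForCausalLM.from_pretrained(
    "tiny-random/devstral-2", trust_remote_code=True
)
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")
```
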
### Example usage:

```python
import torch
from transformers import Ministral3ForCausalLM, MistralCommonBackend

# Load model and tokenizer
model_id = "tiny-random/devstral-2"
model = Ministral3ForCausalLM.from_pretrained(
    model_id,
    device_map="cuda",
    torch_dtype="bfloat16",
    trust_remote_code=True,
)
tokenizer = MistralCommonBackend.from_pretrained(model_id)
messages = [
    {
        "role": "user",
        "content": "Hi",
    },
]

tokenized = tokenizer.apply_chat_template(
    messages, return_tensors="pt", return_dict=True)
output = model.generate(
    **tokenized.to("cuda"),
    max_new_tokens=32,
)[0]
decoded_output = tokenizer.decode(output[len(tokenized["input_ids"][0]):])
print(decoded_output)
```
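
Since the model is tiny, the same flow also runs comfortably without a GPU; a CPU-only variant (a sketch, reusing the repo id and APIs from the example above):

```python
from transformers import Ministral3ForCausalLM, MistralCommonBackend

model_id = "tiny-random/devstral-2"
# No device_map / .to("cuda"): the two tiny layers fit easily in CPU memory.
model = Ministral3ForCausalLM.from_pretrained(model_id, trust_remote_code=True)
tokenizer = MistralCommonBackend.from_pretrained(model_id)

tokenized = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Hi"}], return_tensors="pt", return_dict=True)
output = model.generate(**tokenized, max_new_tokens=32)[0]
print(tokenizer.decode(output[len(tokenized["input_ids"][0]):]))
```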

### Code to create this repo:

```python
import json
from pathlib import Path

import accelerate
import torch
from huggingface_hub import file_exists, hf_hub_download
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoProcessor,
    GenerationConfig,
    Ministral3ForCausalLM,
    MistralCommonBackend,
    set_seed,
)

source_model_id = "mistralai/Devstral-2-123B-Instruct-2512"
save_folder = "/tmp/tiny-random/devstral-2"

processor = AutoProcessor.from_pretrained(
    source_model_id, trust_remote_code=True)
processor.save_pretrained(save_folder)
processor = MistralCommonBackend.from_pretrained(
    source_model_id, trust_remote_code=True)
processor.save_pretrained(save_folder)

with open(hf_hub_download(source_model_id, filename='config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config_json = json.load(f)
config_json.update({
    "head_dim": 32,
    "hidden_size": 8,
    "intermediate_size": 64,
    "num_attention_heads": 8,
    "num_hidden_layers": 2,
    "num_key_value_heads": 4,
    "tie_word_embeddings": True,
})
del config_json['quantization_config']
with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
    json.dump(config_json, f, indent=2)

config = AutoConfig.from_pretrained(
    save_folder,
    trust_remote_code=True,
)
print(config)
torch.set_default_dtype(torch.bfloat16)
model = Ministral3ForCausalLM(config)
torch.set_default_dtype(torch.float32)
if file_exists(filename="generation_config.json", repo_id=source_model_id, repo_type='model'):
    model.generation_config = GenerationConfig.from_pretrained(
        source_model_id, trust_remote_code=True,
    )
model.generation_config.do_sample = True
print(model.generation_config)
model = model.cpu()
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.1)
        print(name, p.shape)
model.save_pretrained(save_folder)
print(model)
```
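
After saving, it can be worth reloading everything from `save_folder` and running a short generation before uploading, to confirm the config, tokenizer, and weights round-trip; a minimal check (a sketch, reusing the paths and classes from the script above):

```python
import torch
from transformers import Ministral3ForCausalLM, MistralCommonBackend

save_folder = "/tmp/tiny-random/devstral-2"
# Reload the freshly saved tiny model and tokenizer; the generated text is
# meaningless because the weights are random, but the call should not error.
model = Ministral3ForCausalLM.from_pretrained(save_folder, torch_dtype=torch.bfloat16)
tokenizer = MistralCommonBackend.from_pretrained(save_folder)
inputs = tokenizer.apply_chat_template(
    [{"role": "user", "content": "ping"}], return_tensors="pt", return_dict=True)
output = model.generate(**inputs, max_new_tokens=8)
print(tokenizer.decode(output[0]))
```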

### Printing the model:

```text
Ministral3ForCausalLM(
  (model): Ministral3Model(
    (embed_tokens): Embedding(131072, 8, padding_idx=11)
    (layers): ModuleList(
      (0-1): 2 x Ministral3DecoderLayer(
        (self_attn): Ministral3Attention(
          (q_proj): Linear(in_features=8, out_features=256, bias=False)
          (k_proj): Linear(in_features=8, out_features=128, bias=False)
          (v_proj): Linear(in_features=8, out_features=128, bias=False)
          (o_proj): Linear(in_features=256, out_features=8, bias=False)
        )
        (mlp): Ministral3MLP(
          (gate_proj): Linear(in_features=8, out_features=64, bias=False)
          (up_proj): Linear(in_features=8, out_features=64, bias=False)
          (down_proj): Linear(in_features=64, out_features=8, bias=False)
          (act_fn): SiLUActivation()
        )
        (input_layernorm): Ministral3RMSNorm((8,), eps=1e-05)
        (post_attention_layernorm): Ministral3RMSNorm((8,), eps=1e-05)
      )
    )
    (norm): Ministral3RMSNorm((8,), eps=1e-05)
    (rotary_emb): Ministral3RotaryEmbedding()
  )
  (lm_head): Linear(in_features=8, out_features=131072, bias=False)
)
```
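
For reference, the projection shapes in this printout follow directly from the overridden config values (`head_dim=32`, `num_attention_heads=8`, `num_key_value_heads=4`, `hidden_size=8`):

```python
# How the Linear shapes above are derived from the tiny config.
head_dim, num_attention_heads, num_key_value_heads, hidden_size = 32, 8, 4, 8
print(hidden_size, "->", num_attention_heads * head_dim)   # q_proj: 8 -> 256
print(hidden_size, "->", num_key_value_heads * head_dim)   # k_proj / v_proj: 8 -> 128
print(num_attention_heads * head_dim, "->", hidden_size)   # o_proj: 256 -> 8
```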