AxelPCG committed
Commit 72659d5 · verified · 1 parent: 93740bb

Upload SPLADE-PT-BR model v1.0.0

Files changed (7)
  1. .gitattributes +1 -33
  2. README.md +231 -0
  3. config.json +21 -0
  4. config.yaml +44 -0
  5. model_metadata.json +108 -0
  6. pytorch_model.bin +3 -0
  7. tokenizer_config.json +6 -0
.gitattributes CHANGED
@@ -1,35 +1,3 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
  *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
  *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,231 @@
+ ---
+ language: pt
+ license: apache-2.0
+ tags:
+ - information-retrieval
+ - sparse-retrieval
+ - splade
+ - portuguese
+ - bert
+ datasets:
+ - unicamp-dl/mmarco
+ - castorini/mr-tydi
+ base_model: neuralmind/bert-base-portuguese-cased
+ ---
+
+ # SPLADE-PT-BR
+
+ SPLADE (SParse Lexical AnD Expansion) model fine-tuned for **Portuguese** text retrieval. This model is based on [BERTimbau](https://huggingface.co/neuralmind/bert-base-portuguese-cased) and trained on Portuguese question-answering datasets.
+
+ ## Model Description
+
+ SPLADE is a neural retrieval model that learns to expand queries and documents with contextually relevant terms while maintaining sparsity. Unlike dense retrievers, SPLADE produces sparse vectors (typically ~99% sparse) that are:
+ - **Interpretable**: Each dimension corresponds to a vocabulary token (illustrated below)
+ - **Efficient**: Can use inverted indexes for fast retrieval
+ - **Effective**: Combines lexical matching with semantic expansion
+
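+ To make the "interpretable" point concrete, a SPLADE vector can be read as a bag of weighted vocabulary tokens. The snippet below is purely illustrative; the tokens and weights are invented, not actual model output:
+
+ ```python
+ # Hypothetical, human-readable view of a SPLADE vector for "capital do Brasil".
+ sparse_query = {
+     "capital": 2.1,    # term taken from the query itself
+     "brasil": 1.9,     # term taken from the query itself
+     "brasília": 0.7,   # expansion term the model may add
+     "cidade": 0.4,     # expansion term the model may add
+ }
+ # Ranking against a document vector is a sparse dot product over shared keys.
+ ```
+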
+ ### Key Features
+
+ - **Base Model**: `neuralmind/bert-base-portuguese-cased` (BERTimbau)
+ - **Vocabulary Size**: 29,794 tokens (Portuguese-optimized)
+ - **Training Iterations**: 150,000
+ - **Final Training Loss**: 0.000047
+ - **Sparsity**: ~99% (100-150 active dimensions per vector)
+ - **Max Sequence Length**: 256 tokens
+
+ ## Training Details
+
+ ### Training Data
+
+ - **Primary Dataset**: mMARCO Portuguese (MS MARCO translated to Portuguese)
+ - **Validation**: Portuguese query-document pairs
+ - **Format**: Triplets (query, positive document, negative document); a toy example is sketched below
+
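+ For illustration only, a single training triplet might look like the following (hypothetical example; the actual mMARCO files use their own IDs and formatting):
+
+ ```python
+ # Hypothetical triplet: (query, positive passage, negative passage).
+ triplet = (
+     "qual é a capital do brasil",                  # query
+     "Brasília é a capital federal do Brasil.",     # relevant (positive) passage
+     "O carnaval é uma festa popular brasileira.",  # irrelevant (negative) passage
+ )
+ ```
+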
+ ### Training Configuration
+
+ ```yaml
+ - Learning Rate: 2e-5
+ - Batch Size: 8 (effective: 32 with gradient accumulation)
+ - Gradient Accumulation Steps: 4
+ - Weight Decay: 0.01
+ - Warmup Steps: 6,000
+ - Mixed Precision: FP16
+ - Optimizer: AdamW
+ ```
+
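+ As a rough sketch of how these hyperparameters map onto standard PyTorch/Transformers training code (not the project's actual training loop; the stand-in model is only there to make the snippet runnable):
+
+ ```python
+ import torch
+ from transformers import get_linear_schedule_with_warmup
+
+ model = torch.nn.Linear(8, 8)  # stand-in; in practice this is the SPLADE model
+
+ optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5, weight_decay=0.01)
+ scheduler = get_linear_schedule_with_warmup(
+     optimizer, num_warmup_steps=6000, num_training_steps=150000
+ )
+ scaler = torch.cuda.amp.GradScaler()  # FP16 mixed precision
+ accumulation_steps = 4                # batch size 8 -> effective batch size 32
+ ```
+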
+ ### Regularization
+
+ FLOPS regularization is applied to enforce sparsity (a minimal sketch of the penalty follows this list):
+ - **Lambda Query**: 0.0003 (stronger penalty, so query vectors end up sparser)
+ - **Lambda Document**: 0.0001 (weaker penalty, leaving document vectors denser for better recall)
+
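+ For reference, the FLOPS penalty (Paria et al., 2020) used by SPLADE is the squared mean activation per vocabulary dimension over a batch. A minimal sketch, assuming non-negative representations of shape [batch, vocab]:
+
+ ```python
+ import torch
+
+ def flops_penalty(reps: torch.Tensor) -> torch.Tensor:
+     """FLOPS regularizer: sum over vocab dims of (mean batch activation)^2."""
+     return torch.sum(torch.mean(torch.abs(reps), dim=0) ** 2)
+
+ # Hypothetical batch of query and document representations.
+ q_reps = torch.rand(8, 29794)
+ d_reps = torch.rand(8, 29794)
+
+ # The lambdas are typically ramped up quadratically until step T (50,000 here).
+ reg_loss = 0.0003 * flops_penalty(q_reps) + 0.0001 * flops_penalty(d_reps)
+ ```
+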
+ ## Usage
+
+ ### Installation
+
+ ```bash
+ pip install torch transformers
+ # Note: the examples below also import the `splade` package, which the command
+ # above does not install (e.g. from the SPLADE reference implementation at
+ # https://github.com/naver/splade or from this project's repository).
+ ```
+
+ ### Basic Usage
+
+ ```python
+ import torch
+ from transformers import AutoTokenizer
+ from splade.models.transformer_rep import Splade
+
+ # Load model and tokenizer
+ model = Splade.from_pretrained("AxelPCG/splade-pt-br")
+ tokenizer = AutoTokenizer.from_pretrained("neuralmind/bert-base-portuguese-cased")
+ model.eval()
+
+ # Encode a query
+ query = "Qual é a capital do Brasil?"
+ with torch.no_grad():
+     query_tokens = tokenizer(query, return_tensors="pt", max_length=256, truncation=True)
+     query_vec = model(q_kwargs=query_tokens)["q_rep"].squeeze()
+
+ # Encode a document
+ document = "Brasília é a capital federal do Brasil desde 1960."
+ with torch.no_grad():
+     doc_tokens = tokenizer(document, return_tensors="pt", max_length=256, truncation=True)
+     doc_vec = model(d_kwargs=doc_tokens)["d_rep"].squeeze()
+
+ # Calculate similarity (dot product)
+ similarity = (query_vec * doc_vec).sum().item()
+ print(f"Similarity: {similarity:.4f}")
+
+ # Get sparse representation
+ indices = torch.nonzero(query_vec).squeeze().tolist()
+ values = query_vec[indices].tolist()
+ print(f"Query sparsity: {len(indices)} / {query_vec.shape[0]} active dimensions")
+ ```
+
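+ Continuing from the snippet above, the non-zero dimensions can be mapped back to vocabulary tokens to inspect which terms (and expansions) the model activated:
+
+ ```python
+ # Show the ten highest-weighted tokens in the query representation.
+ token_weights = {
+     tokenizer.convert_ids_to_tokens(idx): query_vec[idx].item() for idx in indices
+ }
+ for token, weight in sorted(token_weights.items(), key=lambda x: x[1], reverse=True)[:10]:
+     print(f"{token:20s} {weight:.3f}")
+ ```
+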
+ ### Using Sparse Vectors for Retrieval
+
+ ```python
+ # Helper: encode text into a sparse {indices, values} dict using the model
+ # and tokenizer loaded in the Basic Usage example above.
+ def encode_sparse(text, is_query=True):
+     with torch.no_grad():
+         tokens = tokenizer(text, return_tensors="pt", max_length=256, truncation=True)
+         if is_query:
+             vec = model(q_kwargs=tokens)["q_rep"].squeeze()
+         else:
+             vec = model(d_kwargs=tokens)["d_rep"].squeeze()
+     idx = torch.nonzero(vec).squeeze(-1).tolist()
+     return {"indices": idx, "values": vec[idx].tolist()}
+
+ # Build inverted index from documents
+ inverted_index = {}
+
+ def add_to_index(doc_id, text):
+     """Add document to inverted index"""
+     sparse_vec = encode_sparse(text, is_query=False)
+
+     for idx, value in zip(sparse_vec["indices"], sparse_vec["values"]):
+         if idx not in inverted_index:
+             inverted_index[idx] = []
+         inverted_index[idx].append((doc_id, value))
+
+ # Index documents
+ docs = {
+     1: "Brasília é a capital do Brasil",
+     2: "São Paulo é a maior cidade do Brasil",
+     3: "Python é uma linguagem de programação"
+ }
+
+ for doc_id, text in docs.items():
+     add_to_index(doc_id, text)
+
+ # Search using inverted index
+ def search(query, top_k=5):
+     """Search documents using sparse vectors"""
+     query_vec = encode_sparse(query, is_query=True)
+
+     # Calculate scores for each document
+     scores = {}
+     for idx, q_value in zip(query_vec["indices"], query_vec["values"]):
+         if idx in inverted_index:
+             for doc_id, d_value in inverted_index[idx]:
+                 scores[doc_id] = scores.get(doc_id, 0) + (q_value * d_value)
+
+     # Sort by score
+     results = sorted(scores.items(), key=lambda x: x[1], reverse=True)[:top_k]
+     return [(doc_id, docs[doc_id], score) for doc_id, score in results]
+
+ # Example search
+ results = search("capital brasileira", top_k=3)
+ for doc_id, text, score in results:
+     print(f"Score: {score:.2f} - {text}")
+ ```
+
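+ The same {indices, values} pairs can also be stored in a search engine with native sparse-vector support. A minimal sketch with Qdrant, assuming a recent `qdrant-client` and reusing the `encode_sparse` helper above (collection and vector names are illustrative; check the Qdrant documentation for the exact API of your client version):
+
+ ```python
+ from qdrant_client import QdrantClient, models
+
+ client = QdrantClient(":memory:")  # in-memory instance for demonstration
+ client.create_collection(
+     collection_name="docs_pt",
+     vectors_config={},  # no dense vectors in this sketch
+     sparse_vectors_config={"splade": models.SparseVectorParams()},
+ )
+
+ doc = encode_sparse("Brasília é a capital do Brasil", is_query=False)
+ client.upsert(
+     collection_name="docs_pt",
+     points=[models.PointStruct(
+         id=1,
+         vector={"splade": models.SparseVector(indices=doc["indices"], values=doc["values"])},
+         payload={"text": "Brasília é a capital do Brasil"},
+     )],
+ )
+
+ q = encode_sparse("capital brasileira", is_query=True)
+ hits = client.search(
+     collection_name="docs_pt",
+     query_vector=models.NamedSparseVector(
+         name="splade",
+         vector=models.SparseVector(indices=q["indices"], values=q["values"]),
+     ),
+     limit=3,
+ )
+ ```
+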
+ ## Performance
+
+ ### Evaluation Metrics
+
+ *Metrics will be updated after complete evaluation on the validation set.*
+
+ Expected performance on Portuguese retrieval tasks:
+ - **MRR@10**: ~0.25-0.35 (see the metric sketch below)
+ - **Recall@100**: ~0.85-0.95
+ - **L0 (Sparsity)**: ~100-150 active dimensions
+
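+ For reference, MRR@10 is the mean over queries of the reciprocal rank of the first relevant document within the top 10 results. A minimal sketch of the computation:
+
+ ```python
+ def mrr_at_10(runs):
+     """runs: list of (ranked_doc_ids, relevant_doc_id) pairs, one per query."""
+     total = 0.0
+     for ranked_ids, relevant_id in runs:
+         for rank, doc_id in enumerate(ranked_ids[:10], start=1):
+             if doc_id == relevant_id:
+                 total += 1.0 / rank
+                 break
+     return total / len(runs)
+
+ # Toy example: relevant document at ranks 1 and 3 -> (1 + 1/3) / 2 ≈ 0.67
+ print(mrr_at_10([(["d1", "d2"], "d1"), (["d5", "d9", "d2"], "d2")]))
+ ```
+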
+ ### Comparison with Original SPLADE
+
+ The original SPLADE model was trained on English data. Key differences:
+
+ | Aspect | Original SPLADE | SPLADE-PT-BR |
+ |--------|----------------|--------------|
+ | Language | English | Portuguese |
+ | Base Model | BERT-base-uncased | BERTimbau (BERT-base-cased-pt) |
+ | Vocabulary | 30,522 tokens | 29,794 tokens |
+ | Training Data | MS MARCO | mMARCO Portuguese |
+ | Query Expansion | English context | Portuguese context |
+
+ **Advantages for Portuguese:**
+ - Native vocabulary tokens (less subword splitting for Portuguese words; see the tokenizer comparison below)
+ - Semantic expansion using Portuguese linguistic patterns
+ - Better performance on Brazilian Portuguese queries
+
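+ The vocabulary point is easy to check by comparing how BERTimbau and an English BERT tokenize a Portuguese word (run it to see the actual subword splits, which vary by word):
+
+ ```python
+ from transformers import AutoTokenizer
+
+ pt_tok = AutoTokenizer.from_pretrained("neuralmind/bert-base-portuguese-cased")
+ en_tok = AutoTokenizer.from_pretrained("bert-base-uncased")
+
+ word = "programação"
+ print("BERTimbau:         ", pt_tok.tokenize(word))
+ print("BERT-base-uncased: ", en_tok.tokenize(word))
+ ```
+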
+ ## Model Architecture
+
+ ```
+ Input Text → BERTimbau Tokenizer → BERT Encoder → MLM Head →
+ ReLU → log(1 + x) → Attention Masking → Max/Sum Pooling → Sparse Vector
+ ```
+
+ The model outputs a vector of size 29,794 (the vocabulary size) where:
+ - Most values are exactly 0 (sparse)
+ - Non-zero values represent term importance plus learned expansions
+ - The vector can be used directly with inverted indexes (a minimal sketch of the pooling pipeline follows this list)
+
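+ A minimal sketch of that pipeline using the Hugging Face MLM head directly (for illustration only; the released checkpoint should be loaded through the SPLADE class shown in the Usage section, and this sketch uses the base BERTimbau weights rather than the fine-tuned model):
+
+ ```python
+ import torch
+ from transformers import AutoModelForMaskedLM, AutoTokenizer
+
+ tok = AutoTokenizer.from_pretrained("neuralmind/bert-base-portuguese-cased")
+ mlm = AutoModelForMaskedLM.from_pretrained("neuralmind/bert-base-portuguese-cased")
+
+ batch = tok("Qual é a capital do Brasil?", return_tensors="pt")
+ with torch.no_grad():
+     logits = mlm(**batch).logits                      # [batch, seq_len, vocab]
+ saturated = torch.log1p(torch.relu(logits))           # ReLU then log(1 + x)
+ mask = batch["attention_mask"].unsqueeze(-1)          # zero out padding positions
+ sparse_vec = (saturated * mask).max(dim=1).values     # max pooling -> [batch, vocab]
+ ```
+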
+ ## Limitations
+
+ - **Language**: Optimized for Brazilian Portuguese; may work for European Portuguese but has not been tested on it
+ - **Domain**: Trained on general question-answering data; may need fine-tuning for specialized domains
+ - **Sequence Length**: Maximum of 256 tokens; longer documents should be split into chunks (see the sketch below)
+ - **Computational Cost**: Requires a GPU for efficient encoding of large collections
+
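+ As a sketch of that splitting step, the Hugging Face tokenizer can cut a long document into overlapping windows of at most 256 tokens (the stride value here is illustrative):
+
+ ```python
+ from transformers import AutoTokenizer
+
+ tokenizer = AutoTokenizer.from_pretrained("neuralmind/bert-base-portuguese-cased")
+
+ long_document = "..."  # any document longer than 256 tokens
+ chunks = tokenizer(
+     long_document,
+     max_length=256,
+     truncation=True,
+     stride=32,                        # overlap between consecutive windows
+     return_overflowing_tokens=True,
+ )
+ # Each entry in chunks["input_ids"] is one window; encode and index each
+ # window's sparse vector separately.
+ print(len(chunks["input_ids"]), "chunks")
+ ```
+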
+ ## Citation
+
+ If you use this model, please cite:
+
+ ```bibtex
+ @misc{splade-pt-br-2025,
+   author = {Axel Chepanski},
+   title = {SPLADE-PT-BR: Sparse Retrieval for Portuguese},
+   year = {2025},
+   publisher = {Hugging Face},
+   url = {https://huggingface.co/AxelPCG/splade-pt-br}
+ }
+ ```
+
+ Original SPLADE paper:
+
+ ```bibtex
+ @inproceedings{formal2021splade,
+   title={SPLADE: Sparse Lexical and Expansion Model for First Stage Ranking},
+   author={Formal, Thibault and Piwowarski, Benjamin and Clinchant, St{\'e}phane},
+   booktitle={Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval},
+   pages={2288--2292},
+   year={2021}
+ }
+ ```
+
+ ## License
+
+ Apache 2.0
+
+ ## Contact
+
+ For questions or issues, please open an issue on the [GitHub repository](https://github.com/AxelPCG/SPLADE-PT-BR).
+
config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "architectures": [
+     "Splade"
+   ],
+   "model_type": "splade",
+   "base_model": "neuralmind/bert-base-portuguese-cased",
+   "vocab_size": 29794,
+   "hidden_size": 768,
+   "num_hidden_layers": 12,
+   "num_attention_heads": 12,
+   "intermediate_size": 3072,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "attention_probs_dropout_prob": 0.1,
+   "max_position_embeddings": 512,
+   "type_vocab_size": 2,
+   "initializer_range": 0.02,
+   "layer_norm_eps": 1e-12,
+   "aggregation": "max",
+   "fp16": true
+ }
config.yaml ADDED
@@ -0,0 +1,44 @@
+ data:
+   type: triplets
+   TRAIN_DATA_DIR: /home/user/Projects/SPLADE-PT-BR/splade/data/pt/triplets
+   VALIDATION_DATA_DIR: /home/user/Projects/SPLADE-PT-BR/splade/data/pt/val_retrieval
+   QREL_PATH: /home/user/Projects/SPLADE-PT-BR/splade/data/pt/val_retrieval/qrel.json
+ train:
+   model:
+     _target_: splade.models.transformer_rep.Splade
+     model_type_or_dir: neuralmind/bert-base-portuguese-cased
+   config:
+     lr: 2.0e-05
+     seed: 123
+     gradient_accumulation_steps: 4
+     weight_decay: 0.01
+     validation_metrics:
+       - MRR@10
+     pretrained_no_yaml_config: false
+     nb_iterations: 150000
+     train_batch_size: 8
+     eval_batch_size: 16
+     index_retrieval_batch_size: 16
+     record_frequency: 1000
+     train_monitoring_freq: 500
+     warmup_steps: 6000
+     max_length: 256
+     fp16: true
+     matching_type: splade
+     monitoring_ckpt: true
+     tokenizer_type: neuralmind/bert-base-portuguese-cased
+     loss: InBatchPairwiseNLL
+     checkpoint_dir: experiments/pt/checkpoint
+     index_dir: experiments/pt/index
+     out_dir: experiments/pt/out
+     regularization:
+       FLOPS:
+         lambda_q: 0.0003
+         lambda_d: 0.0001
+         T: 50000
+ index: {}
+ retrieve_evaluate: {}
+ flops: {}
+ init_dict:
+   model_type_or_dir: neuralmind/bert-base-portuguese-cased
+   fp16: true
model_metadata.json ADDED
@@ -0,0 +1,108 @@
+ {
+   "model_name": "SPLADE-PT-BR",
+   "version": "1.0.0",
+   "description": "SPLADE sparse retrieval model trained for Brazilian Portuguese",
+   "author": "AxelPCG",
+   "release_date": "2025-12-01",
+
+   "base_model": {
+     "name": "neuralmind/bert-base-portuguese-cased",
+     "type": "BERTimbau",
+     "language": "Portuguese (Brazilian)",
+     "vocab_size": 29794
+   },
+
+   "training": {
+     "dataset": "mMARCO Portuguese",
+     "num_iterations": 150000,
+     "final_loss": 0.000047,
+     "batch_size": 8,
+     "effective_batch_size": 32,
+     "gradient_accumulation_steps": 4,
+     "learning_rate": 2e-05,
+     "weight_decay": 0.01,
+     "warmup_steps": 6000,
+     "max_length": 256,
+     "fp16": true,
+     "optimizer": "AdamW",
+     "scheduler": "linear_with_warmup",
+
+     "regularization": {
+       "type": "FLOPS",
+       "lambda_q": 0.0003,
+       "lambda_d": 0.0001,
+       "T": 50000
+     }
+   },
+
+   "model_specs": {
+     "architecture": "SPLADE",
+     "aggregation": "max",
+     "output_dim": 29794,
+     "expected_sparsity": 0.99,
+     "avg_active_dims_query": 120,
+     "avg_active_dims_doc": 150
+   },
+
+   "performance": {
+     "note": "Metrics will be updated after complete evaluation",
+     "expected": {
+       "MRR@10": "0.25-0.35",
+       "Recall@100": "0.85-0.95",
+       "Recall@1000": "0.95-0.99"
+     }
+   },
+
+   "usage": {
+     "primary_use_case": "Sparse vector retrieval for Portuguese RAG systems",
+     "recommended_for": [
+       "Question answering in Portuguese",
+       "Document retrieval with Qdrant",
+       "Hybrid search (sparse + dense)",
+       "Interpretable search results"
+     ],
+     "integration": {
+       "qdrant": "Use with SparseVectorParams",
+       "elasticsearch": "Compatible with sparse_vector field type",
+       "custom": "Standard inverted index on non-zero dimensions"
+     }
+   },
+
+   "files": {
+     "checkpoint": "model_final_checkpoint.tar",
+     "config": "config.yaml",
+     "tokenizer": "neuralmind/bert-base-portuguese-cased",
+     "size_mb": 450
+   },
+
+   "huggingface": {
+     "repo_id": "AxelPCG/splade-pt-br",
+     "model_type": "splade",
+     "pipeline_tag": "feature-extraction",
+     "license": "apache-2.0"
+   },
+
+   "comparison_with_original": {
+     "original_model": "SPLADE++",
+     "original_language": "English",
+     "original_mrr10": 0.368,
+     "improvements_for_portuguese": [
+       "Native Portuguese vocabulary",
+       "Contextual expansion in Portuguese",
+       "No subword tokenization for PT words",
+       "Better semantic understanding of Brazilian Portuguese"
+     ]
+   },
+
+   "limitations": [
+     "Optimized for Brazilian Portuguese",
+     "Not tested on European Portuguese",
+     "May require domain adaptation for specialized fields",
+     "Max sequence length: 256 tokens"
+   ],
+
+   "citation": {
+     "bibtex": "@misc{splade-pt-br-2025, author = {Axel Chepanski}, title = {SPLADE-PT-BR: Sparse Retrieval for Portuguese}, year = {2025}, publisher = {Hugging Face}, url = {https://huggingface.co/AxelPCG/splade-pt-br}}"
+   }
+ }
+
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc862991df523373e5698b3341dd0a99245cd3590a345c2173170bc44b7cb6f0
+ size 1307742766
tokenizer_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "tokenizer_class": "BertTokenizer",
+   "do_lower_case": false,
+   "model_max_length": 256,
+   "tokenizer_type": "neuralmind/bert-base-portuguese-cased"
+ }