Dataset schema (one row per column; for string columns the min/max values refer to string length, and ⌀ marks a nullable column):

| Column | Type | Min | Max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string | 7 | 58 |
| file_path | string | 9 | 302 |
| class_name | string | 5 | 256 |
| human_written_code | string | 16 | 2.16M |
| class_skeleton | string (⌀) | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
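The rows below follow this schema. As a usage sketch only (this excerpt does not name the dataset, so the Hub ID below is a placeholder that would need to be replaced with the real repository), rows with these columns can be loaded and filtered with the `datasets` library:

```python
from datasets import load_dataset

# Hypothetical dataset ID -- substitute the actual Hub repository backing this table.
ds = load_dataset("your-org/python-class-metrics", split="train")

# Example: keep classes that have at least one docstring and fewer than 200 code lines.
subset = ds.filter(lambda row: row["total_doc_str"] > 0 and row["CountLineCode"] < 200)

print(subset[0]["class_name"])      # fully qualified class name
print(subset[0]["class_skeleton"])  # signature-only view of the class
```

Each record pairs the full class source (`human_written_code`) with its signature-only skeleton (`class_skeleton`) and the size/complexity metrics listed above.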
id: 100
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/examples/pytorch/language-modeling/run_fim.py
class_name: run_fim.ModelArguments
human_written_code:
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(default=None, metadata={'help': "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."})
model_type: Optional[str] = field(default=None, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)})
config_overrides: Optional[str] = field(default=None, metadata={'help': 'Override some existing default config settings when a model is trained from scratch. Example: n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`).'})
trust_remote_code: bool = field(default=False, metadata={'help': 'Whether to trust the execution of code from datasets/models defined on the Hub. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.'})
dtype: Optional[str] = field(default=None, metadata={'help': "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the dtype will be automatically derived from the model's weights.", 'choices': ['auto', 'bfloat16', 'float16', 'float32']})
pad_to_multiple_of: bool = field(default=False, metadata={'help': ('Whether to pad the embedding layer to a multiple depending on the device. ', 'For NVIDIA GPUs, this will be a multiple of 8, for TPUs a multiple of 128.')})
attn_implementation: Optional[str] = field(default='sdpa', metadata={'help': 'The attention implementation to use. '})
def __post_init__(self):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError("--config_overrides can't be used in combination with --config_name or --model_name_or_path")
class_skeleton:
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
'''
def __post_init__(self):
pass
total_program_units=3, total_doc_str=1, AvgCountLine=5, AvgCountLineBlank=0, AvgCountLineCode=5, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0.03, CountClassBase=0, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=100, CountLineBlank=2, CountLineCode=95, CountLineCodeDecl=16, CountLineCodeExe=93, CountLineComment=3, CountStmt=18, CountStmtDecl=16, CountStmtExe=16, MaxCyclomatic=2, MaxInheritanceTree=0, MaxNesting=1, SumCyclomatic=2

id: 101
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/examples/pytorch/text-generation/run_generation.py
class_name: run_generation._ModelFallbackWrapper
human_written_code:
from transformers import AutoTokenizer, BloomForCausalLM, BloomTokenizerFast, CTRLLMHeadModel, CTRLTokenizer, GenerationMixin, GPT2LMHeadModel, GPT2Tokenizer, GPTJForCausalLM, LlamaForCausalLM, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, OPTForCausalLM, XLMTokenizer, XLMWithLMHeadModel, XLNetLMHeadModel, XLNetTokenizer
import torch
from transformers.modeling_outputs import CausalLMOutputWithPast
class _ModelFallbackWrapper(GenerationMixin):
__slots__ = ('_optimized', '_default')
def __init__(self, optimized, default):
self._optimized = optimized
self._default = default
def __call__(self, *args, **kwargs):
if kwargs['past_key_values'] is None and self._default.config.use_cache:
kwargs['past_key_values'] = generate_past_key_values(self._default, kwargs['input_ids'].shape[0], 0)
kwargs.pop('position_ids', None)
for k in list(kwargs.keys()):
if kwargs[k] is None or isinstance(kwargs[k], bool):
kwargs.pop(k)
outputs = self._optimized(**kwargs)
lm_logits = outputs[0]
past_key_values = outputs[1]
fixed_output = CausalLMOutputWithPast(loss=None, logits=lm_logits, past_key_values=past_key_values, hidden_states=None, attentions=None)
return fixed_output
def __getattr__(self, item):
return getattr(self._default, item)
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, use_cache=None, **kwargs):
return self._default.prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, **kwargs)
def _reorder_cache(self, past_key_values: tuple[tuple[torch.Tensor]], beam_idx: torch.Tensor) -> tuple[tuple[torch.Tensor]]:
"""
This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or
[`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
beam_idx at every generation step.
"""
return self._default._reorder_cache(past_key_values, beam_idx)
class_skeleton:
class _ModelFallbackWrapper(GenerationMixin):
def __init__(self, optimized, default):
pass
def __call__(self, *args, **kwargs):
pass
def __getattr__(self, item):
pass
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, use_cache=None, **kwargs):
pass
def _reorder_cache(self, past_key_values: tuple[tuple[torch.Tensor]], beam_idx: torch.Tensor) -> tuple[tuple[torch.Tensor]]:
'''
This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or
[`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
beam_idx at every generation step.
'''
pass
total_program_units=6, total_doc_str=1, AvgCountLine=8, AvgCountLineBlank=0, AvgCountLineCode=7, AvgCountLineComment=1, AvgCyclomatic=2, CommentToCodeRatio=0.14, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=2, CountDeclMethod=5, CountDeclMethodAll=43, CountLine=45, CountLineBlank=5, CountLineCode=35, CountLineCodeDecl=18, CountLineCodeExe=25, CountLineComment=5, CountStmt=23, CountStmtDecl=14, CountStmtExe=17, MaxCyclomatic=4, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=8

id: 102
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/examples/pytorch/text-classification/run_glue.py
class_name: run_glue.DataTrainingArguments
human_written_code:
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
task_name: Optional[str] = field(default=None, metadata={'help': 'The name of the task to train on: ' + ', '.join(task_to_keys.keys())})
dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
max_seq_length: int = field(default=128, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
pad_to_max_length: bool = field(default=True, metadata={'help': 'Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when batching to the maximum length in the batch.'})
max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
max_predict_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of prediction examples to this value if set.'})
train_file: Optional[str] = field(default=None, metadata={'help': 'A csv or a json file containing the training data.'})
validation_file: Optional[str] = field(default=None, metadata={'help': 'A csv or a json file containing the validation data.'})
test_file: Optional[str] = field(default=None, metadata={'help': 'A csv or a json file containing the test data.'})
def __post_init__(self):
if self.task_name is not None:
self.task_name = self.task_name.lower()
if self.task_name not in task_to_keys:
raise ValueError('Unknown task, you should pick one in ' + ','.join(task_to_keys.keys()))
elif self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.')
else:
train_extension = self.train_file.split('.')[-1]
assert train_extension in ['csv', 'json'], '`train_file` should be a csv or a json file.'
validation_extension = self.validation_file.split('.')[-1]
assert validation_extension == train_extension, '`validation_file` should have the same extension (csv or json) as `train_file`.'
class_skeleton:
@dataclass
class DataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
'''
def __post_init__(self):
pass
total_program_units=3, total_doc_str=1, AvgCountLine=16, AvgCountLineBlank=0, AvgCountLineCode=16, AvgCountLineComment=0, AvgCyclomatic=5, CommentToCodeRatio=0.07, CountClassBase=0, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=91, CountLineBlank=3, CountLineCode=82, CountLineCodeDecl=16, CountLineCodeExe=80, CountLineComment=6, CountStmt=24, CountStmtDecl=16, CountStmtExe=22, MaxCyclomatic=5, MaxInheritanceTree=0, MaxNesting=2, SumCyclomatic=5

id: 103
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/examples/legacy/pytorch-lightning/run_glue.py
class_name: run_glue.GLUETransformer
human_written_code:
from transformers import glue_compute_metrics as compute_metrics
from lightning_base import BaseTransformer, add_generic_args, generic_train
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
import os
from argparse import Namespace
import numpy as np
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_processors as processors
import torch
class GLUETransformer(BaseTransformer):
mode = 'sequence-classification'
def __init__(self, hparams):
if isinstance(hparams, dict):
hparams = Namespace(**hparams)
hparams.glue_output_mode = glue_output_modes[hparams.task]
num_labels = glue_tasks_num_labels[hparams.task]
super().__init__(hparams, num_labels, self.mode)
def forward(self, **inputs):
return self.model(**inputs)
def training_step(self, batch, batch_idx):
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ['distilbert', 'bart']:
inputs['token_type_ids'] = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
outputs = self(**inputs)
loss = outputs[0]
lr_scheduler = self.trainer.lr_schedulers[0]['scheduler']
tensorboard_logs = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {'loss': loss, 'log': tensorboard_logs}
def prepare_data(self):
"""Called to initialize data. Use the call to construct features"""
args = self.hparams
processor = processors[args.task]()
self.labels = processor.get_labels()
for mode in ['train', 'dev']:
cached_features_file = self._feature_file(mode)
if os.path.exists(cached_features_file) and (not args.overwrite_cache):
logger.info('Loading features from cached file %s', cached_features_file)
else:
logger.info('Creating features from dataset file at %s', args.data_dir)
examples = processor.get_dev_examples(args.data_dir) if mode == 'dev' else processor.get_train_examples(args.data_dir)
features = convert_examples_to_features(examples, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode)
logger.info('Saving features into cached file %s', cached_features_file)
torch.save(features, cached_features_file)
def get_dataloader(self, mode: str, batch_size: int, shuffle: bool=False) -> DataLoader:
"""Load datasets. Called after prepare data."""
mode = 'dev' if mode == 'test' else mode
cached_features_file = self._feature_file(mode)
logger.info('Loading features from cached file %s', cached_features_file)
features = torch.load(cached_features_file, weights_only=True)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
if self.hparams.glue_output_mode == 'classification':
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif self.hparams.glue_output_mode == 'regression':
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
return DataLoader(TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels), batch_size=batch_size, shuffle=shuffle)
def validation_step(self, batch, batch_idx):
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ['distilbert', 'bart']:
inputs['token_type_ids'] = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
outputs = self(**inputs)
tmp_eval_loss, logits = outputs[:2]
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
return {'val_loss': tmp_eval_loss.detach().cpu(), 'pred': preds, 'target': out_label_ids}
def _eval_end(self, outputs) -> tuple:
val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean().detach().cpu().item()
preds = np.concatenate([x['pred'] for x in outputs], axis=0)
if self.hparams.glue_output_mode == 'classification':
preds = np.argmax(preds, axis=1)
elif self.hparams.glue_output_mode == 'regression':
preds = np.squeeze(preds)
out_label_ids = np.concatenate([x['target'] for x in outputs], axis=0)
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
results = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}
ret = dict(results.items())
ret['log'] = results
return (ret, preds_list, out_label_list)
def validation_epoch_end(self, outputs: list) -> dict:
ret, preds, targets = self._eval_end(outputs)
logs = ret['log']
return {'val_loss': logs['val_loss'], 'log': logs, 'progress_bar': logs}
def test_epoch_end(self, outputs) -> dict:
ret, predictions, targets = self._eval_end(outputs)
logs = ret['log']
return {'avg_test_loss': logs['val_loss'], 'log': logs, 'progress_bar': logs}
@staticmethod
def add_model_specific_args(parser, root_dir):
BaseTransformer.add_model_specific_args(parser, root_dir)
parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
parser.add_argument('--task', default='', type=str, required=True, help='The GLUE task to run')
parser.add_argument('--gpus', default=0, type=int, help='The number of GPUs allocated for this, it is by default 0 meaning none')
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
return parser
class_skeleton:
class GLUETransformer(BaseTransformer):
def __init__(self, hparams):
pass
def forward(self, **inputs):
pass
def training_step(self, batch, batch_idx):
pass
def prepare_data(self):
'''Called to initialize data. Use the call to construct features'''
pass
def get_dataloader(self, mode: str, batch_size: int, shuffle: bool=False) -> DataLoader:
'''Load datasets. Called after prepare data.'''
pass
def validation_step(self, batch, batch_idx):
pass
def _eval_end(self, outputs) -> tuple:
pass
def validation_epoch_end(self, outputs: list) -> dict:
pass
def test_epoch_end(self, outputs) -> dict:
pass
@staticmethod
def add_model_specific_args(parser, root_dir):
pass
total_program_units=12, total_doc_str=2, AvgCountLine=14, AvgCountLineBlank=2, AvgCountLineCode=12, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0.03, CountClassBase=1, CountClassCoupled=11, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=9, CountDeclInstanceVariable=2, CountDeclMethod=10, CountDeclMethodAll=25, CountLine=152, CountLineBlank=28, CountLineCode=120, CountLineCodeDecl=49, CountLineCodeExe=108, CountLineComment=4, CountStmt=81, CountStmtDecl=47, CountStmtExe=70, MaxCyclomatic=4, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=23

id: 104
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/examples/pytorch/text-classification/run_glue.py
class_name: run_glue.ModelArguments
human_written_code:
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`).'})
trust_remote_code: bool = field(default=False, metadata={'help': 'Whether to trust the execution of code from datasets/models defined on the Hub. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.'})
ignore_mismatched_sizes: bool = field(default=False, metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'})
class_skeleton:
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
'''
pass
total_program_units=2, total_doc_str=1, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0.07, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=49, CountLineBlank=1, CountLineCode=45, CountLineCodeDecl=10, CountLineCodeExe=44, CountLineComment=3, CountStmt=10, CountStmtDecl=10, CountStmtExe=9, MaxCyclomatic=0, MaxInheritanceTree=0, MaxNesting=0, SumCyclomatic=0

id: 105
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/examples/pytorch/image-classification/run_image_classification.py
class_name: run_image_classification.DataTrainingArguments
human_written_code:
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify
them on the command line.
"""
dataset_name: Optional[str] = field(default=None, metadata={'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
train_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the training data.'})
validation_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the validation data.'})
train_val_split: Optional[float] = field(default=0.15, metadata={'help': 'Percent to split off of train for validation.'})
max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
image_column_name: str = field(default='image', metadata={'help': "The name of the dataset column containing the image data. Defaults to 'image'."})
label_column_name: str = field(default='label', metadata={'help': "The name of the dataset column containing the labels. Defaults to 'label'."})
def __post_init__(self):
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError('You must specify either a dataset name from the hub or a train and/or validation directory.')
class_skeleton:
@dataclass
class DataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify
them on the command line.
'''
def __post_init__(self):
pass
total_program_units=3, total_doc_str=1, AvgCountLine=5, AvgCountLineBlank=0, AvgCountLineCode=5, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0.11, CountClassBase=0, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=53, CountLineBlank=2, CountLineCode=46, CountLineCodeDecl=11, CountLineCodeExe=44, CountLineComment=5, CountStmt=13, CountStmtDecl=11, CountStmtExe=11, MaxCyclomatic=2, MaxInheritanceTree=0, MaxNesting=1, SumCyclomatic=2

id: 106
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/examples/pytorch/image-classification/run_image_classification.py
class_name: run_image_classification.ModelArguments
human_written_code:
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(default='google/vit-base-patch16-224-in21k', metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
model_type: Optional[str] = field(default=None, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`).'})
trust_remote_code: bool = field(default=False, metadata={'help': 'Whether to trust the execution of code from datasets/models defined on the Hub. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.'})
ignore_mismatched_sizes: bool = field(default=False, metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'})
class_skeleton:
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
'''
pass
total_program_units=2, total_doc_str=1, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0.07, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=47, CountLineBlank=1, CountLineCode=43, CountLineCodeDecl=10, CountLineCodeExe=42, CountLineComment=3, CountStmt=10, CountStmtDecl=10, CountStmtExe=9, MaxCyclomatic=0, MaxInheritanceTree=0, MaxNesting=0, SumCyclomatic=0

id: 107
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/examples/pytorch/instance-segmentation/run_instance_segmentation.py
class_name: run_instance_segmentation.Evaluator
human_written_code:
from torchmetrics.detection.mean_ap import MeanAveragePrecision
from collections.abc import Mapping
from transformers.trainer import EvalPrediction
import torch
from transformers import AutoImageProcessor, AutoModelForUniversalSegmentation, HfArgumentParser, Trainer, TrainingArguments
class Evaluator:
"""
Compute metrics for the instance segmentation task.
"""
def __init__(self, image_processor: AutoImageProcessor, id2label: Mapping[int, str], threshold: float=0.0):
"""
Initialize evaluator with image processor, id2label mapping and threshold for filtering predictions.
Args:
image_processor (AutoImageProcessor): Image processor for
`post_process_instance_segmentation` method.
id2label (Mapping[int, str]): Mapping from class id to class name.
threshold (float): Threshold to filter predicted boxes by confidence. Defaults to 0.0.
"""
self.image_processor = image_processor
self.id2label = id2label
self.threshold = threshold
self.metric = self.get_metric()
def get_metric(self):
metric = MeanAveragePrecision(iou_type='segm', class_metrics=True)
return metric
def reset_metric(self):
self.metric.reset()
def postprocess_target_batch(self, target_batch) -> list[dict[str, torch.Tensor]]:
"""Collect targets in a form of list of dictionaries with keys "masks", "labels"."""
batch_masks = target_batch[0]
batch_labels = target_batch[1]
post_processed_targets = []
for masks, labels in zip(batch_masks, batch_labels):
post_processed_targets.append({'masks': masks.to(dtype=torch.bool), 'labels': labels})
return post_processed_targets
def get_target_sizes(self, post_processed_targets) -> list[list[int]]:
target_sizes = []
for target in post_processed_targets:
target_sizes.append(target['masks'].shape[-2:])
return target_sizes
def postprocess_prediction_batch(self, prediction_batch, target_sizes) -> list[dict[str, torch.Tensor]]:
"""Collect predictions in a form of list of dictionaries with keys "masks", "labels", "scores"."""
model_output = ModelOutput(class_queries_logits=prediction_batch[0], masks_queries_logits=prediction_batch[1])
post_processed_output = self.image_processor.post_process_instance_segmentation(model_output, threshold=self.threshold, target_sizes=target_sizes, return_binary_maps=True)
post_processed_predictions = []
for image_predictions, target_size in zip(post_processed_output, target_sizes):
if image_predictions['segments_info']:
post_processed_image_prediction = {'masks': image_predictions['segmentation'].to(dtype=torch.bool), 'labels': torch.tensor([x['label_id'] for x in image_predictions['segments_info']]), 'scores': torch.tensor([x['score'] for x in image_predictions['segments_info']])}
else:
post_processed_image_prediction = {'masks': torch.zeros([0, *target_size], dtype=torch.bool), 'labels': torch.tensor([]), 'scores': torch.tensor([])}
post_processed_predictions.append(post_processed_image_prediction)
return post_processed_predictions
@torch.no_grad()
def __call__(self, evaluation_results: EvalPrediction, compute_result: bool=False) -> Mapping[str, float]:
"""
Update metrics with current evaluation results and return metrics if `compute_result` is True.
Args:
evaluation_results (EvalPrediction): Predictions and targets from evaluation.
compute_result (bool): Whether to compute and return metrics.
Returns:
Mapping[str, float]: Metrics in a form of dictionary {<metric_name>: <metric_value>}
"""
prediction_batch = nested_cpu(evaluation_results.predictions)
target_batch = nested_cpu(evaluation_results.label_ids)
post_processed_targets = self.postprocess_target_batch(target_batch)
target_sizes = self.get_target_sizes(post_processed_targets)
post_processed_predictions = self.postprocess_prediction_batch(prediction_batch, target_sizes)
self.metric.update(post_processed_predictions, post_processed_targets)
if not compute_result:
return
metrics = self.metric.compute()
classes = metrics.pop('classes')
map_per_class = metrics.pop('map_per_class')
mar_100_per_class = metrics.pop('mar_100_per_class')
for class_id, class_map, class_mar in zip(classes, map_per_class, mar_100_per_class):
class_name = self.id2label[class_id.item()] if self.id2label is not None else class_id.item()
metrics[f'map_{class_name}'] = class_map
metrics[f'mar_100_{class_name}'] = class_mar
metrics = {k: round(v.item(), 4) for k, v in metrics.items()}
self.reset_metric()
return metrics
class_skeleton:
class Evaluator:
'''
Compute metrics for the instance segmentation task.
'''
def __init__(self, image_processor: AutoImageProcessor, id2label: Mapping[int, str], threshold: float=0.0):
'''
Initialize evaluator with image processor, id2label mapping and threshold for filtering predictions.
Args:
image_processor (AutoImageProcessor): Image processor for
`post_process_instance_segmentation` method.
id2label (Mapping[int, str]): Mapping from class id to class name.
threshold (float): Threshold to filter predicted boxes by confidence. Defaults to 0.0.
'''
pass
def get_metric(self):
pass
def reset_metric(self):
pass
def postprocess_target_batch(self, target_batch) -> list[dict[str, torch.Tensor]]:
'''Collect targets in a form of list of dictionaries with keys "masks", "labels".'''
pass
def get_target_sizes(self, post_processed_targets) -> list[list[int]]:
pass
def postprocess_prediction_batch(self, prediction_batch, target_sizes) -> list[dict[str, torch.Tensor]]:
'''Collect predictions in a form of list of dictionaries with keys "masks", "labels", "scores".'''
pass
@torch.no_grad()
def __call__(self, evaluation_results: EvalPrediction, compute_result: bool=False) -> Mapping[str, float]:
'''
Update metrics with current evaluation results and return metrics if `compute_result` is True.
Args:
evaluation_results (EvalPrediction): Predictions and targets from evaluation.
compute_result (bool): Whether to compute and return metrics.
Returns:
Mapping[str, float]: Metrics in a form of dictionary {<metric_name>: <metric_value>}
'''
pass
total_program_units=9, total_doc_str=5, AvgCountLine=16, AvgCountLineBlank=2, AvgCountLineCode=11, AvgCountLineComment=4, AvgCyclomatic=2, CommentToCodeRatio=0.36, CountClassBase=0, CountClassCoupled=9, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=7, CountDeclInstanceVariable=4, CountDeclMethod=7, CountDeclMethodAll=7, CountLine=127, CountLineBlank=21, CountLineCode=78, CountLineCodeDecl=41, CountLineCodeExe=64, CountLineComment=28, CountStmt=53, CountStmtDecl=35, CountStmtExe=45, MaxCyclomatic=4, MaxInheritanceTree=0, MaxNesting=2, SumCyclomatic=14

id: 108
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/examples/pytorch/instance-segmentation/run_instance_segmentation.py
class_name: run_instance_segmentation.ModelOutput
human_written_code:
from dataclasses import dataclass, field
import torch
@dataclass
class ModelOutput:
class_queries_logits: torch.Tensor
masks_queries_logits: torch.Tensor
class_skeleton:
@dataclass
class ModelOutput:
pass
total_program_units=2, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=3, CountLineBlank=0, CountLineCode=3, CountLineCodeDecl=1, CountLineCodeExe=2, CountLineComment=0, CountStmt=3, CountStmtDecl=1, CountStmtExe=2, MaxCyclomatic=0, MaxInheritanceTree=0, MaxNesting=0, SumCyclomatic=0

id: 109
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/examples/legacy/run_language_modeling.py
class_name: run_language_modeling.DataTrainingArguments
human_written_code:
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
train_data_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
train_data_files: Optional[str] = field(default=None, metadata={'help': 'The input training data files (multiple files in glob format). Very often splitting large files to smaller files can prevent tokenizer going out of memory'})
eval_data_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'})
train_ref_file: Optional[str] = field(default=None, metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'})
eval_ref_file: Optional[str] = field(default=None, metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'})
line_by_line: bool = field(default=False, metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'})
mlm: bool = field(default=False, metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'})
whole_word_mask: bool = field(default=False, metadata={'help': 'Whether ot not to use whole word mask.'})
mlm_probability: float = field(default=0.15, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'})
plm_probability: float = field(default=1 / 6, metadata={'help': 'Ratio of length of a span of masked tokens to surrounding context length for permutation language modeling.'})
max_span_length: int = field(default=5, metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'})
block_size: int = field(default=-1, metadata={'help': 'Optional input sequence length after tokenization. The training dataset will be truncated in block of this size for training.Default to the model max input length for single sentence inputs (take into account special tokens).'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
class_skeleton:
@dataclass
class DataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
'''
pass
total_program_units=2, total_doc_str=1, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0.05, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=67, CountLineBlank=3, CountLineCode=61, CountLineCodeDecl=14, CountLineCodeExe=60, CountLineComment=3, CountStmt=14, CountStmtDecl=14, CountStmtExe=13, MaxCyclomatic=0, MaxInheritanceTree=0, MaxNesting=0, SumCyclomatic=0

id: 110
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/examples/legacy/run_language_modeling.py
class_name: run_language_modeling.ModelArguments
human_written_code:
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(default=None, metadata={'help': 'The model checkpoint for weights initialization. Leave None if you want to train a model from scratch.'})
model_type: Optional[str] = field(default=None, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
class_skeleton:
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
'''
pass
total_program_units=2, total_doc_str=1, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0.13, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=28, CountLineBlank=1, CountLineCode=24, CountLineCodeDecl=6, CountLineCodeExe=23, CountLineComment=3, CountStmt=6, CountStmtDecl=6, CountStmtExe=5, MaxCyclomatic=0, MaxInheritanceTree=0, MaxNesting=0, SumCyclomatic=0

id: 111
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/examples/pytorch/image-pretraining/run_mae.py
class_name: run_mae.CustomTrainingArguments
human_written_code:
from dataclasses import dataclass, field
from transformers import HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining
@dataclass
class CustomTrainingArguments(TrainingArguments):
base_learning_rate: float = field(default=0.001, metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'})
class_skeleton:
@dataclass
class CustomTrainingArguments(TrainingArguments):
pass
total_program_units=2, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=32, CountLine=4, CountLineBlank=0, CountLineCode=4, CountLineCodeDecl=2, CountLineCodeExe=3, CountLineComment=0, CountStmt=2, CountStmtDecl=2, CountStmtExe=1, MaxCyclomatic=0, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=0

id: 112
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/examples/pytorch/image-pretraining/run_mae.py
class_name: run_mae.DataTrainingArguments
human_written_code:
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
dataset_name: Optional[str] = field(default='cifar10', metadata={'help': 'Name of a dataset from the datasets package'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
trust_remote_code: bool = field(default=False, metadata={'help': 'Whether to trust the execution of code from datasets/models defined on the Hub. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.'})
image_column_name: Optional[str] = field(default=None, metadata={'help': 'The column name of the images in the files.'})
train_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the training data.'})
validation_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the validation data.'})
train_val_split: Optional[float] = field(default=0.15, metadata={'help': 'Percent to split off of train for validation.'})
max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
def __post_init__(self):
data_files = {}
if self.train_dir is not None:
data_files['train'] = self.train_dir
if self.validation_dir is not None:
data_files['val'] = self.validation_dir
self.data_files = data_files if data_files else None
class_skeleton:
@dataclass
class DataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
'''
def __post_init__(self):
pass
total_program_units=3, total_doc_str=1, AvgCountLine=7, AvgCountLineBlank=0, AvgCountLineCode=7, AvgCountLineComment=0, AvgCyclomatic=4, CommentToCodeRatio=0.12, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=1, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=58, CountLineBlank=2, CountLineCode=50, CountLineCodeDecl=13, CountLineCodeExe=48, CountLineComment=6, CountStmt=17, CountStmtDecl=13, CountStmtExe=15, MaxCyclomatic=4, MaxInheritanceTree=0, MaxNesting=1, SumCyclomatic=4

id: 113
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/examples/pytorch/image-pretraining/run_mae.py
class_name: run_mae.ModelArguments
human_written_code:
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/image processor we are going to pre-train.
"""
model_name_or_path: str = field(default=None, metadata={'help': "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'})
config_overrides: Optional[str] = field(default=None, metadata={'help': 'Override some existing default config settings when a model is trained from scratch. Example: n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`).'})
mask_ratio: float = field(default=0.75, metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'})
norm_pix_loss: bool = field(default=True, metadata={'help': 'Whether or not to train with normalized pixel values as target.'})
class_skeleton:
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/image processor we are going to pre-train.
'''
pass
total_program_units=2, total_doc_str=1, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0.07, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=48, CountLineBlank=1, CountLineCode=44, CountLineCodeDecl=10, CountLineCodeExe=43, CountLineComment=3, CountStmt=10, CountStmtDecl=10, CountStmtExe=9, MaxCyclomatic=0, MaxInheritanceTree=0, MaxNesting=0, SumCyclomatic=0

id: 114
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/examples/pytorch/image-pretraining/run_mim.py
class_name: run_mim.DataTrainingArguments
human_written_code:
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class into argparse arguments to be able to
specify them on the command line.
"""
dataset_name: Optional[str] = field(default='cifar10', metadata={'help': 'Name of a dataset from the datasets package'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
image_column_name: Optional[str] = field(default=None, metadata={'help': "The column name of the images in the files. If not set, will try to use 'image' or 'img'."})
train_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the training data.'})
validation_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the validation data.'})
train_val_split: Optional[float] = field(default=0.15, metadata={'help': 'Percent to split off of train for validation.'})
mask_patch_size: int = field(default=32, metadata={'help': 'The size of the square patches to use for masking.'})
mask_ratio: float = field(default=0.6, metadata={'help': 'Percentage of patches to mask.'})
max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
def __post_init__(self):
data_files = {}
if self.train_dir is not None:
data_files['train'] = self.train_dir
if self.validation_dir is not None:
data_files['val'] = self.validation_dir
self.data_files = data_files if data_files else None
class_skeleton:
@dataclass
class DataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class into argparse arguments to be able to
specify them on the command line.
'''
def __post_init__(self):
pass
total_program_units=3, total_doc_str=1, AvgCountLine=7, AvgCountLineBlank=0, AvgCountLineCode=7, AvgCountLineComment=0, AvgCyclomatic=4, CommentToCodeRatio=0.11, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=1, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=53, CountLineBlank=2, CountLineCode=46, CountLineCodeDecl=14, CountLineCodeExe=44, CountLineComment=5, CountStmt=18, CountStmtDecl=14, CountStmtExe=16, MaxCyclomatic=4, MaxInheritanceTree=0, MaxNesting=1, SumCyclomatic=4

id: 115
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/examples/pytorch/image-pretraining/run_mim.py
class_name: run_mim.MaskGenerator
human_written_code:
import torch
import numpy as np
class MaskGenerator:
"""
A class to generate boolean masks for the pretraining task.
A mask is a 1D tensor of shape (model_patch_size**2,) where the value is either 0 or 1,
where 1 indicates "masked".
"""
def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
self.input_size = input_size
self.mask_patch_size = mask_patch_size
self.model_patch_size = model_patch_size
self.mask_ratio = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError('Input size must be divisible by mask patch size')
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError('Mask patch size must be divisible by model patch size')
self.rand_size = self.input_size // self.mask_patch_size
self.scale = self.mask_patch_size // self.model_patch_size
self.token_count = self.rand_size ** 2
self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))
def __call__(self):
mask_idx = np.random.permutation(self.token_count)[:self.mask_count]
mask = np.zeros(self.token_count, dtype=int)
mask[mask_idx] = 1
mask = mask.reshape((self.rand_size, self.rand_size))
mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)
return torch.tensor(mask.flatten())
class_skeleton:
class MaskGenerator:
'''
A class to generate boolean masks for the pretraining task.
A mask is a 1D tensor of shape (model_patch_size**2,) where the value is either 0 or 1,
where 1 indicates "masked".
'''
def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
pass
def __call__(self):
pass
total_program_units=3, total_doc_str=1, AvgCountLine=13, AvgCountLineBlank=3, AvgCountLineCode=10, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0.24, CountClassBase=0, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=8, CountDeclMethod=2, CountDeclMethodAll=2, CountLine=34, CountLineBlank=8, CountLineCode=21, CountLineCodeDecl=13, CountLineCodeExe=18, CountLineComment=5, CountStmt=21, CountStmtDecl=13, CountStmtExe=18, MaxCyclomatic=3, MaxInheritanceTree=0, MaxNesting=1, SumCyclomatic=4

id: 116
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/examples/pytorch/image-pretraining/run_mim.py
class_name: run_mim.ModelArguments
human_written_code:
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/image processor we are going to pre-train.
"""
model_name_or_path: str = field(default=None, metadata={'help': "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a checkpoint identifier on the hub. Don't set if you want to train a model from scratch."})
model_type: Optional[str] = field(default=None, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)})
config_name_or_path: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
config_overrides: Optional[str] = field(default=None, metadata={'help': 'Override some existing default config settings when a model is trained from scratch. Example: n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`).'})
trust_remote_code: bool = field(default=False, metadata={'help': 'Whether to trust the execution of code from datasets/models defined on the Hub. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.'})
image_size: Optional[int] = field(default=None, metadata={'help': 'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'})
patch_size: Optional[int] = field(default=None, metadata={'help': 'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'})
encoder_stride: Optional[int] = field(default=None, metadata={'help': 'Stride to use for the encoder.'})
class_skeleton:
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/image processor we are going to pre-train.
'''
pass
total_program_units=2, total_doc_str=1, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0.04, CountClassBase=0, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=79, CountLineBlank=1, CountLineCode=75, CountLineCodeDecl=13, CountLineCodeExe=74, CountLineComment=3, CountStmt=13, CountStmtDecl=13, CountStmtExe=12, MaxCyclomatic=0, MaxInheritanceTree=0, MaxNesting=0, SumCyclomatic=0

id: 117
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/examples/pytorch/image-pretraining/run_mim_no_trainer.py
class_name: run_mim_no_trainer.MaskGenerator
human_written_code:
import numpy as np
import torch
class MaskGenerator:
"""
A class to generate boolean masks for the pretraining task.
A mask is a 1D tensor of shape (model_patch_size**2,) where the value is either 0 or 1,
where 1 indicates "masked".
"""
def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
self.input_size = input_size
self.mask_patch_size = mask_patch_size
self.model_patch_size = model_patch_size
self.mask_ratio = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError('Input size must be divisible by mask patch size')
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError('Mask patch size must be divisible by model patch size')
self.rand_size = self.input_size // self.mask_patch_size
self.scale = self.mask_patch_size // self.model_patch_size
self.token_count = self.rand_size ** 2
self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))
def __call__(self):
mask_idx = np.random.permutation(self.token_count)[:self.mask_count]
mask = np.zeros(self.token_count, dtype=int)
mask[mask_idx] = 1
mask = mask.reshape((self.rand_size, self.rand_size))
mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)
return torch.tensor(mask.flatten())
class_skeleton:
class MaskGenerator:
'''
A class to generate boolean masks for the pretraining task.
A mask is a 1D tensor of shape (model_patch_size**2,) where the value is either 0 or 1,
where 1 indicates "masked".
'''
def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
pass
def __call__(self):
pass
total_program_units=3, total_doc_str=1, AvgCountLine=13, AvgCountLineBlank=3, AvgCountLineCode=10, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0.24, CountClassBase=0, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=8, CountDeclMethod=2, CountDeclMethodAll=2, CountLine=34, CountLineBlank=8, CountLineCode=21, CountLineCodeDecl=13, CountLineCodeExe=18, CountLineComment=5, CountStmt=21, CountStmtDecl=13, CountStmtExe=18, MaxCyclomatic=3, MaxInheritanceTree=0, MaxNesting=1, SumCyclomatic=4

id: 118
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/examples/pytorch/language-modeling/run_mlm.py
class_name: run_mlm.DataTrainingArguments
human_written_code:
from dataclasses import dataclass, field
from transformers.utils.versions import require_version
from typing import Optional
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
validation_split_percentage: Optional[int] = field(default=5, metadata={'help': "The percentage of the train set used as validation set in case there's no validation split"})
max_seq_length: Optional[int] = field(default=None, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated.'})
preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
mlm_probability: float = field(default=0.15, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'})
line_by_line: bool = field(default=False, metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'})
pad_to_max_length: bool = field(default=False, metadata={'help': 'Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when batching to the maximum length in the batch.'})
max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
streaming: bool = field(default=False, metadata={'help': 'Enable streaming mode'})
def __post_init__(self):
if self.streaming:
require_version('datasets>=2.0.0', 'The streaming feature requires `datasets>=2.0.0`')
if self.dataset_name is None and self.train_file is None and (self.validation_file is None):
raise ValueError('Need either a dataset name or a training/validation file.')
else:
if self.train_file is not None:
extension = self.train_file.split('.')[-1]
if extension not in ['csv', 'json', 'txt']:
raise ValueError('`train_file` should be a csv, a json or a txt file.')
if self.validation_file is not None:
extension = self.validation_file.split('.')[-1]
if extension not in ['csv', 'json', 'txt']:
raise ValueError('`validation_file` should be a csv, a json or a txt file.')
class_skeleton:
@dataclass
class DataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
'''
def __post_init__(self):
pass
total_program_units=3, total_doc_str=1, AvgCountLine=15, AvgCountLineBlank=1, AvgCountLineCode=14, AvgCountLineComment=0, AvgCyclomatic=7, CommentToCodeRatio=0.04, CountClassBase=0, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=89, CountLineBlank=3, CountLineCode=83, CountLineCodeDecl=17, CountLineCodeExe=81, CountLineComment=3, CountStmt=28, CountStmtDecl=17, CountStmtExe=26, MaxCyclomatic=7, MaxInheritanceTree=0, MaxNesting=3, SumCyclomatic=7

id: 119
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/examples/pytorch/language-modeling/run_mlm.py
class_name: run_mlm.ModelArguments
human_written_code:
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(default=None, metadata={'help': "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."})
model_type: Optional[str] = field(default=None, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)})
config_overrides: Optional[str] = field(default=None, metadata={'help': 'Override some existing default config settings when a model is trained from scratch. Example: n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`).'})
trust_remote_code: bool = field(default=False, metadata={'help': 'Whether to trust the execution of code from datasets/models defined on the Hub. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.'})
dtype: Optional[str] = field(default=None, metadata={'help': "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the dtype will be automatically derived from the model's weights.", 'choices': ['auto', 'bfloat16', 'float16', 'float32']})
def __post_init__(self):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError("--config_overrides can't be used in combination with --config_name or --model_name_or_path")
class_skeleton:
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
'''
def __post_init__(self):
pass
total_program_units=3, total_doc_str=1, AvgCountLine=5, AvgCountLineBlank=0, AvgCountLineCode=5, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0.04, CountClassBase=0, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=88, CountLineBlank=2, CountLineCode=83, CountLineCodeDecl=14, CountLineCodeExe=81, CountLineComment=3, CountStmt=16, CountStmtDecl=14, CountStmtExe=14, MaxCyclomatic=2, MaxInheritanceTree=0, MaxNesting=1, SumCyclomatic=2

id: 120
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/examples/legacy/multiple_choice/run_multiple_choice.py
class_name: run_multiple_choice.DataTrainingArguments
human_written_code:
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
from dataclasses import dataclass, field
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys())})
data_dir: str = field(metadata={'help': 'Should contain the data files for the task.'})
max_seq_length: int = field(default=128, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
|
@dataclass
class DataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
'''
pass
| 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 1 | 15 | 5 | 14 | 3 | 5 | 5 | 4 | 0 | 0 | 0 | 0
|
121 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/legacy/multiple_choice/run_multiple_choice.py | run_multiple_choice.ModelArguments |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
|
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
'''
pass
| 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.21 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 1 | 14 | 5 | 13 | 3 | 5 | 5 | 4 | 0 | 0 | 0 | 0
|
122 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/legacy/token-classification/run_ner.py | run_ner.DataTrainingArguments |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
data_dir: str = field(metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'})
labels: Optional[str] = field(default=None, metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'})
max_seq_length: int = field(default=128, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
|
@dataclass
class DataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
'''
pass
| 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.15 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 1 | 20 | 5 | 19 | 3 | 5 | 5 | 4 | 0 | 0 | 0 | 0
|
123 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/pytorch/token-classification/run_ner.py | run_ner.DataTrainingArguments |
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
task_name: Optional[str] = field(default='ner', metadata={'help': 'The name of the task (ner, pos...).'})
dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a csv or JSON file).'})
validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate on (a csv or JSON file).'})
test_file: Optional[str] = field(default=None, metadata={'help': 'An optional input test data file to predict on (a csv or JSON file).'})
text_column_name: Optional[str] = field(default=None, metadata={'help': 'The column name of text to input in the file (a csv or JSON file).'})
label_column_name: Optional[str] = field(default=None, metadata={'help': 'The column name of label to input in the file (a csv or JSON file).'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
max_seq_length: int = field(default=None, metadata={'help': 'The maximum total input sequence length after tokenization. If set, sequences longer than this will be truncated, sequences shorter will be padded.'})
pad_to_max_length: bool = field(default=False, metadata={'help': 'Whether to pad all samples to model maximum sentence length. If False, will pad the samples dynamically when batching to the maximum length in the batch. More efficient on GPU but very bad for TPU.'})
max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
max_predict_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of prediction examples to this value if set.'})
label_all_tokens: bool = field(default=False, metadata={'help': 'Whether to put the label for one word on all tokens generated by that word or just on the one (in which case the other tokens will have a padding index).'})
return_entity_level_metrics: bool = field(default=False, metadata={'help': 'Whether to return all the entity levels during evaluation or just the overall ones.'})
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and (self.validation_file is None):
raise ValueError('Need either a dataset name or a training/validation file.')
else:
if self.train_file is not None:
extension = self.train_file.split('.')[-1]
assert extension in ['csv', 'json'], '`train_file` should be a csv or a json file.'
if self.validation_file is not None:
extension = self.validation_file.split('.')[-1]
assert extension in ['csv', 'json'], '`validation_file` should be a csv or a json file.'
self.task_name = self.task_name.lower()
|
@dataclass
class DataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
'''
def __post_init__(self):
pass
| 3 | 1 | 11 | 0 | 11 | 0 | 4 | 0.03 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 107 | 2 | 102 | 20 | 100 | 3 | 28 | 20 | 26 | 4 | 0 | 2 | 4
|
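A hedged sketch of what `label_all_tokens` and the -100 padding index control during preprocessing: word-level NER labels are re-aligned to sub-word tokens via the fast tokenizer's word_ids(). The checkpoint, words and label ids below are illustrative, not taken from the record:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')  # any fast tokenizer works
words = ['Hugging', 'Face', 'is', 'in', 'NYC']
word_labels = [3, 4, 0, 0, 5]          # made-up per-word label ids
label_all_tokens = False

encoding = tokenizer(words, is_split_into_words=True)
aligned, previous_word = [], None
for word_id in encoding.word_ids():
    if word_id is None:                # special tokens ([CLS], [SEP]) are ignored in the loss
        aligned.append(-100)
    elif word_id != previous_word:     # first sub-token keeps the word's label
        aligned.append(word_labels[word_id])
    else:                              # remaining sub-tokens: label or -100, per the flag
        aligned.append(word_labels[word_id] if label_all_tokens else -100)
    previous_word = word_id

print(list(zip(encoding.tokens(), aligned)))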
124 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/legacy/token-classification/run_ner.py | run_ner.ModelArguments |
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
task_type: Optional[str] = field(default='NER', metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'})
tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
|
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
'''
pass
| 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.28 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 1 | 18 | 7 | 17 | 5 | 7 | 7 | 6 | 0 | 0 | 0 | 0
|
125 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/pytorch/token-classification/run_ner.py | run_ner.ModelArguments |
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`).'})
trust_remote_code: bool = field(default=False, metadata={'help': 'Whether to trust the execution of code from datasets/models defined on the Hub. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.'})
ignore_mismatched_sizes: bool = field(default=False, metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'})
|
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
'''
pass
| 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.07 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 1 | 41 | 9 | 40 | 3 | 9 | 9 | 8 | 0 | 0 | 0 | 0
|
126 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/legacy/pytorch-lightning/run_ner.py | run_ner.NERTransformer |
from utils_ner import TokenClassificationTask
from torch.nn import CrossEntropyLoss
import os
import numpy as np
from argparse import Namespace
from importlib import import_module
import torch
from torch.utils.data import DataLoader, TensorDataset
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
class NERTransformer(BaseTransformer):
"""
A training module for NER. See BaseTransformer for the core options.
"""
mode = 'token-classification'
def __init__(self, hparams):
if isinstance(hparams, dict):
hparams = Namespace(**hparams)
module = import_module('tasks')
try:
token_classification_task_clazz = getattr(module, hparams.task_type)
self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(f'Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. Available tasks classes are: {TokenClassificationTask.__subclasses__()}')
self.labels = self.token_classification_task.get_labels(hparams.labels)
self.pad_token_label_id = CrossEntropyLoss().ignore_index
super().__init__(hparams, len(self.labels), self.mode)
def forward(self, **inputs):
return self.model(**inputs)
def training_step(self, batch, batch_num):
"""Compute loss and log."""
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if self.config.model_type in ['bert', 'xlnet'] else None
outputs = self(**inputs)
loss = outputs[0]
return {'loss': loss}
def prepare_data(self):
"""Called to initialize data. Use the call to construct features"""
args = self.hparams
for mode in ['train', 'dev', 'test']:
cached_features_file = self._feature_file(mode)
if os.path.exists(cached_features_file) and (not args.overwrite_cache):
logger.info('Loading features from cached file %s', cached_features_file)
features = torch.load(cached_features_file, weights_only=True)
else:
logger.info('Creating features from dataset file at %s', args.data_dir)
examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
features = self.token_classification_task.convert_examples_to_features(examples, self.labels, args.max_seq_length, self.tokenizer, cls_token_at_end=bool(self.config.model_type in ['xlnet']), cls_token=self.tokenizer.cls_token, cls_token_segment_id=2 if self.config.model_type in ['xlnet'] else 0, sep_token=self.tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(self.config.model_type in ['xlnet']), pad_token=self.tokenizer.pad_token_id, pad_token_segment_id=self.tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id)
logger.info('Saving features into cached file %s', cached_features_file)
torch.save(features, cached_features_file)
def get_dataloader(self, mode: int, batch_size: int, shuffle: bool=False) -> DataLoader:
"""Load datasets. Called after prepare data."""
cached_features_file = self._feature_file(mode)
logger.info('Loading features from cached file %s', cached_features_file)
features = torch.load(cached_features_file, weights_only=True)
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
if features[0].token_type_ids is not None:
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
else:
all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
return DataLoader(TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size)
def validation_step(self, batch, batch_nb):
"""Compute validation"""
inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if self.config.model_type in ['bert', 'xlnet'] else None
outputs = self(**inputs)
tmp_eval_loss, logits = outputs[:2]
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
return {'val_loss': tmp_eval_loss.detach().cpu(), 'pred': preds, 'target': out_label_ids}
def _eval_end(self, outputs):
"""Evaluation called for both Val and Test"""
val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()
preds = np.concatenate([x['pred'] for x in outputs], axis=0)
preds = np.argmax(preds, axis=2)
out_label_ids = np.concatenate([x['target'] for x in outputs], axis=0)
label_map = dict(enumerate(self.labels))
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
results = {'val_loss': val_loss_mean, 'accuracy_score': accuracy_score(out_label_list, preds_list), 'precision': precision_score(out_label_list, preds_list), 'recall': recall_score(out_label_list, preds_list), 'f1': f1_score(out_label_list, preds_list)}
ret = dict(results.items())
ret['log'] = results
return (ret, preds_list, out_label_list)
def validation_epoch_end(self, outputs):
ret, preds, targets = self._eval_end(outputs)
logs = ret['log']
return {'val_loss': logs['val_loss'], 'log': logs, 'progress_bar': logs}
def test_epoch_end(self, outputs):
ret, predictions, targets = self._eval_end(outputs)
logs = ret['log']
return {'avg_test_loss': logs['val_loss'], 'log': logs, 'progress_bar': logs}
@staticmethod
def add_model_specific_args(parser, root_dir):
BaseTransformer.add_model_specific_args(parser, root_dir)
parser.add_argument('--task_type', default='NER', type=str, help='Task type to fine tune in training (e.g. NER, POS, etc)')
parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
parser.add_argument('--labels', default='', type=str, help='Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.')
parser.add_argument('--gpus', default=0, type=int, help='The number of GPUs allocated for this, it is by default 0 meaning none')
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
return parser
|
class NERTransformer(BaseTransformer):
'''
A training module for NER. See BaseTransformer for the core options.
'''
def __init__(self, hparams):
pass
def forward(self, **inputs):
pass
def training_step(self, batch, batch_num):
'''Compute loss and log.'''
pass
def prepare_data(self):
'''Called to initialize data. Use the call to construct features'''
pass
def get_dataloader(self, mode: int, batch_size: int, shuffle: bool=False) -> DataLoader:
'''Load datasets. Called after prepare data.'''
pass
def validation_step(self, batch, batch_nb):
'''Compute validation'''
pass
def _eval_end(self, outputs):
'''Evaluation called for both Val and Test'''
pass
def validation_epoch_end(self, outputs):
pass
def test_epoch_end(self, outputs):
pass
@staticmethod
def add_model_specific_args(parser, root_dir):
pass
| 12 | 6 | 16 | 1 | 14 | 2 | 2 | 0.13 | 1 | 12 | 1 | 0 | 9 | 4 | 10 | 25 | 179 | 20 | 143 | 52 | 131 | 18 | 89 | 50 | 78 | 4 | 2 | 3 | 23
|
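The `_eval_end` alignment above is easier to see with toy arrays: predictions are argmax-ed over the label dimension, and positions whose gold label equals pad_token_label_id are dropped before the seqeval metrics are computed. A numpy-only sketch with made-up labels and logits:

import numpy as np

pad_token_label_id = -100
label_map = dict(enumerate(['O', 'B-PER', 'I-PER']))

preds = np.array([[[0.1, 0.8, 0.1], [0.2, 0.1, 0.7], [0.9, 0.05, 0.05]]])  # (1, seq_len, num_labels)
out_label_ids = np.array([[1, 2, pad_token_label_id]])                     # (1, seq_len)

preds = np.argmax(preds, axis=2)
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
    for j in range(out_label_ids.shape[1]):
        if out_label_ids[i, j] != pad_token_label_id:
            out_label_list[i].append(label_map[out_label_ids[i, j]])
            preds_list[i].append(label_map[preds[i, j]])

print(out_label_list, preds_list)  # [['B-PER', 'I-PER']] [['B-PER', 'I-PER']]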
127 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/pytorch/object-detection/run_object_detection.py | run_object_detection.ModelArguments |
from typing import Any, Optional, Union
from dataclasses import dataclass, field
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(default='facebook/detr-resnet-50', metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
ignore_mismatched_sizes: bool = field(default=False, metadata={'help': 'Whether or not to raise an error if some of the weights from the checkpoint do not have the same size as the weights of the model (if for instance, you are instantiating a model with 10 labels from a checkpoint with 3 labels).'})
token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`).'})
trust_remote_code: bool = field(default=False, metadata={'help': 'Whether to trust the execution of code from datasets/models defined on the Hub. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.'})
|
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
'''
pass
| 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.07 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 1 | 41 | 9 | 40 | 3 | 9 | 9 | 8 | 0 | 0 | 0 | 0
|
128 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/pytorch/object-detection/run_object_detection.py | run_object_detection.ModelOutput |
from dataclasses import dataclass, field
import torch
@dataclass
class ModelOutput:
logits: torch.Tensor
pred_boxes: torch.Tensor
|
@dataclass
class ModelOutput:
pass
| 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 3 | 1 | 2 | 0 | 3 | 1 | 2 | 0 | 0 | 0 | 0
|
129 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/pytorch/language-modeling/run_plm.py | run_plm.DataTrainingArguments |
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
trust_remote_code: bool = field(default=False, metadata={'help': 'Whether to trust the execution of code from datasets/models defined on the Hub. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.'})
train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
validation_split_percentage: Optional[int] = field(default=5, metadata={'help': "The percentage of the train set used as validation set in case there's no validation split"})
max_seq_length: int = field(default=512, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated.'})
preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
plm_probability: float = field(default=1 / 6, metadata={'help': 'Ratio of length of a span of masked tokens to surrounding context length for permutation language modeling.'})
max_span_length: int = field(default=5, metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'})
line_by_line: bool = field(default=False, metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'})
pad_to_max_length: bool = field(default=False, metadata={'help': 'Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when batching to the maximum length in the batch.'})
max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and (self.validation_file is None):
raise ValueError('Need either a dataset name or a training/validation file.')
else:
if self.train_file is not None:
extension = self.train_file.split('.')[-1]
assert extension in ['csv', 'json', 'txt'], '`train_file` should be a csv, a json or a txt file.'
if self.validation_file is not None:
extension = self.validation_file.split('.')[-1]
assert extension in ['csv', 'json', 'txt'], '`validation_file` should be a csv, a json or a txt file.'
|
@dataclass
class DataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
'''
def __post_init__(self):
pass
| 3 | 1 | 10 | 0 | 10 | 0 | 4 | 0.03 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 102 | 2 | 97 | 18 | 95 | 3 | 25 | 18 | 23 | 4 | 0 | 2 | 4
|
130 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/pytorch/language-modeling/run_plm.py | run_plm.ModelArguments |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(default=None, metadata={'help': "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
config_overrides: Optional[str] = field(default=None, metadata={'help': 'Override some existing default config settings when a model is trained from scratch. Example: n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'})
tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`).'})
def __post_init__(self):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError("--config_overrides can't be used in combination with --config_name or --model_name_or_path")
|
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
'''
def __post_init__(self):
pass
| 3 | 1 | 5 | 0 | 5 | 0 | 2 | 0.05 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 64 | 2 | 59 | 11 | 57 | 3 | 13 | 11 | 11 | 2 | 0 | 1 | 2
|
131 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/pytorch/question-answering/run_qa.py | run_qa.DataTrainingArguments |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'})
test_file: Optional[str] = field(default=None, metadata={'help': 'An optional input test data file to evaluate the perplexity on (a text file).'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
max_seq_length: int = field(default=384, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
pad_to_max_length: bool = field(default=True, metadata={'help': 'Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when batching to the maximum length in the batch (which can be faster on GPU but will be slower on TPU).'})
max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
max_predict_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of prediction examples to this value if set.'})
version_2_with_negative: bool = field(default=False, metadata={'help': 'If true, some of the examples do not have an answer.'})
null_score_diff_threshold: float = field(default=0.0, metadata={'help': 'The threshold used to select the null answer: if the best answer has a score that is less than the score of the null answer minus this threshold, the null answer is selected for this example. Only useful when `version_2_with_negative=True`.'})
doc_stride: int = field(default=128, metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'})
n_best_size: int = field(default=20, metadata={'help': 'The total number of n-best predictions to generate when looking for an answer.'})
max_answer_length: int = field(default=30, metadata={'help': 'The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another.'})
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and (self.validation_file is None) and (self.test_file is None):
raise ValueError('Need either a dataset name or a training/validation file/test_file.')
else:
if self.train_file is not None:
extension = self.train_file.split('.')[-1]
assert extension in ['csv', 'json'], '`train_file` should be a csv or a json file.'
if self.validation_file is not None:
extension = self.validation_file.split('.')[-1]
assert extension in ['csv', 'json'], '`validation_file` should be a csv or a json file.'
if self.test_file is not None:
extension = self.test_file.split('.')[-1]
assert extension in ['csv', 'json'], '`test_file` should be a csv or a json file.'
|
@dataclass
class DataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
'''
def __post_init__(self):
pass
| 3 | 1 | 18 | 0 | 18 | 0 | 5 | 0.03 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 121 | 2 | 116 | 20 | 114 | 3 | 30 | 20 | 28 | 5 | 0 | 2 | 5
|
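A hedged sketch of how `max_seq_length` and `doc_stride` interact in the QA preprocessing: a long context is split by the tokenizer into overlapping features, each at most max_seq_length tokens, with doc_stride tokens of overlap. The checkpoint and text are illustrative:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
question = 'Where is the Eiffel Tower?'
context = 'The Eiffel Tower is a wrought-iron lattice tower in Paris. ' * 60  # deliberately long

encoded = tokenizer(
    question,
    context,
    truncation='only_second',       # never truncate the question
    max_length=384,                 # data_args.max_seq_length
    stride=128,                     # data_args.doc_stride (overlap between chunks)
    return_overflowing_tokens=True,
    return_offsets_mapping=True,
    padding='max_length',           # data_args.pad_to_max_length
)
print(len(encoded['input_ids']), 'features generated from one example')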
132 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/pytorch/question-answering/run_qa.py | run_qa.ModelArguments |
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Path to directory to store the pretrained models downloaded from huggingface.co'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`).'})
trust_remote_code: bool = field(default=False, metadata={'help': 'Whether to trust the execution of code from datasets/models defined on the Hub. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.'})
|
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
'''
pass
| 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.08 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 1 | 37 | 8 | 36 | 3 | 8 | 8 | 7 | 0 | 0 | 0 | 0
|
133 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/pytorch/question-answering/run_qa_beam_search.py | run_qa_beam_search.DataTrainingArguments |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
trust_remote_code: bool = field(default=False, metadata={'help': 'Whether to trust the execution of code from datasets/models defined on the Hub. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.'})
train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'})
test_file: Optional[str] = field(default=None, metadata={'help': 'An optional input test data file to test the perplexity on (a text file).'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
max_seq_length: int = field(default=384, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
pad_to_max_length: bool = field(default=True, metadata={'help': 'Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when batching to the maximum length in the batch (which can be faster on GPU but will be slower on TPU).'})
max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
max_predict_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of prediction examples to this value if set.'})
version_2_with_negative: bool = field(default=False, metadata={'help': 'If true, some of the examples do not have an answer.'})
null_score_diff_threshold: float = field(default=0.0, metadata={'help': 'The threshold used to select the null answer: if the best answer has a score that is less than the score of the null answer minus this threshold, the null answer is selected for this example. Only useful when `version_2_with_negative=True`.'})
doc_stride: int = field(default=128, metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'})
n_best_size: int = field(default=20, metadata={'help': 'The total number of n-best predictions to generate when looking for an answer.'})
max_answer_length: int = field(default=30, metadata={'help': 'The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another.'})
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and (self.validation_file is None) and (self.test_file is None):
raise ValueError('Need either a dataset name or a training/validation/test file.')
else:
if self.train_file is not None:
extension = self.train_file.split('.')[-1]
assert extension in ['csv', 'json'], '`train_file` should be a csv or a json file.'
if self.validation_file is not None:
extension = self.validation_file.split('.')[-1]
assert extension in ['csv', 'json'], '`validation_file` should be a csv or a json file.'
if self.test_file is not None:
extension = self.test_file.split('.')[-1]
assert extension in ['csv', 'json'], '`test_file` should be a csv or a json file.'
|
@dataclass
class DataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
'''
def __post_init__(self):
pass
| 3 | 1 | 18 | 0 | 18 | 0 | 5 | 0.02 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 131 | 2 | 126 | 21 | 124 | 3 | 31 | 21 | 29 | 5 | 0 | 2 | 5
|
134 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/pytorch/question-answering/run_qa_beam_search.py | run_qa_beam_search.ModelArguments |
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`).'})
|
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
'''
pass
| 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.11 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 1 | 27 | 7 | 26 | 3 | 7 | 7 | 6 | 0 | 0 | 0 | 0
|
135 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py | run_semantic_segmentation.DataTrainingArguments |
import warnings
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify
them on the command line.
"""
dataset_name: Optional[str] = field(default='segments/sidewalk-semantic', metadata={'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
train_val_split: Optional[float] = field(default=0.15, metadata={'help': 'Percent to split off of train for validation.'})
max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
do_reduce_labels: Optional[bool] = field(default=False, metadata={'help': 'Whether or not to reduce all labels by 1 and replace background by 255.'})
reduce_labels: Optional[bool] = field(default=False, metadata={'help': 'Whether or not to reduce all labels by 1 and replace background by 255.'})
def __post_init__(self):
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError('You must specify either a dataset name from the hub or a train and/or validation directory.')
if self.reduce_labels:
self.do_reduce_labels = self.reduce_labels
warnings.warn('The `reduce_labels` argument is deprecated and will be removed in v4.45. Please use `do_reduce_labels` instead.', FutureWarning)
|
@dataclass
class DataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify
them on the command line.
'''
def __post_init__(self):
pass
| 3 | 1 | 11 | 0 | 11 | 0 | 3 | 0.1 | 0 | 2 | 0 | 0 | 1 | 0 | 1 | 1 | 57 | 2 | 50 | 9 | 48 | 5 | 14 | 9 | 12 | 3 | 0 | 1 | 3
|
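A small numpy sketch of the convention behind `do_reduce_labels` (and the deprecated `reduce_labels`): every label id in a segmentation map is shifted down by one, and the background id 0 becomes the ignore value 255. The toy map below is illustrative:

import numpy as np

segmentation_map = np.array([[0, 1, 2],
                             [2, 0, 1]], dtype=np.uint8)   # 0 = background

reduced = segmentation_map.astype(np.int16) - 1            # widen first to avoid uint8 wrap-around
reduced[reduced == -1] = 255                               # background -> ignore index
reduced = reduced.astype(np.uint8)

print(reduced)
# [[255   0   1]
#  [  1 255   0]]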
136 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py | run_semantic_segmentation.ModelArguments |
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(default='nvidia/mit-b0', metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`).'})
trust_remote_code: bool = field(default=False, metadata={'help': 'Whether to trust the execution of code from datasets/models defined on the Hub. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.'})
|
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
'''
pass
| 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.09 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 1 | 35 | 8 | 34 | 3 | 8 | 8 | 7 | 0 | 0 | 0 | 0
|
137 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/pytorch/question-answering/run_seq2seq_qa.py | run_seq2seq_qa.DataTrainingArguments |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
context_column: Optional[str] = field(default='context', metadata={'help': 'The name of the column in the datasets containing the contexts (for question answering).'})
question_column: Optional[str] = field(default='question', metadata={'help': 'The name of the column in the datasets containing the questions (for question answering).'})
answer_column: Optional[str] = field(default='answers', metadata={'help': 'The name of the column in the datasets containing the answers (for question answering).'})
train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'})
test_file: Optional[str] = field(default=None, metadata={'help': 'An optional input test data file to evaluate the perplexity on (a text file).'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
max_seq_length: int = field(default=384, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
max_answer_length: int = field(default=30, metadata={'help': 'The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another.'})
val_max_answer_length: Optional[int] = field(default=None, metadata={'help': 'The maximum total sequence length for validation target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded. Will default to `max_answer_length`. This argument is also used to override the ``max_length`` param of ``model.generate``, which is used during ``evaluate`` and ``predict``.'})
pad_to_max_length: bool = field(default=True, metadata={'help': 'Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when batching to the maximum length in the batch (which can be faster on GPU but will be slower on TPU).'})
max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
max_predict_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of prediction examples to this value if set.'})
version_2_with_negative: bool = field(default=False, metadata={'help': 'If true, some of the examples do not have an answer.'})
null_score_diff_threshold: float = field(default=0.0, metadata={'help': 'The threshold used to select the null answer: if the best answer has a score that is less than the score of the null answer minus this threshold, the null answer is selected for this example. Only useful when `version_2_with_negative=True`.'})
doc_stride: int = field(default=128, metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'})
n_best_size: int = field(default=20, metadata={'help': 'The total number of n-best predictions to generate when looking for an answer.'})
num_beams: Optional[int] = field(default=None, metadata={'help': 'Number of beams to use for evaluation. This argument will be passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'})
ignore_pad_token_for_loss: bool = field(default=True, metadata={'help': 'Whether to ignore the tokens corresponding to padded labels in the loss computation or not.'})
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and (self.validation_file is None) and (self.test_file is None):
raise ValueError('Need either a dataset name or a training/validation file/test_file.')
else:
if self.train_file is not None:
extension = self.train_file.split('.')[-1]
assert extension in ['csv', 'json'], '`train_file` should be a csv or a json file.'
if self.validation_file is not None:
extension = self.validation_file.split('.')[-1]
assert extension in ['csv', 'json'], '`validation_file` should be a csv or a json file.'
if self.test_file is not None:
extension = self.test_file.split('.')[-1]
assert extension in ['csv', 'json'], '`test_file` should be a csv or a json file.'
if self.val_max_answer_length is None:
self.val_max_answer_length = self.max_answer_length
|
@dataclass
class DataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
'''
def __post_init__(self):
pass
| 3 | 1 | 20 | 0 | 20 | 0 | 6 | 0.02 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 161 | 2 | 156 | 26 | 154 | 3 | 38 | 26 | 36 | 6 | 0 | 2 | 6
|
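A hedged sketch of where the generation-related arguments above end up: during evaluation and prediction they are forwarded to `model.generate` of the seq2seq model. The checkpoint and input are illustrative:

import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('t5-small')
model = AutoModelForSeq2SeqLM.from_pretrained('t5-small')

inputs = tokenizer(
    'question: Where is the Eiffel Tower? context: The Eiffel Tower is in Paris.',
    return_tensors='pt',
)
with torch.no_grad():
    generated = model.generate(
        **inputs,
        max_length=30,   # data_args.val_max_answer_length (defaults to max_answer_length)
        num_beams=4,     # data_args.num_beams
    )
print(tokenizer.batch_decode(generated, skip_special_tokens=True))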
138 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/pytorch/question-answering/run_seq2seq_qa.py | run_seq2seq_qa.ModelArguments |
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Path to directory to store the pretrained models downloaded from huggingface.co'})
use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`).'})
trust_remote_code: bool = field(default=False, metadata={'help': 'Whether to trust the execution of code from datasets/models defined on the Hub. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.'})
|
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
'''
pass
| 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.07 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 1 | 41 | 9 | 40 | 3 | 9 | 9 | 8 | 0 | 0 | 0 | 0
|
139 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py | run_speech_recognition_ctc.DataCollatorCTCWithPadding |
from transformers import AutoConfig, AutoFeatureExtractor, AutoModelForCTC, AutoProcessor, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, Wav2Vec2Processor, set_seed
import torch
from typing import Optional, Union
from dataclasses import dataclass, field
@dataclass
class DataCollatorCTCWithPadding:
"""
Data collator that will dynamically pad the inputs received.
Args:
processor (:class:`~transformers.AutoProcessor`)
The processor used for processing the data.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
max_length_labels (:obj:`int`, `optional`):
Maximum length of the ``labels`` returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
"""
processor: AutoProcessor
padding: Union[bool, str] = 'longest'
pad_to_multiple_of: Optional[int] = None
pad_to_multiple_of_labels: Optional[int] = None
feature_extractor_input_name: Optional[str] = 'input_values'
def __call__(self, features: list[dict[str, Union[list[int], torch.Tensor]]]) -> dict[str, torch.Tensor]:
input_features = [{self.feature_extractor_input_name: feature[self.feature_extractor_input_name]} for feature in features]
label_features = [{'input_ids': feature['labels']} for feature in features]
batch = self.processor.pad(input_features, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt')
labels_batch = self.processor.pad(labels=label_features, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors='pt')
labels = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1), -100)
batch['labels'] = labels
if 'attention_mask' in batch:
batch['attention_mask'] = batch['attention_mask'].to(torch.long)
return batch
|
@dataclass
class DataCollatorCTCWithPadding:
'''
Data collator that will dynamically pad the inputs received.
Args:
processor (:class:`~transformers.AutoProcessor`)
The processor used for processing the data.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
max_length_labels (:obj:`int`, `optional`):
Maximum length of the ``labels`` returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
'''
def __call__(self, features: list[dict[str, Union[list[int], torch.Tensor]]]) -> dict[str, torch.Tensor]:
pass
| 3 | 1 | 30 | 5 | 22 | 3 | 2 | 0.93 | 0 | 3 | 0 | 0 | 1 | 0 | 1 | 1 | 61 | 7 | 28 | 11 | 26 | 26 | 16 | 11 | 14 | 2 | 0 | 1 | 2
|
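The core of the collator above is the label-masking line in `__call__`; a minimal torch sketch with toy ids showing how padded label positions become -100 so they are ignored by the CTC loss:

import torch

padded_label_ids = torch.tensor([[12,  7,  9,  0,  0],
                                 [ 3, 15,  0,  0,  0]])    # 0 = tokenizer pad id (toy values)
attention_mask   = torch.tensor([[ 1,  1,  1,  0,  0],
                                 [ 1,  1,  0,  0,  0]])

labels = padded_label_ids.masked_fill(attention_mask.ne(1), -100)
print(labels)
# tensor([[  12,    7,    9, -100, -100],
#         [   3,   15, -100, -100, -100]])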
140 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py | run_speech_recognition_ctc.ModelArguments |
from typing import Optional, Union
from dataclasses import dataclass, field
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
tokenizer_name_or_path: Optional[str] = field(default=None, metadata={'help': 'Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
freeze_feature_encoder: bool = field(default=True, metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
attention_dropout: float = field(default=0.0, metadata={'help': 'The dropout ratio for the attention probabilities.'})
activation_dropout: float = field(default=0.0, metadata={'help': 'The dropout ratio for activations inside the fully connected layer.'})
feat_proj_dropout: float = field(default=0.0, metadata={'help': 'The dropout ratio for the projected features.'})
hidden_dropout: float = field(default=0.0, metadata={'help': 'The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.'})
final_dropout: float = field(default=0.0, metadata={'help': 'The dropout probability for the final projection layer.'})
mask_time_prob: float = field(default=0.05, metadata={'help': 'Probability of each feature vector along the time axis to be chosen as the start of the vector span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature vectors will be masked along the time axis.'})
mask_time_length: int = field(default=10, metadata={'help': 'Length of vector span to mask along the time axis.'})
    mask_feature_prob: float = field(default=0.0, metadata={'help': 'Probability of each feature vector along the feature axis to be chosen as the start of the vector span to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature bins will be masked along the time axis.'})
mask_feature_length: int = field(default=10, metadata={'help': 'Length of vector span to mask along the feature axis.'})
layerdrop: float = field(default=0.0, metadata={'help': 'The LayerDrop probability.'})
ctc_loss_reduction: Optional[str] = field(default='mean', metadata={'help': "The way the ctc loss should be reduced. Should be one of 'mean' or 'sum'."})
ctc_zero_infinity: Optional[bool] = field(default=False, metadata={'help': 'Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly occur when the inputs are too short to be aligned to the targets.'})
    add_adapter: Optional[bool] = field(default=False, metadata={'help': 'Whether a convolutional attention network should be stacked on top of the Wav2Vec2Bert Encoder. Can be very useful to downsample the output length.'})
|
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0.04
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 1
| 78
| 18
| 77
| 3
| 18
| 18
| 17
| 0
| 0
| 0
| 0
|
141
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py
|
run_speech_recognition_ctc_adapter.DataCollatorCTCWithPadding
|
from transformers import AutoConfig, AutoFeatureExtractor, AutoModelForCTC, AutoProcessor, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, Wav2Vec2Processor, set_seed
import torch
from typing import Optional, Union
from dataclasses import dataclass, field
@dataclass
class DataCollatorCTCWithPadding:
"""
Data collator that will dynamically pad the inputs received.
Args:
processor (:class:`~transformers.AutoProcessor`)
The processor used for processing the data.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
max_length_labels (:obj:`int`, `optional`):
Maximum length of the ``labels`` returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
"""
processor: AutoProcessor
padding: Union[bool, str] = 'longest'
pad_to_multiple_of: Optional[int] = None
pad_to_multiple_of_labels: Optional[int] = None
def __call__(self, features: list[dict[str, Union[list[int], torch.Tensor]]]) -> dict[str, torch.Tensor]:
input_features = [{'input_values': feature['input_values']} for feature in features]
label_features = [{'input_ids': feature['labels']} for feature in features]
batch = self.processor.pad(input_features, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt')
labels_batch = self.processor.pad(labels=label_features, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors='pt')
labels = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1), -100)
batch['labels'] = labels
if 'attention_mask' in batch:
batch['attention_mask'] = batch['attention_mask'].to(torch.long)
return batch
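
# A minimal usage sketch, not from the original script: calling the collator
# defined above on two toy features of unequal length. The checkpoint name is
# an assumption used only for illustration; any processor bundling a
# Wav2Vec2-style feature extractor and CTC tokenizer would work the same way.
# Running this downloads the processor files.
from transformers import AutoProcessor

toy_processor = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h')   # assumed checkpoint
toy_collator = DataCollatorCTCWithPadding(processor=toy_processor, padding='longest')
toy_features = [
    {'input_values': [0.1] * 16000, 'labels': toy_processor.tokenizer('HELLO').input_ids},
    {'input_values': [0.2] * 8000, 'labels': toy_processor.tokenizer('HI').input_ids},
]
toy_batch = toy_collator(toy_features)
# toy_batch['input_values'] is padded to the longest waveform, and padded label
# positions in toy_batch['labels'] are -100.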
|
@dataclass
class DataCollatorCTCWithPadding:
'''
Data collator that will dynamically pad the inputs received.
Args:
processor (:class:`~transformers.AutoProcessor`)
The processor used for processing the data.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
max_length_labels (:obj:`int`, `optional`):
Maximum length of the ``labels`` returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
'''
def __call__(self, features: list[dict[str, Union[list[int], torch.Tensor]]]) -> dict[str, torch.Tensor]:
pass
| 3
| 1
| 28
| 5
| 20
| 3
| 2
| 1.04
| 0
| 3
| 0
| 0
| 1
| 0
| 1
| 1
| 58
| 7
| 25
| 10
| 23
| 26
| 15
| 10
| 13
| 2
| 0
| 1
| 2
|
142
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/pytorch/speech-recognition/run_speech_recognition_ctc_adapter.py
|
run_speech_recognition_ctc_adapter.ModelArguments
|
from dataclasses import dataclass, field
from typing import Optional, Union
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
tokenizer_name_or_path: Optional[str] = field(default=None, metadata={'help': 'Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
final_dropout: float = field(default=0.0, metadata={'help': 'The dropout probability for the final projection layer.'})
mask_time_prob: float = field(default=0.05, metadata={'help': 'Probability of each feature vector along the time axis to be chosen as the start of the vector span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature vectors will be masked along the time axis.'})
mask_time_length: int = field(default=10, metadata={'help': 'Length of vector span to mask along the time axis.'})
    mask_feature_prob: float = field(default=0.0, metadata={'help': 'Probability of each feature vector along the feature axis to be chosen as the start of the vector span to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature bins will be masked along the time axis.'})
mask_feature_length: int = field(default=10, metadata={'help': 'Length of vector span to mask along the feature axis.'})
layerdrop: float = field(default=0.0, metadata={'help': 'The LayerDrop probability.'})
ctc_loss_reduction: Optional[str] = field(default='mean', metadata={'help': "The way the ctc loss should be reduced. Should be one of 'mean' or 'sum'."})
adapter_attn_dim: int = field(default=16, metadata={'help': 'The hidden dimension of the adapter layers that will be randomly initialized and trained. The higher the dimension, the more capacity is given to the adapter weights. Note that only the adapter weights are fine-tuned.'})
|
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0.06
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 58
| 1
| 54
| 12
| 53
| 3
| 12
| 12
| 11
| 0
| 0
| 0
| 0
|
143
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py
|
run_speech_recognition_seq2seq.DataCollatorSpeechSeq2SeqWithPadding
|
import torch
from dataclasses import dataclass, field
from typing import Any, Optional, Union
@dataclass
class DataCollatorSpeechSeq2SeqWithPadding:
"""
Data collator that will dynamically pad the inputs received.
Args:
processor ([`WhisperProcessor`])
The processor used for processing the data.
decoder_start_token_id (`int`)
The begin-of-sentence of the decoder.
forward_attention_mask (`bool`)
Whether to return attention_mask.
"""
processor: Any
decoder_start_token_id: int
forward_attention_mask: bool
def __call__(self, features: list[dict[str, Union[list[int], torch.Tensor]]]) -> dict[str, torch.Tensor]:
model_input_name = self.processor.model_input_names[0]
input_features = [{model_input_name: feature[model_input_name]} for feature in features]
label_features = [{'input_ids': feature['labels']} for feature in features]
batch = self.processor.feature_extractor.pad(input_features, return_tensors='pt')
if self.forward_attention_mask:
batch['attention_mask'] = torch.LongTensor([feature['attention_mask'] for feature in features])
labels_batch = self.processor.tokenizer.pad(label_features, return_tensors='pt')
labels = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1), -100)
if (labels[:, 0] == self.decoder_start_token_id).all().cpu().item():
labels = labels[:, 1:]
batch['labels'] = labels
return batch
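
# A minimal sketch, not from the original script: toy illustration of the
# bos-stripping check in __call__ above. When every label sequence already
# starts with the decoder start token, that first column is dropped so the
# model can prepend it again itself during training. The token id below is an
# assumption (multilingual Whisper's <|startoftranscript|>); label values are invented.
import torch

toy_decoder_start_token_id = 50258
toy_labels = torch.tensor([[50258, 11, 12], [50258, 13, 14]])
if (toy_labels[:, 0] == toy_decoder_start_token_id).all().cpu().item():
    toy_labels = toy_labels[:, 1:]
# toy_labels -> tensor([[11, 12], [13, 14]])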
|
@dataclass
class DataCollatorSpeechSeq2SeqWithPadding:
'''
Data collator that will dynamically pad the inputs received.
Args:
processor ([`WhisperProcessor`])
The processor used for processing the data.
decoder_start_token_id (`int`)
The begin-of-sentence of the decoder.
forward_attention_mask (`bool`)
Whether to return attention_mask.
'''
def __call__(self, features: list[dict[str, Union[list[int], torch.Tensor]]]) -> dict[str, torch.Tensor]:
pass
| 3
| 1
| 25
| 7
| 13
| 5
| 3
| 0.88
| 0
| 3
| 0
| 0
| 1
| 0
| 1
| 1
| 41
| 9
| 17
| 8
| 15
| 15
| 17
| 8
| 15
| 3
| 0
| 1
| 3
|
144
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py
|
run_speech_recognition_seq2seq.DataTrainingArguments
|
from typing import Any, Optional, Union
from dataclasses import dataclass, field
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: str = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
audio_column_name: str = field(default='audio', metadata={'help': "The name of the dataset column containing the audio data. Defaults to 'audio'"})
text_column_name: str = field(default='text', metadata={'help': "The name of the dataset column containing the text data. Defaults to 'text'"})
    max_duration_in_seconds: float = field(default=20.0, metadata={'help': "Truncate audio files that are longer than `max_duration_in_seconds` seconds to `max_duration_in_seconds` seconds"})
min_duration_in_seconds: float = field(default=0.0, metadata={'help': 'Filter audio files that are shorter than `min_duration_in_seconds` seconds'})
preprocessing_only: bool = field(default=False, metadata={'help': 'Whether to only do data preprocessing and skip training. This is especially useful when data preprocessing errors out in distributed training due to timeout. In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` so that the cached datasets can consequently be loaded in distributed training'})
train_split_name: str = field(default='train', metadata={'help': "The name of the training data set split to use (via the datasets library). Defaults to 'train'"})
    eval_split_name: str = field(default='test', metadata={'help': "The name of the evaluation data set split to use (via the datasets library). Defaults to 'test'"})
do_lower_case: bool = field(default=True, metadata={'help': 'Whether the target text should be lower cased.'})
language: str = field(default=None, metadata={'help': 'Language for multilingual fine-tuning. This argument should be set for multilingual fine-tuning only. For English speech recognition, it should be set to `None`.'})
task: str = field(default='transcribe', metadata={'help': 'Task, either `transcribe` for speech recognition or `translate` for speech translation.'})
|
@dataclass
class DataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0.03
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 96
| 1
| 92
| 17
| 91
| 3
| 17
| 17
| 16
| 0
| 0
| 0
| 0
|
145
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py
|
run_speech_recognition_seq2seq.ModelArguments
|
from typing import Any, Optional, Union
from dataclasses import dataclass, field
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
feature_extractor_name: Optional[str] = field(default=None, metadata={'help': 'feature extractor name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where to store the pretrained models downloaded from huggingface.co'})
use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`).'})
trust_remote_code: bool = field(default=False, metadata={'help': 'Whether to trust the execution of code from datasets/models defined on the Hub. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.'})
freeze_feature_encoder: bool = field(default=True, metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
freeze_encoder: bool = field(default=False, metadata={'help': 'Whether to freeze the entire encoder of the seq2seq model.'})
forced_decoder_ids: list[list[int]] = field(default=None, metadata={'help': 'Deprecated. Please use the `language` and `task` arguments instead.'})
    suppress_tokens: list[int] = field(default=None, metadata={'help': 'Deprecated. The use of `suppress_tokens` should not be required for the majority of fine-tuning examples. Should you need to use `suppress_tokens`, please manually update them in the fine-tuning script directly.'})
apply_spec_augment: bool = field(default=False, metadata={'help': 'Whether to apply *SpecAugment* data augmentation to the input features. This is currently only relevant for Wav2Vec2, HuBERT, WavLM and Whisper models.'})
|
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0.04
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 73
| 1
| 69
| 15
| 68
| 3
| 15
| 15
| 14
| 0
| 0
| 0
| 0
|
146
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/legacy/question-answering/run_squad_trainer.py
|
run_squad_trainer.ModelArguments
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
|
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0.33
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 21
| 1
| 15
| 6
| 14
| 5
| 6
| 6
| 5
| 0
| 0
| 0
| 0
|
147
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/pytorch/summarization/run_summarization.py
|
run_summarization.DataTrainingArguments
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
lang: Optional[str] = field(default=None, metadata={'help': 'Language id for summarization.'})
dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
text_column: Optional[str] = field(default=None, metadata={'help': 'The name of the column in the datasets containing the full texts (for summarization).'})
summary_column: Optional[str] = field(default=None, metadata={'help': 'The name of the column in the datasets containing the summaries (for summarization).'})
train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a jsonlines or csv file).'})
validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file).'})
test_file: Optional[str] = field(default=None, metadata={'help': 'An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file).'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
max_source_length: Optional[int] = field(default=1024, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
max_target_length: Optional[int] = field(default=128, metadata={'help': 'The maximum total sequence length for target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
val_max_target_length: Optional[int] = field(default=None, metadata={'help': 'The maximum total sequence length for validation target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. This argument is also used to override the ``max_length`` param of ``model.generate``, which is used during ``evaluate`` and ``predict``.'})
pad_to_max_length: bool = field(default=False, metadata={'help': 'Whether to pad all samples to model maximum sentence length. If False, will pad the samples dynamically when batching to the maximum length in the batch. More efficient on GPU but very bad for TPU.'})
max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
max_predict_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of prediction examples to this value if set.'})
num_beams: Optional[int] = field(default=1, metadata={'help': 'Number of beams to use for evaluation. This argument will be passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'})
ignore_pad_token_for_loss: bool = field(default=True, metadata={'help': 'Whether to ignore the tokens corresponding to padded labels in the loss computation or not.'})
source_prefix: Optional[str] = field(default=None, metadata={'help': 'A prefix to add before every source text (useful for T5 models).'})
    forced_bos_token: Optional[str] = field(default=None, metadata={'help': 'The token to force as the first generated token after the decoder_start_token_id. Useful for multilingual models like mBART where the first generated token needs to be the target language token (Usually it is the target language token)'})
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and (self.validation_file is None) and (self.test_file is None):
raise ValueError('Need either a dataset name or a training, validation, or test file.')
else:
if self.train_file is not None:
extension = self.train_file.split('.')[-1]
assert extension in ['csv', 'json'], '`train_file` should be a csv or a json file.'
if self.validation_file is not None:
extension = self.validation_file.split('.')[-1]
assert extension in ['csv', 'json'], '`validation_file` should be a csv or a json file.'
if self.test_file is not None:
extension = self.test_file.split('.')[-1]
assert extension in ['csv', 'json'], '`test_file` should be a csv or a json file.'
if self.val_max_target_length is None:
self.val_max_target_length = self.max_target_length
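
# A minimal usage sketch, not from the original script: how dataclasses like
# the one above are typically parsed in the example scripts. The CLI values are
# placeholders; a csv/json `train_file` satisfies the extension check in
# __post_init__, and `val_max_target_length` falls back to `max_target_length`.
from transformers import HfArgumentParser

toy_parser = HfArgumentParser(DataTrainingArguments)
(toy_data_args,) = toy_parser.parse_args_into_dataclasses(
    args=['--train_file', 'train.json', '--max_source_length', '512']
)
# toy_data_args.val_max_target_length == toy_data_args.max_target_length == 128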
|
@dataclass
class DataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
'''
def __post_init__(self):
pass
| 3
| 1
| 20
| 0
| 20
| 0
| 6
| 0.02
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 161
| 4
| 154
| 24
| 152
| 3
| 36
| 24
| 34
| 6
| 0
| 2
| 6
|
148
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/pytorch/summarization/run_summarization.py
|
run_summarization.ModelArguments
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where to store the pretrained models downloaded from huggingface.co'})
use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`).'})
trust_remote_code: bool = field(default=False, metadata={'help': 'Whether to trust the execution of code from datasets/models defined on the Hub. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.'})
resize_position_embeddings: Optional[bool] = field(default=None, metadata={'help': "Whether to automatically resize the position embeddings if `max_source_length` exceeds the model's position embeddings."})
|
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0.06
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 54
| 1
| 50
| 10
| 49
| 3
| 10
| 10
| 9
| 0
| 0
| 0
| 0
|
149
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/pytorch/multiple-choice/run_swag.py
|
run_swag.DataTrainingArguments
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
max_seq_length: Optional[int] = field(default=None, metadata={'help': 'The maximum total input sequence length after tokenization. If passed, sequences longer than this will be truncated, sequences shorter will be padded.'})
pad_to_max_length: bool = field(default=False, metadata={'help': 'Whether to pad all samples to the maximum sentence length. If False, will pad the samples dynamically when batching to the maximum length in the batch. More efficient on GPU but very bad for TPU.'})
max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
def __post_init__(self):
if self.train_file is not None:
extension = self.train_file.split('.')[-1]
assert extension in ['csv', 'json'], '`train_file` should be a csv or a json file.'
if self.validation_file is not None:
extension = self.validation_file.split('.')[-1]
assert extension in ['csv', 'json'], '`validation_file` should be a csv or a json file.'
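
# A minimal sketch, not from the original script: direct instantiation also
# runs the extension check in __post_init__. The file names are invented.
toy_args = DataTrainingArguments(train_file='swag_train.csv')   # csv passes the assert
# DataTrainingArguments(train_file='swag_train.tsv') would fail with
# '`train_file` should be a csv or a json file.'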
|
@dataclass
class DataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
'''
def __post_init__(self):
pass
| 3
| 1
| 7
| 0
| 7
| 0
| 3
| 0.05
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 62
| 2
| 57
| 11
| 55
| 3
| 16
| 11
| 14
| 3
| 0
| 1
| 3
|
150
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/legacy/run_swag.py
|
run_swag.InputFeatures
|
class InputFeatures:
def __init__(self, example_id, choices_features, label):
self.example_id = example_id
self.choices_features = [{'input_ids': input_ids, 'input_mask': input_mask, 'segment_ids': segment_ids} for _, input_ids, input_mask, segment_ids in choices_features]
self.label = label
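
# A minimal sketch, not from the original script: choices_features is consumed
# as an iterable of 4-tuples whose first element (the tokens) is discarded, one
# tuple per candidate ending. Toy values below.
toy_feature = InputFeatures(
    example_id='abc-0',
    choices_features=[(None, [101, 102], [1, 1], [0, 0]) for _ in range(4)],
    label=2,
)
# toy_feature.choices_features[0] == {'input_ids': [101, 102], 'input_mask': [1, 1], 'segment_ids': [0, 0]}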
|
class InputFeatures:
def __init__(self, example_id, choices_features, label):
pass
| 2
| 0
| 7
| 0
| 7
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 3
| 1
| 1
| 8
| 0
| 8
| 6
| 6
| 0
| 5
| 5
| 3
| 1
| 1
| 0
| 1
|
151
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/pytorch/multiple-choice/run_swag.py
|
run_swag.ModelArguments
|
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`).'})
trust_remote_code: bool = field(default=False, metadata={'help': 'Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.'})
|
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0.07
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 1
| 41
| 9
| 40
| 3
| 9
| 9
| 8
| 0
| 0
| 0
| 0
|
152
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/legacy/run_swag.py
|
run_swag.SwagExample
|
class SwagExample:
"""A single training/test example for the SWAG dataset."""
def __init__(self, swag_id, context_sentence, start_ending, ending_0, ending_1, ending_2, ending_3, label=None):
self.swag_id = swag_id
self.context_sentence = context_sentence
self.start_ending = start_ending
self.endings = [ending_0, ending_1, ending_2, ending_3]
self.label = label
def __str__(self):
return self.__repr__()
def __repr__(self):
attributes = [f'swag_id: {self.swag_id}', f'context_sentence: {self.context_sentence}', f'start_ending: {self.start_ending}', f'ending_0: {self.endings[0]}', f'ending_1: {self.endings[1]}', f'ending_2: {self.endings[2]}', f'ending_3: {self.endings[3]}']
if self.label is not None:
attributes.append(f'label: {self.label}')
return ', '.join(attributes)
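
# A minimal sketch, not from the original script: constructing an example and
# exercising the __repr__ defined above. All field values are invented.
toy_example = SwagExample(
    swag_id='0',
    context_sentence='A cook stands at a counter.',
    start_ending='He',
    ending_0='chops the onions.',
    ending_1='rides a bicycle.',
    ending_2='sings a song.',
    ending_3='closes the book.',
    label=0,
)
print(toy_example)   # 'swag_id: 0, context_sentence: ..., label: 0'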
|
class SwagExample:
'''A single training/test example for the SWAG dataset.'''
def __init__(self, swag_id, context_sentence, start_ending, ending_0, ending_1, ending_2, ending_3, label=None):
pass
def __str__(self):
pass
def __repr__(self):
pass
| 4
| 1
| 9
| 1
| 9
| 0
| 1
| 0.04
| 1
| 0
| 0
| 0
| 3
| 5
| 3
| 3
| 33
| 5
| 27
| 10
| 23
| 1
| 14
| 10
| 10
| 2
| 1
| 1
| 4
|
153
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/pytorch/translation/run_translation.py
|
run_translation.DataTrainingArguments
|
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
source_lang: str = field(default=None, metadata={'help': 'Source language id for translation.'})
target_lang: str = field(default=None, metadata={'help': 'Target language id for translation.'})
dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a jsonlines).'})
validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the metrics (sacrebleu) on a jsonlines file.'})
test_file: Optional[str] = field(default=None, metadata={'help': 'An optional input test data file to evaluate the metrics (sacrebleu) on a jsonlines file.'})
overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
max_source_length: Optional[int] = field(default=1024, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
max_target_length: Optional[int] = field(default=128, metadata={'help': 'The maximum total sequence length for target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
val_max_target_length: Optional[int] = field(default=None, metadata={'help': 'The maximum total sequence length for validation target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. This argument is also used to override the ``max_length`` param of ``model.generate``, which is used during ``evaluate`` and ``predict``.'})
pad_to_max_length: bool = field(default=False, metadata={'help': 'Whether to pad all samples to model maximum sentence length. If False, will pad the samples dynamically when batching to the maximum length in the batch. More efficient on GPU but very bad for TPU.'})
max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
max_predict_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of prediction examples to this value if set.'})
num_beams: Optional[int] = field(default=1, metadata={'help': 'Number of beams to use for evaluation. This argument will be passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'})
ignore_pad_token_for_loss: bool = field(default=True, metadata={'help': 'Whether to ignore the tokens corresponding to padded labels in the loss computation or not.'})
source_prefix: Optional[str] = field(default=None, metadata={'help': 'A prefix to add before every source text (useful for T5 models).'})
    forced_bos_token: Optional[str] = field(default=None, metadata={'help': 'The token to force as the first generated token after the :obj:`decoder_start_token_id`. Useful for multilingual models like :doc:`mBART <../model_doc/mbart>` where the first generated token needs to be the target language token. (Usually it is the target language token)'})
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and (self.validation_file is None):
raise ValueError('Need either a dataset name or a training/validation file.')
elif self.source_lang is None or self.target_lang is None:
raise ValueError('Need to specify the source language and the target language.')
valid_extensions = ['json', 'jsonl']
if self.train_file is not None:
extension = self.train_file.split('.')[-1]
assert extension in valid_extensions, '`train_file` should be a jsonlines file.'
if self.validation_file is not None:
extension = self.validation_file.split('.')[-1]
assert extension in valid_extensions, '`validation_file` should be a jsonlines file.'
if self.val_max_target_length is None:
self.val_max_target_length = self.max_target_length
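
# A minimal sketch, not from the original script: unlike the summarization
# variant, this __post_init__ also requires language ids, so a dataset name
# alone is rejected until source_lang/target_lang are set. The dataset and
# language values below are placeholders.
toy_args = DataTrainingArguments(dataset_name='wmt16', dataset_config_name='ro-en',
                                 source_lang='ro', target_lang='en')
# DataTrainingArguments(dataset_name='wmt16') would raise
# 'Need to specify the source language and the target language.'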
|
@dataclass
class DataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
'''
def __post_init__(self):
pass
| 3
| 1
| 18
| 2
| 14
| 2
| 6
| 0.04
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 145
| 5
| 135
| 24
| 133
| 5
| 34
| 24
| 32
| 6
| 0
| 1
| 6
|
154
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/pytorch/translation/run_translation.py
|
run_translation.ModelArguments
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where to store the pretrained models downloaded from huggingface.co'})
use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`).'})
trust_remote_code: bool = field(default=False, metadata={'help': 'Whether to trust the execution of code from datasets/models defined on the Hub. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.'})
|
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0.07
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 1
| 41
| 9
| 40
| 3
| 9
| 9
| 8
| 0
| 0
| 0
| 0
|
155
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/pytorch/speech-pretraining/run_wav2vec2_pretraining_no_trainer.py
|
run_wav2vec2_pretraining_no_trainer.DataCollatorForWav2Vec2Pretraining
|
import torch
from typing import Optional, Union
from transformers import SchedulerType, Wav2Vec2Config, Wav2Vec2FeatureExtractor, Wav2Vec2ForPreTraining, get_scheduler, is_wandb_available, set_seed
from dataclasses import dataclass
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices, _sample_negative_indices
@dataclass
class DataCollatorForWav2Vec2Pretraining:
"""
Data collator that will dynamically pad the inputs received and prepare masked indices
for self-supervised pretraining.
Args:
model (:class:`~transformers.Wav2Vec2ForPreTraining`):
The Wav2Vec2 model used for pretraining. The data collator needs to have access
to config and ``_get_feat_extract_output_lengths`` function for correct padding.
feature_extractor (:class:`~transformers.Wav2Vec2FeatureExtractor`):
The processor used for processing the data.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
mask_time_prob (:obj:`float`, `optional`, defaults to :obj:`0.65`):
Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked for the contrastive task.
Note that overlap between masked sequences may decrease the actual percentage of masked vectors.
The default value is taken from the original wav2vec 2.0 article (https://huggingface.co/papers/2006.11477),
and results in about 49 percent of each sequence being masked on average.
mask_time_length (:obj:`int`, `optional`, defaults to :obj:`10`):
Length of each vector mask span to mask along the time axis in the contrastive task. The default value
originates from the original wav2vec 2.0 article and corresponds to the ``M`` variable mentioned there.
"""
model: Wav2Vec2ForPreTraining
feature_extractor: Wav2Vec2FeatureExtractor
padding: Union[bool, str] = 'longest'
pad_to_multiple_of: Optional[int] = None
mask_time_prob: Optional[float] = 0.65
mask_time_length: Optional[int] = 10
def __call__(self, features: list[dict[str, Union[list[int], torch.Tensor]]]) -> dict[str, torch.Tensor]:
batch = self.feature_extractor.pad(features, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt')
device = batch['input_values'].device
batch_size = batch['input_values'].shape[0]
mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch['input_values'].shape[-1])
mask_indices_seq_length = int(mask_indices_seq_length)
if batch.get('attention_mask') is not None:
batch['sub_attention_mask'] = self.model._get_feature_vector_attention_mask(mask_indices_seq_length, batch['attention_mask'])
features_shape = (batch_size, mask_indices_seq_length)
mask_time_indices = _compute_mask_indices(features_shape, self.mask_time_prob, self.mask_time_length, attention_mask=batch.get('sub_attention_mask'))
sampled_negative_indices = _sample_negative_indices(features_shape, self.model.config.num_negatives, mask_time_indices=mask_time_indices)
batch['mask_time_indices'] = torch.tensor(mask_time_indices, dtype=torch.long, device=device)
batch['sampled_negative_indices'] = torch.tensor(sampled_negative_indices, dtype=torch.long, device=device)
return batch
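
# A minimal usage sketch, not from the original script: wiring the pretraining
# collator into a DataLoader. The checkpoint is an assumption; `toy_dataset`
# stands in for a dataset of {'input_values': [...]} examples produced by the
# feature extractor, so the DataLoader line is left commented out.
from torch.utils.data import DataLoader
from transformers import Wav2Vec2Config, Wav2Vec2FeatureExtractor, Wav2Vec2ForPreTraining

toy_config = Wav2Vec2Config.from_pretrained('facebook/wav2vec2-base')          # assumed checkpoint
toy_model = Wav2Vec2ForPreTraining(toy_config)
toy_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('facebook/wav2vec2-base')
toy_collator = DataCollatorForWav2Vec2Pretraining(model=toy_model, feature_extractor=toy_feature_extractor)
# toy_loader = DataLoader(toy_dataset, collate_fn=toy_collator, batch_size=8, shuffle=True)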
|
@dataclass
class DataCollatorForWav2Vec2Pretraining:
'''
Data collator that will dynamically pad the inputs received and prepare masked indices
for self-supervised pretraining.
Args:
model (:class:`~transformers.Wav2Vec2ForPreTraining`):
The Wav2Vec2 model used for pretraining. The data collator needs to have access
to config and ``_get_feat_extract_output_lengths`` function for correct padding.
feature_extractor (:class:`~transformers.Wav2Vec2FeatureExtractor`):
The processor used for processing the data.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
mask_time_prob (:obj:`float`, `optional`, defaults to :obj:`0.65`):
Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked for the contrastive task.
Note that overlap between masked sequences may decrease the actual percentage of masked vectors.
The default value is taken from the original wav2vec 2.0 article (https://huggingface.co/papers/2006.11477),
and results in about 49 percent of each sequence being masked on average.
mask_time_length (:obj:`int`, `optional`, defaults to :obj:`10`):
Length of each vector mask span to mask along the time axis in the contrastive task. The default value
originates from the original wav2vec 2.0 article and corresponds to the ``M`` variable mentioned there.
'''
def __call__(self, features: list[dict[str, Union[list[int], torch.Tensor]]]) -> dict[str, torch.Tensor]:
pass
| 3
| 1
| 43
| 7
| 30
| 6
| 2
| 1.05
| 0
| 3
| 0
| 0
| 1
| 0
| 1
| 1
| 86
| 10
| 37
| 13
| 35
| 39
| 21
| 13
| 19
| 2
| 0
| 1
| 2
|
156
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/pytorch/text-classification/run_xnli.py
|
run_xnli.ModelArguments
|
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(default=None, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
language: str = field(default=None, metadata={'help': 'Evaluation language. Also train language if `train_language` is set to None.'})
train_language: Optional[str] = field(default=None, metadata={'help': 'Train language if it is different from the evaluation language.'})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
do_lower_case: Optional[bool] = field(default=False, metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'})
use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `hf auth login` (stored in `~/.huggingface`).'})
trust_remote_code: bool = field(default=False, metadata={'help': 'Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.'})
ignore_mismatched_sizes: bool = field(default=False, metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'})
|
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0.05
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 59
| 1
| 55
| 13
| 54
| 3
| 13
| 13
| 12
| 0
| 0
| 0
| 0
|
157
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/legacy/seq2seq/finetune_trainer.py
|
seq2seq.finetune_trainer.DataTrainingArguments
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
data_dir: str = field(metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
task: Optional[str] = field(default='summarization', metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'})
max_source_length: Optional[int] = field(default=1024, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
max_target_length: Optional[int] = field(default=128, metadata={'help': 'The maximum total sequence length for target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
val_max_target_length: Optional[int] = field(default=142, metadata={'help': 'The maximum total sequence length for validation target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded. This argument is also used to override the ``max_length`` param of ``model.generate``, which is used during ``evaluate`` and ``predict``.'})
test_max_target_length: Optional[int] = field(default=142, metadata={'help': 'The maximum total sequence length for test target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
n_train: Optional[int] = field(default=-1, metadata={'help': '# training examples. -1 means use all.'})
n_val: Optional[int] = field(default=-1, metadata={'help': '# validation examples. -1 means use all.'})
n_test: Optional[int] = field(default=-1, metadata={'help': '# test examples. -1 means use all.'})
src_lang: Optional[str] = field(default=None, metadata={'help': 'Source language id for translation.'})
tgt_lang: Optional[str] = field(default=None, metadata={'help': 'Target language id for translation.'})
eval_beams: Optional[int] = field(default=None, metadata={'help': '# num_beams to use for evaluation.'})
ignore_pad_token_for_loss: bool = field(default=True, metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'})
|
@dataclass
class DataTrainingArguments:
'''
Arguments pertaining to what data we are going to input our model for training and eval.
'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0.13
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 60
| 1
| 56
| 14
| 55
| 7
| 14
| 14
| 13
| 0
| 0
| 0
| 0
|
158
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/legacy/seq2seq/finetune_trainer.py
|
seq2seq.finetune_trainer.ModelArguments
|
from utils import Seq2SeqDataCollator, Seq2SeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
    freeze_encoder: bool = field(default=False, metadata={'help': 'Whether to freeze the encoder.'})
freeze_embeds: bool = field(default=False, metadata={'help': 'Whether to freeze the embeddings.'})
|
@dataclass
class ModelArguments:
'''
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
'''
pass
| 2
| 1
| 0
| 0
| 0
| 0
| 0
| 0.19
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 20
| 1
| 16
| 7
| 15
| 3
| 7
| 7
| 6
| 0
| 0
| 0
| 0
|
159
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/legacy/seq2seq/seq2seq_trainer.py
|
seq2seq.seq2seq_trainer.Seq2SeqTrainer
|
from transformers import PreTrainedModel, Trainer, logging
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_xla_available
from transformers.optimization import Adafactor, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup
from transformers.trainer_pt_utils import get_tpu_sampler
from typing import Any, Optional, Union
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
class Seq2SeqTrainer(Trainer):
def __init__(self, config=None, data_args=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if config is None:
assert isinstance(self.model, PreTrainedModel), f'If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is {self.model.__class__}'
self.config = self.model.config
else:
self.config = config
self.data_args = data_args
self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, 'Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss calculation or doing label smoothing.'
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(f'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for padding..')
if self.args.label_smoothing == 0:
self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
else:
from utils import label_smoothed_nll_loss
self.loss_fn = label_smoothed_nll_loss
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method in a subclass.
"""
if self.optimizer is None:
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{'params': [p for n, p in self.model.named_parameters() if not any((nd in n for nd in no_decay))], 'weight_decay': self.args.weight_decay}, {'params': [p for n, p in self.model.named_parameters() if any((nd in n for nd in no_decay))], 'weight_decay': 0.0}]
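            # Biases and LayerNorm weights are excluded from weight decay; every other
            # parameter uses args.weight_decay (the standard BERT-style grouping).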
if self.args.adafactor:
optimizer_cls = Adafactor
optimizer_kwargs = {'scale_parameter': False, 'relative_step': False}
else:
optimizer_cls = torch.optim.AdamW
optimizer_kwargs = {'betas': (self.args.adam_beta1, self.args.adam_beta2), 'eps': self.args.adam_epsilon}
optimizer_kwargs['lr'] = self.args.learning_rate
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if self.lr_scheduler is None:
self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
else:
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.')
def _get_lr_scheduler(self, num_training_steps):
schedule_func = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == 'constant':
scheduler = schedule_func(self.optimizer)
elif self.args.lr_scheduler == 'constant_w_warmup':
scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
else:
scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps)
return scheduler
def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
return None
elif is_torch_xla_available():
return get_tpu_sampler(self.train_dataset)
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(self.args.per_device_train_batch_size, distributed=self.args.parallel_mode == ParallelMode.DISTRIBUTED)
return RandomSampler(self.train_dataset) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset)
def _compute_loss(self, model, inputs, labels):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
logits = model(**inputs, use_cache=False)[0]
loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
else:
loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
else:
logits = model(**inputs, use_cache=False)[0]
lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
return (loss, logits)
def compute_loss(self, model, inputs):
labels = inputs.pop('labels')
loss, _ = self._compute_loss(model, inputs, labels)
return loss
def prediction_step(self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[list[str]]=None) -> tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
Perform an evaluation step on :obj:`model` using :obj:`inputs`.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to evaluate.
inputs (:obj:`dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (:obj:`bool`):
Whether or not to return the loss only.
Return:
tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
A tuple with the loss, logits and labels (each being optional).
"""
inputs = self._prepare_inputs(inputs)
gen_kwargs = {'max_length': self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, 'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams}
if self.args.predict_with_generate and (not self.args.prediction_loss_only):
generated_tokens = self.model.generate(inputs['input_ids'], attention_mask=inputs['attention_mask'], **gen_kwargs)
if generated_tokens.shape[-1] < gen_kwargs['max_length']:
generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs['max_length'])
labels = inputs.pop('labels')
with torch.no_grad():
loss, logits = self._compute_loss(model, inputs, labels)
loss = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
logits = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs['max_length']:
labels = self._pad_tensors_to_max_len(labels, gen_kwargs['max_length'])
return (loss, logits, labels)
def _pad_tensors_to_max_len(self, tensor, max_length):
pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(f'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be padded to `max_length`={max_length}')
padded_tensor = pad_token_id * torch.ones((tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device)
padded_tensor[:, :tensor.shape[-1]] = tensor
return padded_tensor
|
class Seq2SeqTrainer(Trainer):
def __init__(self, config=None, data_args=None, *args, **kwargs):
pass
def create_optimizer_and_scheduler(self, num_training_steps: int):
'''
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method in a subclass.
'''
pass
def _get_lr_scheduler(self, num_training_steps):
pass
def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
pass
def _compute_loss(self, model, inputs, labels):
pass
def compute_loss(self, model, inputs):
pass
def prediction_step(self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[list[str]]=None) -> tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
'''
Perform an evaluation step on :obj:`model` using :obj:`inputs`.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to evaluate.
inputs (:obj:`dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (:obj:`bool`):
Whether or not to return the loss only.
Return:
tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
A tuple with the loss, logits and labels (each being optional).
'''
pass
def _pad_tensors_to_max_len(self, tensor, max_length):
pass
| 9 | 2 | 24 | 3 | 18 | 4 | 4 | 0.2 | 1 | 17 | 0 | 0 | 8 | 6 | 8 | 92 | 201 | 29 | 144 | 41 | 128 | 29 | 84 | 34 | 74 | 8 | 1 | 2 | 34
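The optimizer setup in `create_optimizer_and_scheduler` above hinges on putting biases and LayerNorm weights into a zero-weight-decay group. A minimal, self-contained sketch of that grouping on a toy module (illustrative only; the attribute is deliberately named `LayerNorm` so the substring check matches, as it does in BERT-style models, and 0.01 stands in for `args.weight_decay`):

import torch
from torch import nn

class Toy(nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(4, 4)
        self.LayerNorm = nn.LayerNorm(4)  # named so 'LayerNorm.weight' appears in named_parameters()

model = Toy()
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
    {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
    {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
]
optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=3e-5)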
|
160
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/legacy/seq2seq/seq2seq_training_args.py
|
seq2seq.seq2seq_training_args.Seq2SeqTrainingArguments
|
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
"""
Parameters:
label_smoothing (:obj:`float`, `optional`, defaults to 0):
The label smoothing epsilon to apply (if not zero).
sortish_sampler (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to use SortishSampler or not. It sorts the inputs according to length in order to minimize the padding size.
predict_with_generate (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to use generate to calculate generative metrics (ROUGE, BLEU).
"""
label_smoothing: Optional[float] = field(default=0.0, metadata={'help': 'The label smoothing epsilon to apply (if not zero).'})
sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to use SortishSampler or not.'})
predict_with_generate: bool = field(default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'})
adafactor: bool = field(default=False, metadata={'help': 'whether to use adafactor'})
encoder_layerdrop: Optional[float] = field(default=None, metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'})
decoder_layerdrop: Optional[float] = field(default=None, metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'})
dropout: Optional[float] = field(default=None, metadata={'help': 'Dropout probability. Goes into model.config.'})
attention_dropout: Optional[float] = field(default=None, metadata={'help': 'Attention dropout probability. Goes into model.config.'})
lr_scheduler: Optional[str] = field(default='linear', metadata={'help': f'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}'})
|
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
'''
Parameters:
label_smoothing (:obj:`float`, `optional`, defaults to 0):
The label smoothing epsilon to apply (if not zero).
sortish_sampler (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to use SortishSampler or not. It sorts the inputs according to length in order to minimize the padding size.
predict_with_generate (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to use generate to calculate generative metrics (ROUGE, BLEU).
'''
pass
| 2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.39 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 33 | 1 | 23 | 10 | 22 | 9 | 10 | 10 | 9 | 0 | 1 | 0 | 0
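A hedged usage sketch for the arguments above; it assumes the legacy `examples/legacy/seq2seq` directory is on the import path, and the field names are taken from the dataclass itself:

from seq2seq_training_args import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir='output',
    label_smoothing=0.1,          # non-zero switches the trainer to label_smoothed_nll_loss
    sortish_sampler=True,         # sort batches by source length to reduce padding
    predict_with_generate=True,   # compute generative metrics (ROUGE, BLEU) from generate()
    lr_scheduler='constant_w_warmup',
    warmup_steps=500,
)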
|
161
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/legacy/seq2seq/utils.py
|
seq2seq.utils.AbstractSeq2SeqDataset
|
from transformers.utils import cached_property
import os
from pathlib import Path
from torch.utils.data import Dataset, Sampler
import numpy as np
from transformers import BartTokenizer, EvalPrediction, PreTrainedTokenizer, T5Tokenizer
class AbstractSeq2SeqDataset(Dataset):
def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path='train', n_obs=None, prefix='', **dataset_kwargs):
super().__init__()
self.src_file = Path(data_dir).joinpath(type_path + '.source')
self.tgt_file = Path(data_dir).joinpath(type_path + '.target')
self.len_file = Path(data_dir).joinpath(type_path + '.len')
if os.path.exists(self.len_file):
self.src_lens = pickle_load(self.len_file)
self.used_char_len = False
else:
self.src_lens = self.get_char_lens(self.src_file)
self.used_char_len = True
self.max_source_length = max_source_length
self.max_target_length = max_target_length
assert min(self.src_lens) > 0, f'found empty line in {self.src_file}'
self.tokenizer = tokenizer
self.prefix = prefix if prefix is not None else ''
if n_obs is not None:
self.src_lens = self.src_lens[:n_obs]
self.pad_token_id = self.tokenizer.pad_token_id
self.dataset_kwargs = dataset_kwargs
dataset_kwargs.update({'add_prefix_space': True} if isinstance(self.tokenizer, BartTokenizer) else {})
def __len__(self):
return len(self.src_lens)
@staticmethod
def get_char_lens(data_file):
return [len(x) for x in Path(data_file).open().readlines()]
@cached_property
def tgt_lens(self):
"""Length in characters of target documents"""
return self.get_char_lens(self.tgt_file)
def make_sortish_sampler(self, batch_size, distributed=False, shuffle=True, **kwargs):
if distributed:
return DistributedSortishSampler(self, batch_size, shuffle=shuffle, **kwargs)
else:
return SortishSampler(self.src_lens, batch_size, shuffle=shuffle)
def make_dynamic_sampler(self, max_tokens_per_batch=1024, **kwargs):
assert FAIRSEQ_AVAILABLE, 'Dynamic batch size requires `pip install fairseq`'
assert not self.used_char_len, 'You must call python make_len_file.py before calling make_dynamic_sampler'
sorted_indices = list(self.make_sortish_sampler(1024, shuffle=False))
def num_tokens_in_example(i):
return min(self.src_lens[i], self.max_target_length)
batch_sampler: list[list[int]] = batch_by_size(sorted_indices, num_tokens_fn=num_tokens_in_example, max_tokens=max_tokens_per_batch, required_batch_size_multiple=64)
shuffled_batches = [batch_sampler[i] for i in np.random.permutation(range(len(batch_sampler)))]
approximate_toks_per_batch = [max((self.src_lens[i] for i in batch)) * len(batch) for batch in shuffled_batches]
largest_batch_idx = np.argmax(approximate_toks_per_batch)
shuffled_batches[0], shuffled_batches[largest_batch_idx] = (shuffled_batches[largest_batch_idx], shuffled_batches[0])
return shuffled_batches
def __getitem__(self, item):
raise NotImplementedError('You must implement this')
def collate_fn(self, batch):
raise NotImplementedError('You must implement this')
|
class AbstractSeq2SeqDataset(Dataset):
def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path='train', n_obs=None, prefix='', **dataset_kwargs):
pass
def __len__(self):
pass
@staticmethod
def get_char_lens(data_file):
pass
@cached_property
def tgt_lens(self):
'''Length in characters of target documents'''
pass
def make_sortish_sampler(self, batch_size, distributed=False, shuffle=True, **kwargs):
pass
def make_dynamic_sampler(self, max_tokens_per_batch=1024, **kwargs):
pass
def num_tokens_in_example(i):
pass
def __getitem__(self, item):
pass
def collate_fn(self, batch):
pass
| 12 | 1 | 8 | 0 | 8 | 0 | 2 | 0.04 | 1 | 9 | 2 | 2 | 7 | 11 | 8 | 8 | 82 | 10 | 69 | 38 | 47 | 3 | 47 | 26 | 37 | 5 | 1 | 1 | 14
|
162
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/legacy/seq2seq/utils.py
|
seq2seq.utils.DistributedSortishSampler
|
import torch
import numpy as np
from torch.utils.data import Dataset, Sampler
import torch.distributed as dist
from collections.abc import Iterable
from transformers.utils import cached_property
import math
class DistributedSortishSampler(Sampler):
"""Copied from torch DistributedSampler"""
def __init__(self, dataset, batch_size, num_replicas=None, rank=None, add_extra_examples=True, shuffle=True):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError('Requires distributed package to be available')
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError('Requires distributed package to be available')
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
if add_extra_examples:
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
else:
self.total_size = len(dataset)
self.num_samples = len(self.available_indices)
self.batch_size = batch_size
self.add_extra_examples = add_extra_examples
self.shuffle = shuffle
def __iter__(self) -> Iterable:
g = torch.Generator()
g.manual_seed(self.epoch)
sortish_data = [self.dataset.src_lens[i] for i in self.available_indices]
sortish_indices = sortish_sampler_indices(sortish_data, self.batch_size, shuffle=self.shuffle)
indices = [self.available_indices[i] for i in sortish_indices]
assert len(indices) == self.num_samples
return iter(indices)
@cached_property
def available_indices(self) -> np.array:
indices = list(range(len(self.dataset)))
indices += indices[:self.total_size - len(indices)]
assert len(indices) == self.total_size
available_indices = indices[self.rank:self.total_size:self.num_replicas]
return available_indices
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
|
class DistributedSortishSampler(Sampler):
'''Copied from torch DistributedSampler'''
def __init__(self, dataset, batch_size, num_replicas=None, rank=None, add_extra_examples=True, shuffle=True):
pass
def __iter__(self) -> Iterable:
pass
@cached_property
def available_indices(self) -> np.array:
pass
def __len__(self):
pass
def set_epoch(self, epoch):
pass
| 7 | 1 | 9 | 0 | 8 | 0 | 2 | 0.07 | 1 | 4 | 0 | 0 | 5 | 9 | 5 | 5 | 51 | 6 | 42 | 22 | 35 | 3 | 40 | 21 | 34 | 6 | 1 | 2 | 10
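The `available_indices` property above first pads the index list so it divides evenly across replicas, then takes every `num_replicas`-th index starting at `rank`. The same arithmetic in plain Python, worked for 10 examples over 3 replicas:

import math

dataset_len, num_replicas = 10, 3
num_samples = int(math.ceil(dataset_len * 1.0 / num_replicas))  # 4 samples per replica
total_size = num_samples * num_replicas                          # 12 after padding

indices = list(range(dataset_len))
indices += indices[:total_size - len(indices)]                   # wrap around: ..., 8, 9, 0, 1
assert len(indices) == total_size

for rank in range(num_replicas):
    print(rank, indices[rank:total_size:num_replicas])
# 0 [0, 3, 6, 9]
# 1 [1, 4, 7, 0]
# 2 [2, 5, 8, 1]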
|
163
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/legacy/seq2seq/utils.py
|
seq2seq.utils.LegacySeq2SeqDataset
|
import torch.distributed as dist
import torch
import linecache
class LegacySeq2SeqDataset(AbstractSeq2SeqDataset):
def __getitem__(self, index) -> dict[str, torch.Tensor]:
"""Call tokenizer on src and tgt_lines"""
index = index + 1
source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip('\n')
tgt_line = linecache.getline(str(self.tgt_file), index).rstrip('\n')
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
source_inputs = self.encode_line(self.tokenizer, source_line, self.max_source_length)
target_inputs = self.encode_line(self.tokenizer, tgt_line, self.max_target_length)
source_ids = source_inputs['input_ids'].squeeze()
target_ids = target_inputs['input_ids'].squeeze()
src_mask = source_inputs['attention_mask'].squeeze()
return {'input_ids': source_ids, 'attention_mask': src_mask, 'labels': target_ids}
def encode_line(self, tokenizer, line, max_length, pad_to_max_length=True, return_tensors='pt'):
"""Only used by LegacyDataset"""
return tokenizer([line], max_length=max_length, padding='max_length' if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, **self.dataset_kwargs)
def collate_fn(self, batch) -> dict[str, torch.Tensor]:
input_ids = torch.stack([x['input_ids'] for x in batch])
masks = torch.stack([x['attention_mask'] for x in batch])
target_ids = torch.stack([x['labels'] for x in batch])
pad_token_id = self.pad_token_id
y = trim_batch(target_ids, pad_token_id)
source_ids, source_mask = trim_batch(input_ids, pad_token_id, attention_mask=masks)
batch = {'input_ids': source_ids, 'attention_mask': source_mask, 'labels': y}
return batch
|
class LegacySeq2SeqDataset(AbstractSeq2SeqDataset):
def __getitem__(self, index) -> dict[str, torch.Tensor]:
'''Call tokenizer on src and tgt_lines'''
pass
def encode_line(self, tokenizer, line, max_length, pad_to_max_length=True, return_tensors='pt'):
'''Only used by LegacyDataset'''
pass
def collate_fn(self, batch) -> dict[str, torch.Tensor]:
pass
| 4 | 2 | 14 | 0 | 13 | 1 | 1 | 0.08 | 1 | 2 | 0 | 0 | 3 | 0 | 3 | 11 | 44 | 3 | 39 | 17 | 35 | 3 | 24 | 17 | 20 | 2 | 2 | 0 | 4
|
164
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/legacy/seq2seq/utils.py
|
seq2seq.utils.Seq2SeqDataCollator
|
import torch.distributed as dist
import torch
from transformers import BartTokenizer, EvalPrediction, PreTrainedTokenizer, T5Tokenizer
from transformers.models.bart.modeling_bart import shift_tokens_right
class Seq2SeqDataCollator:
def __init__(self, tokenizer, data_args, decoder_start_token_id, tpu_num_cores=None):
self.tokenizer = tokenizer
self.pad_token_id = tokenizer.pad_token_id
self.decoder_start_token_id = decoder_start_token_id
assert self.pad_token_id is not None, f'pad_token_id is not defined for ({self.tokenizer.__class__.__name__}), it must be defined.'
self.data_args = data_args
self.tpu_num_cores = tpu_num_cores
self.dataset_kwargs = {'add_prefix_space': True} if isinstance(tokenizer, BartTokenizer) else {}
if data_args.src_lang is not None:
self.dataset_kwargs['src_lang'] = data_args.src_lang
if data_args.tgt_lang is not None:
self.dataset_kwargs['tgt_lang'] = data_args.tgt_lang
def __call__(self, batch) -> dict[str, torch.Tensor]:
if hasattr(self.tokenizer, 'prepare_seq2seq_batch'):
batch = self._encode(batch)
input_ids, attention_mask, labels = (batch['input_ids'], batch['attention_mask'], batch['labels'])
else:
input_ids = torch.stack([x['input_ids'] for x in batch])
attention_mask = torch.stack([x['attention_mask'] for x in batch])
labels = torch.stack([x['labels'] for x in batch])
labels = trim_batch(labels, self.pad_token_id)
input_ids, attention_mask = trim_batch(input_ids, self.pad_token_id, attention_mask=attention_mask)
if isinstance(self.tokenizer, T5Tokenizer):
decoder_input_ids = self._shift_right_t5(labels)
else:
decoder_input_ids = shift_tokens_right(labels, self.pad_token_id, self.decoder_start_token_id)
batch = {'input_ids': input_ids, 'attention_mask': attention_mask, 'decoder_input_ids': decoder_input_ids, 'labels': labels}
return batch
def _shift_right_t5(self, input_ids):
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = self.pad_token_id
return shifted_input_ids
def _encode(self, batch) -> dict[str, torch.Tensor]:
batch_encoding = self.tokenizer.prepare_seq2seq_batch([x['src_texts'] for x in batch], tgt_texts=[x['tgt_texts'] for x in batch], max_length=self.data_args.max_source_length, max_target_length=self.data_args.max_target_length, padding='max_length' if self.tpu_num_cores is not None else 'longest', return_tensors='pt', **self.dataset_kwargs)
return batch_encoding.data
|
class Seq2SeqDataCollator:
def __init__(self, tokenizer, data_args, decoder_start_token_id, tpu_num_cores=None):
pass
def __call__(self, batch) -> dict[str, torch.Tensor]:
pass
def _shift_right_t5(self, input_ids):
pass
def _encode(self, batch) -> dict[str, torch.Tensor]:
pass
| 5 | 0 | 15 | 1 | 14 | 1 | 3 | 0.04 | 0 | 4 | 0 | 0 | 4 | 6 | 4 | 4 | 63 | 6 | 56 | 15 | 51 | 2 | 35 | 15 | 30 | 4 | 0 | 1 | 10
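`_shift_right_t5` above turns labels into decoder inputs by shifting every token one position to the right and placing the pad id in front; the same operation on a toy batch with plain torch:

import torch

pad_token_id = 0
labels = torch.tensor([[37, 42, 7],
                       [11, 5, 1]])

shifted_input_ids = labels.new_zeros(labels.shape)
shifted_input_ids[..., 1:] = labels[..., :-1].clone()
shifted_input_ids[..., 0] = pad_token_id
print(shifted_input_ids)
# tensor([[ 0, 37, 42],
#         [ 0, 11,  5]])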
|
165
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/legacy/seq2seq/utils.py
|
seq2seq.utils.Seq2SeqDataset
|
import torch.distributed as dist
import torch
import linecache
class Seq2SeqDataset(AbstractSeq2SeqDataset):
"""A dataset that calls prepare_seq2seq_batch."""
def __getitem__(self, index) -> dict[str, str]:
index = index + 1
source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip('\n')
tgt_line = linecache.getline(str(self.tgt_file), index).rstrip('\n')
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
return {'tgt_texts': tgt_line, 'src_texts': source_line, 'id': index - 1}
def collate_fn(self, batch) -> dict[str, torch.Tensor]:
"""Call prepare_seq2seq_batch."""
batch_encoding: dict[str, torch.Tensor] = self.tokenizer.prepare_seq2seq_batch([x['src_texts'] for x in batch], tgt_texts=[x['tgt_texts'] for x in batch], max_length=self.max_source_length, max_target_length=self.max_target_length, return_tensors='pt', **self.dataset_kwargs).data
batch_encoding['ids'] = torch.tensor([x['id'] for x in batch])
return batch_encoding
|
class Seq2SeqDataset(AbstractSeq2SeqDataset):
'''A dataset that calls prepare_seq2seq_batch.'''
def __getitem__(self, index) -> dict[str, str]:
pass
def collate_fn(self, batch) -> dict[str, torch.Tensor]:
'''Call prepare_seq2seq_batch.'''
pass
| 3 | 2 | 10 | 0 | 9 | 1 | 1 | 0.16 | 1 | 2 | 0 | 0 | 2 | 2 | 2 | 10 | 23 | 2 | 19 | 8 | 16 | 3 | 12 | 6 | 9 | 1 | 2 | 0 | 2
|
166
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/legacy/seq2seq/utils.py
|
seq2seq.utils.SortishSampler
|
from torch.utils.data import Dataset, Sampler
class SortishSampler(Sampler):
"""Go through the text data by order of src length with a bit of randomness. From fastai repo."""
def __init__(self, data, batch_size, shuffle=True):
self.data, self.bs, self.shuffle = (data, batch_size, shuffle)
def __len__(self) -> int:
return len(self.data)
def __iter__(self):
return iter(sortish_sampler_indices(self.data, self.bs, shuffle=self.shuffle))
|
class SortishSampler(Sampler):
'''Go through the text data by order of src length with a bit of randomness. From fastai repo.'''
def __init__(self, data, batch_size, shuffle=True):
pass
def __len__(self) -> int:
pass
def __iter__(self):
pass
| 4 | 1 | 2 | 0 | 2 | 0 | 1 | 0.14 | 1 | 1 | 0 | 0 | 3 | 3 | 3 | 3 | 11 | 3 | 7 | 5 | 3 | 1 | 7 | 5 | 3 | 1 | 1 | 0 | 3
|
167
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/setup.py
|
setup.DepsTableUpdateCommand
|
from setuptools import Command, find_packages, setup
class DepsTableUpdateCommand(Command):
"""
A custom distutils command that updates the dependency table.
usage: python setup.py deps_table_update
"""
description = 'build runtime dependency table'
user_options = [('dep-table-update', None, 'updates src/transformers/dependency_versions_table.py')]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
entries = '\n'.join([f' "{k}": "{v}",' for k, v in deps.items()])
content = ['# THIS FILE HAS BEEN AUTOGENERATED. To update:', '# 1. modify the `_deps` dict in setup.py', '# 2. run `make deps_table_update``', 'deps = {', entries, '}', '']
target = 'src/transformers/dependency_versions_table.py'
print(f'updating {target}')
with open(target, 'w', encoding='utf-8', newline='\n') as f:
f.write('\n'.join(content))
|
class DepsTableUpdateCommand(Command):
'''
A custom distutils command that updates the dependency table.
usage: python setup.py deps_table_update
'''
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
pass
| 4 | 1 | 6 | 0 | 6 | 1 | 1 | 0.33 | 1 | 0 | 0 | 0 | 3 | 0 | 3 | 3 | 33 | 4 | 24 | 10 | 20 | 8 | 14 | 9 | 10 | 1 | 1 | 1 | 3
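`run` above renders the `deps` mapping from `setup.py` into an auto-generated module. The same rendering with a small made-up mapping (the real version pins live in `setup.py`):

deps = {'numpy': 'numpy>=1.17', 'tqdm': 'tqdm>=4.27'}  # hypothetical entries, for illustration only

entries = '\n'.join([f'    "{k}": "{v}",' for k, v in deps.items()])
content = [
    '# THIS FILE HAS BEEN AUTOGENERATED. To update:',
    '# 1. modify the `_deps` dict in setup.py',
    '# 2. run `make deps_table_update``',
    'deps = {',
    entries,
    '}',
    '',
]
print('\n'.join(content))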
|
168
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/legacy/token-classification/tasks.py
|
tasks.Chunk
|
class Chunk(NER):
def __init__(self):
super().__init__(label_idx=-2)
def get_labels(self, path: str) -> list[str]:
if path:
with open(path) as f:
labels = f.read().splitlines()
if 'O' not in labels:
labels = ['O'] + labels
return labels
else:
return ['O', 'B-ADVP', 'B-INTJ', 'B-LST', 'B-PRT', 'B-NP', 'B-SBAR', 'B-VP', 'B-ADJP', 'B-CONJP', 'B-PP', 'I-ADVP', 'I-INTJ', 'I-LST', 'I-PRT', 'I-NP', 'I-SBAR', 'I-VP', 'I-ADJP', 'I-CONJP', 'I-PP']
|
class Chunk(NER):
def __init__(self):
pass
def get_labels(self, path: str) -> list[str]:
pass
| 3 | 0 | 17 | 0 | 17 | 1 | 2 | 0.03 | 1 | 2 | 0 | 0 | 2 | 0 | 2 | 9 | 36 | 1 | 34 | 5 | 31 | 1 | 11 | 4 | 8 | 3 | 2 | 2 | 4
|
169
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/legacy/token-classification/tasks.py
|
tasks.NER
|
import os
import logging
from typing import TextIO, Union
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)  # used by write_predictions_to_file below
class NER(TokenClassificationTask):
def __init__(self, label_idx=-1):
self.label_idx = label_idx
def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> list[InputExample]:
if isinstance(mode, Split):
mode = mode.value
file_path = os.path.join(data_dir, f'{mode}.txt')
guid_index = 1
examples = []
with open(file_path, encoding='utf-8') as f:
words = []
labels = []
for line in f:
if line.startswith('-DOCSTART-') or line == '' or line == '\n':
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
guid_index += 1
words = []
labels = []
else:
splits = line.split(' ')
words.append(splits[0])
if len(splits) > 1:
labels.append(splits[self.label_idx].replace('\n', ''))
else:
labels.append('O')
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
return examples
def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: list):
example_id = 0
for line in test_input_reader:
if line.startswith('-DOCSTART-') or line == '' or line == '\n':
writer.write(line)
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
output_line = line.split()[0] + ' ' + preds_list[example_id].pop(0) + '\n'
writer.write(output_line)
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
def get_labels(self, path: str) -> list[str]:
if path:
with open(path) as f:
labels = f.read().splitlines()
if 'O' not in labels:
labels = ['O'] + labels
return labels
else:
return ['O', 'B-MISC', 'I-MISC', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC']
|
class NER(TokenClassificationTask):
def __init__(self, label_idx=-1):
pass
def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> list[InputExample]:
pass
def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: list):
pass
def get_labels(self, path: str) -> list[str]:
pass
| 5 | 0 | 13 | 0 | 12 | 1 | 4 | 0.04 | 1 | 4 | 2 | 1 | 4 | 1 | 4 | 7 | 55 | 3 | 50 | 19 | 45 | 2 | 45 | 17 | 40 | 7 | 1 | 4 | 16
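`read_examples_from_file` above groups a CoNLL-style file into sentences at blank lines, keeping column 0 as the word and column `label_idx` (the last one by default) as the tag. A compact rework of that loop on an in-memory snippet:

lines = [
    'EU NNP B-ORG',
    'rejects VBZ O',
    '',
    'Peter NNP B-PER',
    'Blackburn NNP I-PER',
]

label_idx = -1
sentences, words, labels = [], [], []
for line in lines + ['']:          # trailing '' flushes the last sentence
    if line == '':
        if words:
            sentences.append((words, labels))
            words, labels = [], []
    else:
        splits = line.split(' ')
        words.append(splits[0])
        labels.append(splits[label_idx])

print(sentences)
# [(['EU', 'rejects'], ['B-ORG', 'O']), (['Peter', 'Blackburn'], ['B-PER', 'I-PER'])]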
|
170
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/legacy/token-classification/tasks.py
|
tasks.POS
|
from conllu import parse_incr
from typing import TextIO, Union
import os
from utils_ner import InputExample, Split, TokenClassificationTask
class POS(TokenClassificationTask):
def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> list[InputExample]:
if isinstance(mode, Split):
mode = mode.value
file_path = os.path.join(data_dir, f'{mode}.txt')
guid_index = 1
examples = []
with open(file_path, encoding='utf-8') as f:
for sentence in parse_incr(f):
words = []
labels = []
for token in sentence:
words.append(token['form'])
labels.append(token['upos'])
assert len(words) == len(labels)
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}', words=words, labels=labels))
guid_index += 1
return examples
def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: list):
example_id = 0
for sentence in parse_incr(test_input_reader):
s_p = preds_list[example_id]
out = ''
for token in sentence:
out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
out += '\n'
writer.write(out)
example_id += 1
def get_labels(self, path: str) -> list[str]:
if path:
with open(path) as f:
return f.read().splitlines()
else:
return ['ADJ', 'ADP', 'ADV', 'AUX', 'CCONJ', 'DET', 'INTJ', 'NOUN', 'NUM', 'PART', 'PRON', 'PROPN', 'PUNCT', 'SCONJ', 'SYM', 'VERB', 'X']
|
class POS(TokenClassificationTask):
def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> list[InputExample]:
pass
def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: list):
pass
def get_labels(self, path: str) -> list[str]:
pass
| 4 | 0 | 18 | 0 | 17 | 0 | 3 | 0 | 1 | 4 | 2 | 0 | 3 | 0 | 3 | 6 | 56 | 3 | 53 | 18 | 49 | 0 | 34 | 16 | 30 | 5 | 1 | 3 | 10
|
171
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/pytorch/question-answering/trainer_qa.py
|
trainer_qa.QuestionAnsweringTrainer
|
import math
import time
from transformers.trainer_utils import PredictionOutput, speed_metrics
from transformers import Trainer, is_torch_xla_available
# xm/met are only needed on the TPU debug path in evaluate(); import them lazily.
if is_torch_xla_available():
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
super().__init__(*args, **kwargs)
self.eval_examples = eval_examples
self.post_process_function = post_process_function
def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str='eval'):
eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
eval_dataloader = self.get_eval_dataloader(eval_dataset)
eval_examples = self.eval_examples if eval_examples is None else eval_examples
compute_metrics = self.compute_metrics
self.compute_metrics = None
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
start_time = time.time()
try:
output = eval_loop(eval_dataloader, description='Evaluation', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
finally:
self.compute_metrics = compute_metrics
total_batch_size = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(speed_metrics(metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size)))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
metrics = self.compute_metrics(eval_preds)
for key in list(metrics.keys()):
if not key.startswith(f'{metric_key_prefix}_'):
metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
metrics.update(output.metrics)
else:
metrics = output.metrics
if self.args.should_log:
self.log(metrics)
if self.args.tpu_metrics_debug or self.args.debug:
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
return metrics
def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str='test'):
predict_dataloader = self.get_test_dataloader(predict_dataset)
compute_metrics = self.compute_metrics
self.compute_metrics = None
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
start_time = time.time()
try:
output = eval_loop(predict_dataloader, description='Prediction', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
finally:
self.compute_metrics = compute_metrics
total_batch_size = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(speed_metrics(metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size)))
if self.post_process_function is None or self.compute_metrics is None:
return output
predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, 'predict')
metrics = self.compute_metrics(predictions)
for key in list(metrics.keys()):
if not key.startswith(f'{metric_key_prefix}_'):
metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
|
class QuestionAnsweringTrainer(Trainer):
def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
pass
def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str='eval'):
pass
def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str='test'):
pass
| 4 | 0 | 35 | 3 | 28 | 4 | 6 | 0.13 | 1 | 4 | 0 | 0 | 3 | 4 | 3 | 3 | 107 | 11 | 85 | 26 | 81 | 11 | 56 | 26 | 52 | 11 | 1 | 3 | 19
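Both `evaluate` and `predict` above rename metric keys so everything carries the split prefix; the renaming loop in isolation, on made-up numbers:

metric_key_prefix = 'eval'
metrics = {'exact_match': 81.2, 'f1': 88.5, 'eval_samples': 10570}  # toy values

for key in list(metrics.keys()):
    if not key.startswith(f'{metric_key_prefix}_'):
        metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)

print(metrics)
# {'eval_samples': 10570, 'eval_exact_match': 81.2, 'eval_f1': 88.5}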
|
172
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/examples/pytorch/question-answering/trainer_seq2seq_qa.py
|
trainer_seq2seq_qa.QuestionAnsweringSeq2SeqTrainer
|
from typing import Optional
from torch.utils.data import Dataset
from transformers.trainer_utils import PredictionOutput, speed_metrics
import math
import time
from transformers import Seq2SeqTrainer, is_torch_xla_available
# xm/met are only needed on the TPU debug path in evaluate(); import them lazily.
if is_torch_xla_available():
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
super().__init__(*args, **kwargs)
self.eval_examples = eval_examples
self.post_process_function = post_process_function
def evaluate(self, eval_dataset: Optional[Dataset]=None, eval_examples=None, ignore_keys: Optional[list[str]]=None, metric_key_prefix: str='eval', **gen_kwargs) -> dict[str, float]:
gen_kwargs = gen_kwargs.copy()
if gen_kwargs.get('max_length') is None and self.args.generation_max_length is not None:
gen_kwargs['max_length'] = self.args.generation_max_length
if gen_kwargs.get('num_beams') is None and self.args.generation_num_beams is not None:
gen_kwargs['num_beams'] = self.args.generation_num_beams
self._gen_kwargs = gen_kwargs
eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
eval_dataloader = self.get_eval_dataloader(eval_dataset)
eval_examples = self.eval_examples if eval_examples is None else eval_examples
compute_metrics = self.compute_metrics
self.compute_metrics = None
start_time = time.time()
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
output = eval_loop(eval_dataloader, description='Evaluation', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
finally:
self.compute_metrics = compute_metrics
total_batch_size = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(speed_metrics(metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size)))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
metrics = self.compute_metrics(eval_preds)
for key in list(metrics.keys()):
if not key.startswith(f'{metric_key_prefix}_'):
metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
metrics.update(output.metrics)
else:
metrics = output.metrics
if self.args.should_log:
self.log(metrics)
if self.args.tpu_metrics_debug or self.args.debug:
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
return metrics
def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str='test', **gen_kwargs):
self._gen_kwargs = gen_kwargs.copy()
predict_dataloader = self.get_test_dataloader(predict_dataset)
compute_metrics = self.compute_metrics
self.compute_metrics = None
start_time = time.time()
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
output = eval_loop(predict_dataloader, description='Prediction', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
finally:
self.compute_metrics = compute_metrics
total_batch_size = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(speed_metrics(metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size)))
if self.post_process_function is None or self.compute_metrics is None:
return output
predictions = self.post_process_function(predict_examples, predict_dataset, output, 'predict')
metrics = self.compute_metrics(predictions)
for key in list(metrics.keys()):
if not key.startswith(f'{metric_key_prefix}_'):
metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
|
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
pass
def evaluate(self, eval_dataset: Optional[Dataset]=None, eval_examples=None, ignore_keys: Optional[list[str]]=None, metric_key_prefix: str='eval', **gen_kwargs) -> dict[str, float]:
pass
def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str='test', **gen_kwargs):
pass
| 4 | 0 | 42 | 5 | 33 | 4 | 7 | 0.14 | 1 | 5 | 0 | 0 | 3 | 5 | 3 | 93 | 131 | 16 | 101 | 36 | 88 | 14 | 63 | 27 | 59 | 13 | 2 | 3 | 21
|
173
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/activations.py
|
transformers.activations.AccurateGELUActivation
|
import math
import torch
from torch import Tensor, nn
class AccurateGELUActivation(nn.Module):
"""
Applies GELU approximation that is faster than default and more accurate than QuickGELU. See:
https://github.com/hendrycks/GELUs
Implemented along with MEGA (Moving Average Equipped Gated Attention)
"""
def __init__(self):
super().__init__()
self.precomputed_constant = math.sqrt(2 / math.pi)
def forward(self, input: Tensor) -> Tensor:
return 0.5 * input * (1 + torch.tanh(self.precomputed_constant * (input + 0.044715 * torch.pow(input, 3))))
|
class AccurateGELUActivation(nn.Module):
'''
Applies GELU approximation that is faster than default and more accurate than QuickGELU. See:
https://github.com/hendrycks/GELUs
Implemented along with MEGA (Moving Average Equipped Gated Attention)
'''
def __init__(self):
pass
def forward(self, input: Tensor) -> Tensor:
pass
| 3 | 1 | 3 | 0 | 3 | 0 | 1 | 0.83 | 1 | 2 | 0 | 0 | 2 | 1 | 2 | 12 | 14 | 3 | 6 | 4 | 3 | 5 | 6 | 4 | 3 | 1 | 1 | 0 | 2
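The tanh-based approximation above tracks the exact erf-based GELU closely; a quick numeric check:

import math
import torch
from torch import nn

x = torch.linspace(-3, 3, steps=13)
c = math.sqrt(2 / math.pi)
approx = 0.5 * x * (1 + torch.tanh(c * (x + 0.044715 * torch.pow(x, 3))))
exact = nn.functional.gelu(x)  # erf-based by default
print((approx - exact).abs().max())  # on the order of 1e-3 or smaller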
|
174
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/activations.py
|
transformers.activations.ClassInstantier
|
from collections import OrderedDict
class ClassInstantier(OrderedDict):
def __getitem__(self, key):
content = super().__getitem__(key)
cls, kwargs = content if isinstance(content, tuple) else (content, {})
return cls(**kwargs)
|
class ClassInstantier(OrderedDict):
def __getitem__(self, key):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 2 | 0 | 1 | 2 | 0 | 0 | 1 | 0 | 1 | 51 | 5 | 0 | 5 | 4 | 3 | 0 | 5 | 4 | 3 | 2 | 3 | 0 | 2
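`ClassInstantier.__getitem__` above returns a new instance (optionally built with kwargs) rather than the stored class, which is how an activation string can be mapped straight to a ready-to-use module. A local toy mapping, not the library's full registry:

from collections import OrderedDict
from torch import nn

class ClassInstantier(OrderedDict):
    def __getitem__(self, key):
        content = super().__getitem__(key)
        cls, kwargs = content if isinstance(content, tuple) else (content, {})
        return cls(**kwargs)

ACT2FN = ClassInstantier({
    'relu': nn.ReLU,
    'leaky_relu': (nn.LeakyReLU, {'negative_slope': 0.1}),  # tuple entry: class plus kwargs
})
print(ACT2FN['leaky_relu'])  # LeakyReLU(negative_slope=0.1)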
|
175
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/activations.py
|
transformers.activations.ClippedGELUActivation
|
import torch
from torch import Tensor, nn
class ClippedGELUActivation(nn.Module):
"""
Clip the range of possible GeLU outputs between [min, max]. This is especially useful for quantization purpose, as
it allows mapping negatives values in the GeLU spectrum. For more information on this trick, please refer to
https://huggingface.co/papers/2004.09602.
Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when
initially created.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 +
torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))). See https://huggingface.co/papers/1606.08415
"""
def __init__(self, min: float, max: float):
if min > max:
raise ValueError(f'min should be < max (got min: {min}, max: {max})')
super().__init__()
self.min = min
self.max = max
def forward(self, x: Tensor) -> Tensor:
return torch.clip(gelu(x), self.min, self.max)
|
class ClippedGELUActivation(nn.Module):
'''
Clip the range of possible GeLU outputs between [min, max]. This is especially useful for quantization purpose, as
it allows mapping negatives values in the GeLU spectrum. For more information on this trick, please refer to
https://huggingface.co/papers/2004.09602.
Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when
initially created.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 +
torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))). See https://huggingface.co/papers/1606.08415
'''
def __init__(self, min: float, max: float):
pass
def forward(self, x: Tensor) -> Tensor:
pass
| 3 | 1 | 5 | 1 | 4 | 0 | 2 | 1 | 1 | 4 | 0 | 0 | 2 | 2 | 2 | 12 | 23 | 5 | 9 | 5 | 6 | 9 | 9 | 5 | 6 | 2 | 1 | 1 | 3
|
176
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/activations.py
|
transformers.activations.FastGELUActivation
|
import torch
from .integrations.hub_kernels import use_kernel_forward_from_hub
from torch import Tensor, nn
@use_kernel_forward_from_hub('FastGELU')
class FastGELUActivation(nn.Module):
"""
Applies GELU approximation that is slower than QuickGELU but more accurate. See: https://github.com/hendrycks/GELUs
"""
def forward(self, input: Tensor) -> Tensor:
return 0.5 * input * (1.0 + torch.tanh(input * 0.7978845608 * (1.0 + 0.044715 * input * input)))
|
@use_kernel_forward_from_hub('FastGELU')
class FastGELUActivation(nn.Module):
'''
Applies GELU approximation that is slower than QuickGELU but more accurate. See: https://github.com/hendrycks/GELUs
'''
def forward(self, input: Tensor) -> Tensor:
pass
| 3 | 1 | 2 | 0 | 2 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 11 | 7 | 1 | 3 | 2 | 1 | 3 | 3 | 2 | 1 | 1 | 1 | 0 | 1
|
177
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/activations.py
|
transformers.activations.GELUActivation
|
import torch
from torch import Tensor, nn
import math
class GELUActivation(nn.Module):
"""
Original Implementation of the GELU activation function in Google BERT repo when initially created. For
information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 +
torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in nn.functional
Also see the Gaussian Error Linear Units paper: https://huggingface.co/papers/1606.08415
"""
def __init__(self, use_gelu_python: bool=False):
super().__init__()
if use_gelu_python:
self.act = self._gelu_python
else:
self.act = nn.functional.gelu
def _gelu_python(self, input: Tensor) -> Tensor:
return input * 0.5 * (1.0 + torch.erf(input / math.sqrt(2.0)))
def forward(self, input: Tensor) -> Tensor:
return self.act(input)
|
class GELUActivation(nn.Module):
'''
Original Implementation of the GELU activation function in Google BERT repo when initially created. For
information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 +
torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in nn.functional
Also see the Gaussian Error Linear Units paper: https://huggingface.co/papers/1606.08415
'''
def __init__(self, use_gelu_python: bool=False):
pass
def _gelu_python(self, input: Tensor) -> Tensor:
pass
def forward(self, input: Tensor) -> Tensor:
pass
| 4 | 1 | 3 | 0 | 3 | 0 | 1 | 0.55 | 1 | 3 | 0 | 0 | 3 | 1 | 3 | 13 | 20 | 3 | 11 | 5 | 7 | 6 | 10 | 5 | 6 | 2 | 1 | 1 | 4
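The pure-Python branch above (`_gelu_python`) and `nn.functional.gelu` compute the same erf-based GELU; a quick equivalence check:

import math
import torch
from torch import nn

x = torch.randn(8)
gelu_python = x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
print(torch.allclose(gelu_python, nn.functional.gelu(x), atol=1e-6))  # True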
|
178
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/activations.py
|
transformers.activations.LaplaceActivation
|
import torch
from torch import Tensor, nn
import math
class LaplaceActivation(nn.Module):
"""
Applies elementwise activation based on Laplace function, introduced in MEGA as an attention activation. See
https://huggingface.co/papers/2209.10655
Inspired by squared relu, but with bounded range and gradient for better stability
"""
def forward(self, input, mu=0.707107, sigma=0.282095):
input = (input - mu).div(sigma * math.sqrt(2.0))
return 0.5 * (1.0 + torch.erf(input))
|
class LaplaceActivation(nn.Module):
'''
Applies elementwise activation based on Laplace function, introduced in MEGA as an attention activation. See
https://huggingface.co/papers/2209.10655
Inspired by squared relu, but with bounded range and gradient for better stability
'''
def forward(self, input, mu=0.707107, sigma=0.282095):
pass
| 2 | 1 | 3 | 0 | 3 | 0 | 1 | 1.25 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 11 | 11 | 2 | 4 | 2 | 2 | 5 | 4 | 2 | 2 | 1 | 1 | 0 | 1
|
179
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/activations.py
|
transformers.activations.LinearActivation
|
from torch import Tensor, nn
class LinearActivation(nn.Module):
"""
Applies the linear activation function, i.e. forwarding input directly to output.
"""
def forward(self, input: Tensor) -> Tensor:
return input
|
class LinearActivation(nn.Module):
'''
Applies the linear activation function, i.e. forwarding input directly to output.
'''
def forward(self, input: Tensor) -> Tensor:
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 11 | 7 | 1 | 3 | 2 | 1 | 3 | 3 | 2 | 1 | 1 | 1 | 0 | 1
|
180
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/activations.py
|
transformers.activations.MishActivation
|
from torch import Tensor, nn
import torch
class MishActivation(nn.Module):
"""
See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra., https://huggingface.co/papers/1908.08681). Also
visit the official repository for the paper: https://github.com/digantamisra98/Mish
"""
def __init__(self):
super().__init__()
self.act = nn.functional.mish
def _mish_python(self, input: Tensor) -> Tensor:
return input * torch.tanh(nn.functional.softplus(input))
def forward(self, input: Tensor) -> Tensor:
return self.act(input)
|
class MishActivation(nn.Module):
'''
See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra., https://huggingface.co/papers/1908.08681). Also
visit the official repository for the paper: https://github.com/digantamisra98/Mish
'''
def __init__(self):
pass
def _mish_python(self, input: Tensor) -> Tensor:
pass
def forward(self, input: Tensor) -> Tensor:
pass
| 4 | 1 | 3 | 0 | 3 | 0 | 1 | 0.36 | 1 | 2 | 0 | 0 | 3 | 1 | 3 | 13 | 18 | 3 | 11 | 5 | 7 | 4 | 10 | 5 | 6 | 2 | 1 | 1 | 4
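Mish, as implemented above, is `x * tanh(softplus(x))`; the hand-written form agrees with `nn.functional.mish`:

import torch
from torch import nn

x = torch.randn(8)
mish_python = x * torch.tanh(nn.functional.softplus(x))
print(torch.allclose(mish_python, nn.functional.mish(x), atol=1e-6))  # True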
|
181
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/activations.py
|
transformers.activations.NewGELUActivation
|
from .integrations.hub_kernels import use_kernel_forward_from_hub
import torch
from torch import Tensor, nn
import math
@use_kernel_forward_from_hub('NewGELU')
class NewGELUActivation(nn.Module):
"""
Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
the Gaussian Error Linear Units paper: https://huggingface.co/papers/1606.08415
"""
def forward(self, input: Tensor) -> Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
|
@use_kernel_forward_from_hub('NewGELU')
class NewGELUActivation(nn.Module):
'''
Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
the Gaussian Error Linear Units paper: https://huggingface.co/papers/1606.08415
'''
def forward(self, input: Tensor) -> Tensor:
pass
| 3 | 1 | 2 | 0 | 2 | 0 | 1 | 1.33 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 11 | 8 | 1 | 3 | 2 | 1 | 4 | 3 | 2 | 1 | 1 | 1 | 0 | 1
|
182
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/activations.py
|
transformers.activations.PytorchGELUTanh
|
from torch import Tensor, nn
class PytorchGELUTanh(nn.Module):
"""
A fast C implementation of the tanh approximation of the GeLU activation function. See
https://huggingface.co/papers/1606.08415.
This implementation is equivalent to NewGELU and FastGELU but much faster. However, it is not an exact numerical
match due to rounding errors.
"""
def forward(self, input: Tensor) -> Tensor:
return nn.functional.gelu(input, approximate='tanh')
|
class PytorchGELUTanh(nn.Module):
'''
A fast C implementation of the tanh approximation of the GeLU activation function. See
https://huggingface.co/papers/1606.08415.
This implementation is equivalent to NewGELU and FastGELU but much faster. However, it is not an exact numerical
match due to rounding errors.
'''
def forward(self, input: Tensor) -> Tensor:
pass
| 2 | 1 | 5 | 0 | 5 | 0 | 2 | 0.6 | 1 | 3 | 0 | 0 | 2 | 0 | 2 | 12 | 19 | 3 | 10 | 3 | 7 | 6 | 7 | 3 | 4 | 2 | 1 | 1 | 3
|
183
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/activations.py
|
transformers.activations.QuickGELUActivation
|
from .integrations.hub_kernels import use_kernel_forward_from_hub
from torch import Tensor, nn
import torch
@use_kernel_forward_from_hub('QuickGELU')
class QuickGELUActivation(nn.Module):
"""
Applies GELU approximation that is fast but somewhat inaccurate. See: https://github.com/hendrycks/GELUs
"""
def forward(self, input: Tensor) -> Tensor:
return input * torch.sigmoid(1.702 * input)
|
@use_kernel_forward_from_hub('QuickGELU')
class QuickGELUActivation(nn.Module):
'''
Applies GELU approximation that is fast but somewhat inaccurate. See: https://github.com/hendrycks/GELUs
'''
def forward(self, input: Tensor) -> Tensor:
pass
| 3 | 1 | 2 | 0 | 2 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 11 | 7 | 1 | 3 | 2 | 1 | 3 | 3 | 2 | 1 | 1 | 1 | 0 | 1
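QuickGELU above trades accuracy for speed by using a scaled sigmoid; comparing it with the exact GELU shows the larger (but still modest) error the docstring mentions:

import torch
from torch import nn

x = torch.linspace(-3, 3, steps=13)
quick = x * torch.sigmoid(1.702 * x)
exact = nn.functional.gelu(x)
print((quick - exact).abs().max())  # around 2e-2 at worst on this range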
|
184
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/activations.py
|
transformers.activations.ReLUSquaredActivation
|
import torch
from torch import Tensor, nn
class ReLUSquaredActivation(nn.Module):
"""
Applies the relu^2 activation introduced in https://huggingface.co/papers/2109.08668v2
"""
def forward(self, input):
relu_applied = nn.functional.relu(input)
squared = torch.square(relu_applied)
return squared
|
class ReLUSquaredActivation(nn.Module):
'''
Applies the relu^2 activation introduced in https://huggingface.co/papers/2109.08668v2
'''
def forward(self, input):
pass
| 2 | 1 | 4 | 0 | 4 | 0 | 1 | 0.6 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 11 | 9 | 1 | 5 | 4 | 3 | 3 | 5 | 4 | 3 | 1 | 1 | 0 | 1
|
185
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/cache_utils.py
|
transformers.cache_utils.Cache
|
from typing import Any, Optional
import torch
class Cache:
"""
A `Cache` is mostly a list of `CacheLayerMixin` objects, one per model layer. It serves as a container for
the Cache of each layer.
Args:
layers (`Optional`, *optional*):
A list of pre-created `CacheLayerMixin`. If omitted (`None`), then `layer_class_to_replicate` will
be used.
layer_class_to_replicate (`type[CacheLayerMixin]`, *optional*):
Only used if `layers` is omitted (`None`), in which case it will be used as the base class for each layer,
and the layers will be added lazily as soon as `update` is called with a `layer_idx` greater than the current
list of layers.
offloading (`bool`, *optional*, defaults to `False`):
Whether to perform offloading of the layers to `cpu`, to save GPU memory.
offload_only_non_sliding (`bool`, *optional*, defaults to `True`):
If `offloading` is `True`, this further decides if only the non-sliding layers will be offloaded (because
usually the sliding layers are small in size, so there is no need to offload them, and skipping it is faster).
"""
def __init__(self, layers: Optional[list[CacheLayerMixin]]=None, layer_class_to_replicate: Optional[type[CacheLayerMixin]]=None, offloading: bool=False, offload_only_non_sliding: bool=True):
if layers is not None and layer_class_to_replicate is not None:
raise ValueError('You can construct a Cache either from a list `layers` of all the predefined `CacheLayer`, or from a `layer_class_to_replicate`, in which case the Cache will append a new layer corresponding to `layer_class_to_replicate` for each new call to `update` with an idx not already in the Cache.')
if layers is None and layer_class_to_replicate is None:
raise ValueError('You should provide exactly one of `layers` or `layer_class_to_replicate` to initialize a Cache.')
self.layers = layers if layers is not None else []
self.layer_class_to_replicate = layer_class_to_replicate
self.offloading = offloading
if self.offloading:
self.only_non_sliding = offload_only_non_sliding
self.prefetch_stream = torch.Stream() if _is_torch_greater_or_equal_than_2_7 else torch.cuda.Stream()
def __repr__(self):
return f'{self.__class__.__name__}(layers={self.layers})'
def prefetch(self, layer_idx: int, only_non_sliding: bool=True):
"""
Prefetch a given layer on its device. If `only_non_sliding` is True, it will try to prefetch only the layers
which are non-sliding. If the `layer_idx` is outside the range, this will circle back to the first layers.
Note that we use a non-default stream for this, to avoid blocking.
"""
if only_non_sliding:
try:
layer_idx = layer_idx + self.is_sliding[layer_idx:].index(False)
except ValueError:
layer_idx = self.is_sliding.index(False)
else:
layer_idx = layer_idx if layer_idx < len(self.layers) else 0
with self.prefetch_stream if _is_torch_greater_or_equal_than_2_7 else torch.cuda.stream(self.prefetch_stream):
self.layers[layer_idx].prefetch()
def offload(self, layer_idx: int, only_non_sliding: bool=True):
"""
Offload a given `layer_idx`. If `only_non_sliding` is True, it will offload `layer_idx` only if it is a
non-sliding layer. Note that we do it on the default stream, so that we ensure all earlier
computation in the layer's `update` methods is finished.
"""
if not (only_non_sliding and self.is_sliding[layer_idx]):
self.layers[layer_idx].offload()
def update(self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[dict[str, Any]]=None) -> tuple[torch.Tensor, torch.Tensor]:
"""
Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
Parameters:
key_states (`torch.Tensor`):
The new key states to cache.
value_states (`torch.Tensor`):
The new value states to cache.
layer_idx (`int`):
The index of the layer to cache the states for.
cache_kwargs (`dict[str, Any]`, *optional*):
Additional arguments for the cache subclass. These are specific to each subclass and allow new types of
cache to be created.
Return:
A tuple containing the updated key and value states.
"""
if self.layer_class_to_replicate is not None:
while len(self.layers) <= layer_idx:
self.layers.append(self.layer_class_to_replicate())
if self.offloading:
torch.cuda.default_stream(key_states.device).wait_stream(self.prefetch_stream)
self.prefetch(layer_idx + 1, self.only_non_sliding)
keys, values = self.layers[layer_idx].update(key_states, value_states, cache_kwargs)
if self.offloading:
self.offload(layer_idx, self.only_non_sliding)
return (keys, values)
def early_initialization(self, batch_size: int, num_heads: int, head_dim: int, dtype: torch.dtype, device: torch.device):
"""
        Initialize all the layers in advance (they are otherwise lazily initialized on the first `update` call).
This is useful for our `export` recipes, as `export` needs everything in advance.
"""
fake_keys_tensor = torch.zeros((batch_size, num_heads, 0, head_dim), dtype=dtype, device=device)
for layer in self.layers:
layer.lazy_initialization(fake_keys_tensor)
def get_seq_length(self, layer_idx: Optional[int]=0) -> int:
"""Returns the sequence length of the cache for the given layer."""
if layer_idx >= len(self.layers):
return 0
return self.layers[layer_idx].get_seq_length()
def get_mask_sizes(self, cache_position: torch.Tensor, layer_idx: int) -> tuple[int, int]:
"""
Return a tuple (kv_length, kv_offset) corresponding to the length and offset that will be returned for
the given layer at `layer_idx`.
The masks are then prepared according to the given lengths (kv_length, kv_offset) and patterns for each layer.
"""
if layer_idx >= len(self.layers):
return (cache_position.shape[0], 0)
return self.layers[layer_idx].get_mask_sizes(cache_position)
def get_max_cache_shape(self, layer_idx: int=0) -> int:
"""Returns maximum sequence length of the cache object. Dynamic caches do not have a maximum length."""
if layer_idx >= len(self.layers):
return -1
return self.layers[layer_idx].get_max_cache_shape()
def reset(self):
"""Recursively reset all layers tensors"""
for layer_idx in range(len(self.layers)):
self.layers[layer_idx].reset()
def reorder_cache(self, beam_idx: torch.LongTensor):
"""Reorder the cache for beam search"""
for layer_idx in range(len(self.layers)):
self.layers[layer_idx].reorder_cache(beam_idx)
def crop(self, max_length: int):
"""Crop the cache to the given length"""
for layer_idx in range(len(self.layers)):
self.layers[layer_idx].crop(max_length)
def batch_repeat_interleave(self, repeats: int):
"""Repeat and interleave the cache"""
for layer_idx in range(len(self.layers)):
self.layers[layer_idx].batch_repeat_interleave(repeats)
def batch_select_indices(self, indices: torch.Tensor):
"""Select indices from the cache"""
for layer_idx in range(len(self.layers)):
self.layers[layer_idx].batch_select_indices(indices)
@property
def max_batch_size(self) -> int:
"""Return the maximum batch size of the cache"""
values = [layer.max_batch_size for layer in self.layers]
if len(set(values)) > 1:
raise ValueError(f'Max batch size is not consistent across layers: {values}')
return values[0]
@property
def max_cache_len(self) -> int:
"""Return the maximum cache length of the cache"""
values = [layer.max_cache_len for layer in self.layers]
return max(values)
@property
def is_compileable(self) -> bool:
"""Return whether the cache is compileable"""
if len(self.layers) == 0:
return False
return all((layer.is_compileable for layer in self.layers))
@property
def is_initialized(self) -> bool:
"""Return whether the cache data is initialized"""
return len(self.layers) > 0 and all((layer.is_initialized for layer in self.layers))
@property
def is_sliding(self) -> list[bool]:
"""Return whether the layers of the cache are sliding window"""
return [getattr(layer, 'is_sliding', False) for layer in self.layers]
def __getitem__(self, layer_idx: int) -> tuple[torch.Tensor, torch.Tensor]:
"""
Support for backwards-compatible `past_key_values` indexing, e.g. `past_key_values[0][0].shape[2]` to get the
sequence length.
"""
if layer_idx < len(self.layers):
return (self.layers[layer_idx].keys, self.layers[layer_idx].values)
else:
raise KeyError(f'Cache only has {len(self.layers)} layers, attempted to access layer with index {layer_idx}')
def __iter__(self):
"""
Support for backwards-compatible `past_key_values` iteration, e.g. `for x in past_key_values:` to iterate over
keys and values
"""
for layer_idx in range(len(self)):
yield (self.layers[layer_idx].keys, self.layers[layer_idx].values)
def __len__(self):
"""
This value corresponds to the number of layers in the model.
"""
return len(self.layers)
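The `update` path above is easiest to exercise through the `DynamicCache` subclass defined later in this file, since the base `Cache` needs either pre-built layers or a layer class to replicate. A minimal sketch, with illustrative shapes:

```python
import torch
from transformers import DynamicCache

# DynamicCache lazily appends one layer per new `layer_idx` passed to `update`
cache = DynamicCache()

# Expected shape of key/value states: [batch_size, num_heads, seq_len, head_dim]
key = torch.randn(1, 8, 4, 64)
value = torch.randn(1, 8, 4, 64)
keys, values = cache.update(key, value, layer_idx=0)

print(len(cache))                # 1 layer so far
print(cache.get_seq_length(0))   # 4 cached tokens on layer 0
print(cache[0][0].shape)         # backwards-compatible indexing: keys of layer 0
```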
|
class Cache:
'''
A `Cache` is mostly a list of `CacheLayerMixin` objects, one per model layer. It serves as a container for
the Cache of each layer.
Args:
layers (`list[CacheLayerMixin]`, *optional*):
A list of pre-created `CacheLayerMixin`. If omitted (`None`), then `layer_class_to_replicate` will
be used.
layer_class_to_replicate (`type[CacheLayerMixin]`, *optional*):
Only used if `layers` is omitted (`None`), in which case it will be used as the base class for each layer,
and the layers will be added lazily as soon as `update` is called with a `layer_idx` greater than the current
list of layers.
offloading (`bool`, *optional*, defaults to `False`):
Whether to perform offloading of the layers to `cpu`, to save GPU memory.
offload_only_non_sliding (`bool`, *optional*, defaults to `True`):
If `offloading` is `True`, this further decides if only the non-sliding layers will be offloaded (because
usually the sliding layers are small in size, so there is no need to offload them, and skipping it is faster).
'''
def __init__(self, layers: Optional[list[CacheLayerMixin]]=None, layer_class_to_replicate: Optional[type[CacheLayerMixin]]=None, offloading: bool=False, offload_only_non_sliding: bool=True):
pass
def __repr__(self):
pass
def prefetch(self, layer_idx: int, only_non_sliding: bool=True):
'''
Prefetch a given layer on its device. If `only_non_sliding` is True, it will try to prefetch only the layers
which are non-sliding. If the `layer_idx` is outside the range, this will circle back to the first layers.
Note that we use a non-default stream for this, to avoid blocking.
'''
pass
def offload(self, layer_idx: int, only_non_sliding: bool=True):
'''
Offload a given `layer_idx`. If `only_non_sliding` is True, it will offload `layer_idx` only if it is a
non-sliding layer. Note that we do it on the default stream, so that we ensure all earlier
computation in the layer's `update` methods are finished.
'''
pass
def update(self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[dict[str, Any]]=None) -> tuple[torch.Tensor, torch.Tensor]:
'''
Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.
Parameters:
key_states (`torch.Tensor`):
The new key states to cache.
value_states (`torch.Tensor`):
The new value states to cache.
layer_idx (`int`):
The index of the layer to cache the states for.
cache_kwargs (`dict[str, Any]`, *optional*):
Additional arguments for the cache subclass. These are specific to each subclass and allow new types of
cache to be created.
Return:
A tuple containing the updated key and value states.
'''
pass
def early_initialization(self, batch_size: int, num_heads: int, head_dim: int, dtype: torch.dtype, device: torch.device):
'''
        Initialize all the layers in advance (they are otherwise lazily initialized on the first `update` call).
This is useful for our `export` recipes, as `export` needs everything in advance.
'''
pass
def get_seq_length(self, layer_idx: Optional[int]=0) -> int:
'''Returns the sequence length of the cache for the given layer.'''
pass
def get_mask_sizes(self, cache_position: torch.Tensor, layer_idx: int) -> tuple[int, int]:
'''
Return a tuple (kv_length, kv_offset) corresponding to the length and offset that will be returned for
the given layer at `layer_idx`.
The masks are then prepared according to the given lengths (kv_length, kv_offset) and patterns for each layer.
'''
pass
def get_max_cache_shape(self, layer_idx: int=0) -> int:
'''Returns maximum sequence length of the cache object. Dynamic caches do not have a maximum length.'''
pass
def reset(self):
'''Recursively reset all layers tensors'''
pass
def reorder_cache(self, beam_idx: torch.LongTensor):
'''Reorder the cache for beam search'''
pass
def crop(self, max_length: int):
'''Crop the cache to the given length'''
pass
def batch_repeat_interleave(self, repeats: int):
'''Repeat and interleave the cache'''
pass
def batch_select_indices(self, indices: torch.Tensor):
'''Select indices from the cache'''
pass
@property
def max_batch_size(self) -> int:
'''Return the maximum batch size of the cache'''
pass
@property
def max_cache_len(self) -> int:
'''Return the maximum cache length of the cache'''
pass
@property
def is_compileable(self) -> bool:
'''Return whether the cache is compileable'''
pass
@property
def is_initialized(self) -> bool:
'''Return whether the cache data is initialized'''
pass
@property
def is_sliding(self) -> list[bool]:
'''Return whether the layers of the cache are sliding window'''
pass
def __getitem__(self, layer_idx: int) -> tuple[torch.Tensor, torch.Tensor]:
'''
Support for backwards-compatible `past_key_values` indexing, e.g. `past_key_values[0][0].shape[2]` to get the
sequence length.
'''
pass
def __iter__(self):
'''
Support for backwards-compatible `past_key_values` iteration, e.g. `for x in past_key_values:` to iterate over
keys and values
'''
pass
def __len__(self):
'''
This value corresponds to the number of layers in the model.
'''
pass
| 28
| 21
| 9
| 0
| 5
| 3
| 2
| 0.65
| 1
| 7
| 0
| 5
| 7
| 0
| 7
| 17
| 76
| 10
| 40
| 20
| 25
| 26
| 29
| 13
| 21
| 4
| 1
| 2
| 12
|
186
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/cache_utils.py
|
transformers.cache_utils.DynamicCache
|
import torch
from collections.abc import Iterable
from typing import Any, Optional
from .configuration_utils import PretrainedConfig
class DynamicCache(Cache):
"""
A cache that grows dynamically as more tokens are generated. This is the default for generative models.
It stores the key and value states as a list of `CacheLayer`, one for each layer. The expected shape for each tensor
in the `CacheLayer`s is `[batch_size, num_heads, seq_len, head_dim]`.
If a config is passed, it will additionally check for sliding or hybrid cache structure, greatly reducing the
memory requirement of the cached tensors to `[batch_size, num_heads, min(seq_len, sliding_window), head_dim]`.
See `Cache` for details on common methods that are implemented by all cache classes.
Args:
ddp_cache_data (`Iterable[tuple[torch.Tensor, torch.Tensor]]`, *optional*):
It was originally added for compatibility with `torch.distributed` (DDP). In a nutshell, it is
`map(gather_map, zip(*caches))`, i.e. each item in the iterable contains the key and value states
for a layer gathered across replicas by torch.distributed (shape=[global batch size, num_heads, seq_len, head_dim]).
Note: it needs to be the 1st arg as well to work correctly
config (`PretrainedConfig`, *optional*):
The config of the model for which this Cache will be used. If passed, it will be used to check for sliding
or hybrid layer structure, greatly reducing the memory requirement of the cached tensors to
`[batch_size, num_heads, min(seq_len, sliding_window), head_dim]`.
offloading (`bool`, *optional*, defaults to `False`):
Whether to perform offloading of the layers to `cpu`, to save GPU memory.
offload_only_non_sliding (`bool`, *optional*, defaults to `False`):
If `offloading` is `True`, this further decides if only the non-sliding layers will be offloaded (because
usually the sliding layers are small in size, so there is no need to offload them, and skipping it is faster).
Example:
```python
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache
>>> model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
>>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
>>> inputs = tokenizer(text="My name is Qwen2", return_tensors="pt")
>>> # Prepare a cache class and pass it to model's forward
>>> past_key_values = DynamicCache(config=model.config)
>>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)
>>> outputs.past_key_values # access cache filled with key/values from generation
```
"""
def __init__(self, ddp_cache_data: Optional[Iterable[tuple[torch.Tensor, torch.Tensor]]]=None, config: Optional[PretrainedConfig]=None, offloading: bool=False, offload_only_non_sliding: bool=False):
layers = []
if config is not None:
config = config.get_text_config(decoder=True)
sliding_window = getattr(config, 'sliding_window', None) or getattr(config, 'attention_chunk_size', None)
layer_types = getattr(config, 'layer_types', None)
if layer_types is None:
layer_types = ['sliding_attention' if sliding_window is not None else 'full_attention' for _ in range(config.num_hidden_layers)]
if hasattr(config, 'num_kv_shared_layers'):
layer_types = layer_types[:-config.num_kv_shared_layers]
for layer_type in layer_types:
if layer_type in ('sliding_attention', 'chunked_attention'):
layers.append(DynamicSlidingWindowLayer(sliding_window=sliding_window))
else:
layers.append(DynamicLayer())
if ddp_cache_data is not None:
for layer_idx, (key_states, value_states) in enumerate(ddp_cache_data):
if config is None:
layers.append(DynamicLayer())
_, _ = layers[layer_idx].update(key_states, value_states)
if len(layers) == 0:
super().__init__(layer_class_to_replicate=DynamicLayer, offloading=offloading, offload_only_non_sliding=offload_only_non_sliding)
else:
super().__init__(layers=layers, offloading=offloading, offload_only_non_sliding=offload_only_non_sliding)
def to_legacy_cache(self) -> tuple[tuple[torch.Tensor, torch.Tensor]]:
"""
        Converts the `Cache` instance into its equivalent in the legacy cache format. Used for
backward compatibility.
"""
legacy_cache = ()
for layer in self.layers:
legacy_cache += ((layer.keys, layer.values),)
return legacy_cache
@classmethod
def from_legacy_cache(cls, past_key_values: tuple[tuple[torch.Tensor, torch.Tensor]]) -> 'DynamicCache':
"""
Converts a cache in the legacy cache format into an equivalent `Cache`. Used for
backward compatibility.
"""
cache = cls()
if past_key_values is None:
logger.warning_once('past_key_values should not be None in from_legacy_cache()')
if past_key_values is not None:
for layer_idx in range(len(past_key_values)):
key_states, value_states = past_key_values[layer_idx]
cache.update(key_states, value_states, layer_idx)
return cache
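A minimal round-trip sketch for the legacy-format helpers above; shapes are illustrative:

```python
import torch
from transformers import DynamicCache

cache = DynamicCache()
cache.update(torch.randn(1, 2, 3, 8), torch.randn(1, 2, 3, 8), layer_idx=0)

legacy = cache.to_legacy_cache()            # tuple of (keys, values) pairs, one per layer
restored = DynamicCache.from_legacy_cache(legacy)

assert len(legacy) == 1
assert restored.get_seq_length(0) == 3
```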
|
class DynamicCache(Cache):
'''
A cache that grows dynamically as more tokens are generated. This is the default for generative models.
It stores the key and value states as a list of `CacheLayer`, one for each layer. The expected shape for each tensor
in the `CacheLayer`s is `[batch_size, num_heads, seq_len, head_dim]`.
If a config is passed, it will additionally check for sliding or hybrid cache structure, greatly reducing the
memory requirement of the cached tensors to `[batch_size, num_heads, min(seq_len, sliding_window), head_dim]`.
See `Cache` for details on common methods that are implemented by all cache classes.
Args:
ddp_cache_data (`Iterable[tuple[torch.Tensor, torch.Tensor]]`, *optional*):
It was originally added for compatibility with `torch.distributed` (DDP). In a nutshell, it is
`map(gather_map, zip(*caches))`, i.e. each item in the iterable contains the key and value states
for a layer gathered across replicas by torch.distributed (shape=[global batch size, num_heads, seq_len, head_dim]).
Note: it needs to be the 1st arg as well to work correctly
config (`PretrainedConfig`, *optional*):
The config of the model for which this Cache will be used. If passed, it will be used to check for sliding
or hybrid layer structure, greatly reducing the memory requirement of the cached tensors to
`[batch_size, num_heads, min(seq_len, sliding_window), head_dim]`.
offloading (`bool`, *optional*, defaults to `False`):
Whether to perform offloading of the layers to `cpu`, to save GPU memory.
offload_only_non_sliding (`bool`, *optional*, defaults to `False`):
If `offloading` is `True`, this further decides if only the non-sliding layers will be offloaded (because
usually the sliding layers are small in size, so there is no need to offload them, and skipping it is faster).
Example:
```python
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache
>>> model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
>>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
>>> inputs = tokenizer(text="My name is Qwen2", return_tensors="pt")
>>> # Prepare a cache class and pass it to model's forward
>>> past_key_values = DynamicCache(config=model.config)
>>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)
>>> outputs.past_key_values # access cache filled with key/values from generation
```
'''
def __init__(self, ddp_cache_data: Optional[Iterable[tuple[torch.Tensor, torch.Tensor]]]=None, config: Optional[PretrainedConfig]=None, offloading: bool=False, offload_only_non_sliding: bool=False):
pass
def to_legacy_cache(self) -> tuple[tuple[torch.Tensor, torch.Tensor]]:
'''
        Converts the `Cache` instance into its equivalent in the legacy cache format. Used for
backward compatibility.
'''
pass
@classmethod
def from_legacy_cache(cls, past_key_values: tuple[tuple[torch.Tensor, torch.Tensor]]) -> 'DynamicCache':
'''
Converts a cache in the legacy cache format into an equivalent `Cache`. Used for
backward compatibility.
'''
pass
| 5
| 3
| 11
| 0
| 7
| 4
| 2
| 0.61
| 1
| 7
| 0
| 5
| 12
| 3
| 14
| 31
| 198
| 26
| 110
| 53
| 79
| 67
| 85
| 39
| 70
| 6
| 2
| 3
| 34
|
187
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/cache_utils.py
|
transformers.cache_utils.EncoderDecoderCache
|
from collections.abc import Iterable
from typing import Any, Optional
import torch
class EncoderDecoderCache(Cache):
"""
Base, abstract class for all encoder-decoder caches. Can be used to hold combinations of self-attention and
cross-attention caches.
See `Cache` for details on common methods that are implemented by all cache classes.
Args:
caches (`Iterable`):
Usually an iterable of length 2, containing 2 `Cache` objects, the first one for self-attention, the
second one for cross-attention. Can optionally also be an iterable of length 1, containing a
`tuple[tuple[torch.Tensor]]` (usually used for compatibility with torch dp and ddp).
Example:
```python
>>> from transformers import AutoProcessor, AutoModelForCausalLM, DynamicCache, EncoderDecoderCache
>>> model = AutoModelForCausalLM.from_pretrained("openai/whisper-small")
>>> processor = AutoProcessor.from_pretrained("openai/whisper-small")
>>> inputs = processor(audio=YOUR-AUDIO, return_tensors="pt")
>>> # Prepare cache classes for encoder and decoder and pass it to model's forward
    >>> self_attention_cache = DynamicCache(config=model.config)
    >>> cross_attention_cache = DynamicCache(config=model.config)
>>> past_key_values = EncoderDecoderCache(self_attention_cache, cross_attention_cache)
>>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)
>>> outputs.past_key_values # access cache filled with key/values from generation
EncoderDecoderCache()
```
"""
def __init__(self, *caches) -> None:
if len(caches) == 1:
self.self_attention_cache = DynamicCache()
self.cross_attention_cache = DynamicCache()
for layer_idx, key_value_states in enumerate(caches[0]):
key_states, value_states = key_value_states[:2]
self.self_attention_cache.update(key_states, value_states, layer_idx)
if len(key_value_states) > 2:
key_states, value_states = key_value_states[2:]
self.cross_attention_cache.update(key_states, value_states, layer_idx)
elif len(caches) == 2:
if not isinstance(caches[0], Cache) or not isinstance(caches[1], Cache):
raise TypeError(f'One of the two arguments is not a Cache: type(caches[0]) = {type(caches[0])!r}, type(caches[1]) = {type(caches[1])!r}')
self.self_attention_cache = caches[0]
self.cross_attention_cache = caches[1]
else:
raise ValueError(f'Expected 1 or 2 arguments, got {len(caches)}')
self.is_updated = {}
for layer_idx in range(len(self.cross_attention_cache)):
self.is_updated[layer_idx] = bool(self.cross_attention_cache.get_seq_length(layer_idx) > 0)
def __repr__(self) -> str:
return f'{self.__class__.__name__}(self_attention_cache={self.self_attention_cache}, cross_attention_cache={self.cross_attention_cache})'
def __iter__(self):
"""
Support for backwards-compatible `past_key_values` iteration, e.g. `for x in past_key_values:` to iterate over
keys and values
"""
for layer_idx in range(len(self)):
yield (self.self_attention_cache.layers[layer_idx].keys, self.self_attention_cache.layers[layer_idx].values, self.cross_attention_cache.layers[layer_idx].keys, self.cross_attention_cache.layers[layer_idx].values)
def __getitem__(self, layer_idx: int) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Support for backwards-compatible `past_key_values` indexing, e.g. `past_key_values[0][0].shape[2]` to get the
sequence length.
"""
if layer_idx < len(self):
return (self.self_attention_cache.layers[layer_idx].keys, self.self_attention_cache.layers[layer_idx].values, self.cross_attention_cache.layers[layer_idx].keys, self.cross_attention_cache.layers[layer_idx].values)
else:
raise KeyError(f'Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}')
def __len__(self):
"""
Support for backwards-compatible `past_key_values` length, e.g. `len(past_key_values)`. This value corresponds
to the number of layers in the model.
"""
return len(self.self_attention_cache)
def to_legacy_cache(self) -> tuple[tuple[torch.Tensor]]:
"""Converts the `EncoderDecoderCache` instance into its equivalent in the legacy cache format."""
legacy_cache = ()
if len(self.cross_attention_cache) > 0:
for self_attn, cross_attn in zip(self.self_attention_cache.to_legacy_cache(), self.cross_attention_cache.to_legacy_cache()):
legacy_cache += (self_attn + cross_attn,)
else:
legacy_cache = self.self_attention_cache.to_legacy_cache()
return legacy_cache
@classmethod
def from_legacy_cache(cls, past_key_values: Optional[Iterable[tuple[torch.FloatTensor, ...]]]) -> 'EncoderDecoderCache':
"""Converts a cache in the legacy cache format into an equivalent `EncoderDecoderCache`."""
cache = cls(DynamicCache(), DynamicCache())
if past_key_values is None:
logger.warning_once('past_key_values should not be None in from_legacy_cache()')
else:
for layer_idx, key_value_states in enumerate(past_key_values):
key_states, value_states = key_value_states[:2]
cache.self_attention_cache.update(key_states, value_states, layer_idx)
if len(key_value_states) > 2:
key_states, value_states = key_value_states[2:]
cache.cross_attention_cache.update(key_states, value_states, layer_idx)
cache.is_updated[layer_idx] = True
return cache
def get_seq_length(self, layer_idx: Optional[int]=0) -> int:
"""Returns the sequence length of the cached states. A layer index can be optionally passed."""
return self.self_attention_cache.get_seq_length(layer_idx)
def reset(self):
self.self_attention_cache.reset()
self.cross_attention_cache.reset()
for layer_idx in self.is_updated:
self.is_updated[layer_idx] = False
def reorder_cache(self, beam_idx: torch.LongTensor):
"""Reorders the cache for beam search, given the selected beam indices."""
self.self_attention_cache.reorder_cache(beam_idx)
self.cross_attention_cache.reorder_cache(beam_idx)
def check_dynamic_cache(self, method: str):
if not (isinstance(self.self_attention_cache, DynamicCache) and isinstance(self.cross_attention_cache, DynamicCache)):
raise ValueError(f'`{method}` is only defined for dynamic cache, got {self.self_attention_cache.__str__()} for the self attention cache and {self.cross_attention_cache.__str__()} for the cross attention cache.')
def crop(self, maximum_length: int):
"""
Crop the past key values up to a new `maximum_length` in terms of tokens. `maximum_length` can also be
negative to remove `maximum_length` tokens. This is used in assisted decoding and contrastive search (on the Hub).
"""
self.check_dynamic_cache(self.crop.__name__)
self.self_attention_cache.crop(maximum_length)
def batch_split(self, full_batch_size: int, split_size: int) -> 'list[EncoderDecoderCache]':
"""
Split the current instance into a list of `DynamicCache` by the batch size. This will be used by
`_split_model_inputs()` in `generation.utils`
"""
self.check_dynamic_cache(self.batch_split.__name__)
self_attention_cache = self.self_attention_cache.batch_split(full_batch_size, split_size)
cross_attention_cache = self.cross_attention_cache.batch_split(full_batch_size, split_size)
out = []
for self_attn, cross_attn in zip(self_attention_cache, cross_attention_cache):
out.append(EncoderDecoderCache(self_attn, cross_attn))
return out
def batch_repeat_interleave(self, repeats: int):
"""Repeat the cache `repeats` times in the batch dimension. Used in contrastive search (on the Hub)."""
self.check_dynamic_cache(self.batch_repeat_interleave.__name__)
self.self_attention_cache.batch_repeat_interleave(repeats)
self.cross_attention_cache.batch_repeat_interleave(repeats)
def batch_select_indices(self, indices: torch.Tensor):
"""Only keep the `indices` in the batch dimension of the cache. Used in contrastive search (on the Hub)."""
self.check_dynamic_cache(self.batch_select_indices.__name__)
self.self_attention_cache.batch_select_indices(indices)
self.cross_attention_cache.batch_select_indices(indices)
def get_max_cache_shape(self) -> int:
"""Returns the maximum sequence length (i.e. max capacity) of the cache object"""
return self.self_attention_cache.get_max_cache_shape()
def get_mask_sizes(self, cache_position: torch.Tensor, layer_idx: int) -> tuple[int, int]:
return self.self_attention_cache.get_mask_sizes(cache_position, layer_idx)
@property
def is_sliding(self):
return self.self_attention_cache.is_sliding
@property
def is_compileable(self) -> bool:
return self.self_attention_cache.is_compileable
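A minimal sketch of the two-`Cache` constructor form described above, without any model call, just to show the container wiring:

```python
from transformers import DynamicCache, EncoderDecoderCache

past_key_values = EncoderDecoderCache(DynamicCache(), DynamicCache())

print(len(past_key_values))        # 0: no layers until the first self-attention `update`
print(past_key_values.is_updated)  # {}: filled per layer once cross-attention states are cached
```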
| null | 23
| 13
| 9
| 0
| 8
| 2
| 2
| 0.35
| 1
| 10
| 1
| 0
| 12
| 4
| 14
| 31
| 176
| 23
| 113
| 44
| 88
| 40
| 79
| 35
| 64
| 5
| 2
| 3
| 28
|
188
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/cache_utils.py
|
transformers.cache_utils.HQQQuantizedCache
|
from .configuration_utils import PretrainedConfig
class HQQQuantizedCache(QuantizedCache):
def __init__(self, config: PretrainedConfig, nbits: int=4, axis_key: int=0, axis_value: int=0, q_group_size: int=64, residual_length: int=128):
logger.warning_once("`HQQQuantizedCache` is deprecated and will be removed in version v4.59 Use `QuantizedCache(backend='hqq', ...)` instead.")
super().__init__('hqq', config, nbits, axis_key, axis_value, q_group_size, residual_length)
|
class HQQQuantizedCache(QuantizedCache):
def __init__(self, config: PretrainedConfig, nbits: int=4, axis_key: int=0, axis_value: int=0, q_group_size: int=64, residual_length: int=128):
pass
| 2
| 0
| 10
| 1
| 9
| 0
| 2
| 0.75
| 1
| 3
| 1
| 0
| 3
| 4
| 3
| 39
| 60
| 12
| 28
| 11
| 24
| 21
| 19
| 8
| 15
| 4
| 4
| 1
| 6
|
189
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/cache_utils.py
|
transformers.cache_utils.HybridCache
|
from .configuration_utils import PretrainedConfig
class HybridCache(StaticCache):
def __init__(self, config: PretrainedConfig, max_cache_len: int, *args, **kwargs):
logger.warning_once('`HybridCache` is deprecated and will be removed in version v4.59 Use `StaticCache(...)` instead which will correctly infer the type of each layer.')
super().__init__(config=config, max_cache_len=max_cache_len)
|
class HybridCache(StaticCache):
def __init__(self, config: PretrainedConfig, max_cache_len: int, *args, **kwargs):
pass
| 2
| 0
| 20
| 1
| 17
| 2
| 3
| 0.36
| 1
| 8
| 0
| 0
| 8
| 9
| 8
| 25
| 213
| 26
| 138
| 52
| 112
| 50
| 87
| 35
| 78
| 10
| 2
| 2
| 24
|
190
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/cache_utils.py
|
transformers.cache_utils.OffloadedCache
|
class OffloadedCache(DynamicCache):
def __init__(self) -> None:
logger.warning_once('`OffloadedCache` is deprecated and will be removed in version v4.59 Use `DynamicCache(offloading=True)` instead')
super().__init__(offloading=True)
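A minimal sketch of the replacement suggested by the deprecation message above, assuming a CUDA device is available for the offload/prefetch streams; the model name is illustrative:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, DynamicCache

model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct").to("cuda")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
inputs = tokenizer("Offloading keeps finished layers on CPU", return_tensors="pt").to("cuda")

# Equivalent to the deprecated OffloadedCache: each layer is moved back to CPU after its update
past_key_values = DynamicCache(config=model.config, offloading=True)
outputs = model.generate(**inputs, past_key_values=past_key_values, max_new_tokens=10)
```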
|
class OffloadedCache(DynamicCache):
def __init__(self) -> None:
pass
| 2
| 0
| 15
| 0
| 10
| 5
| 2
| 0.67
| 1
| 9
| 0
| 0
| 6
| 3
| 6
| 37
| 112
| 11
| 61
| 24
| 48
| 41
| 52
| 18
| 45
| 4
| 3
| 2
| 14
|
191
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/cache_utils.py
|
transformers.cache_utils.OffloadedStaticCache
|
from .configuration_utils import PretrainedConfig
class OffloadedStaticCache(StaticCache):
def __init__(self, config: PretrainedConfig, max_cache_len: int, *args, **kwargs):
logger.warning_once('`OffloadedStaticCache` is deprecated and will be removed in version v4.59 Use `StaticCache(..., offloading=True)` instead')
super().__init__(config=config, max_cache_len=max_cache_len, offloading=True)
|
class OffloadedStaticCache(StaticCache):
def __init__(self, config: PretrainedConfig, max_cache_len: int, *args, **kwargs):
pass
| 2
| 0
| 23
| 4
| 12
| 7
| 3
| 0.97
| 1
| 8
| 0
| 0
| 9
| 11
| 9
| 32
| 280
| 53
| 115
| 53
| 86
| 112
| 89
| 34
| 79
| 10
| 3
| 3
| 29
|
192
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/cache_utils.py
|
transformers.cache_utils.QuantizedCache
|
from .configuration_utils import PretrainedConfig
class QuantizedCache(Cache):
"""
A quantizer cache similar to what is described in the
[KIVI: A Tuning-Free Asymmetric 2bit Quantization for KV Cache paper](https://huggingface.co/papers/2402.02750).
It allows the model to generate longer sequence length without allocating too much memory for keys and values
by applying quantization.
The cache has two types of storage, one for original precision and one for the
quantized cache. A `residual length` is set as a maximum capacity for the original precision cache. When the
length goes beyond maximum capacity, the original precision cache is discarded and moved into the quantized cache.
The quantization is done per-channel with a set `q_group_size` for both keys and values, in contrast to what was
described in the paper.
See `Cache` for details on common methods that are implemented by all cache classes.
Args:
backend (`str`):
        The quantization backend to use. One of `("quanto", "hqq")`.
config (`PretrainedConfig`):
The config of the model for which this Cache will be used.
nbits (`int`, *optional*, defaults to 4):
The number of bits for quantization.
axis_key (`int`, *optional*, defaults to 0):
The axis on which to quantize the keys.
axis_value (`int`, *optional*, defaults to 0):
The axis on which to quantize the values.
q_group_size (`int`, *optional*, defaults to 64):
Quantization is done per-channel according to a set `q_group_size` for both keys and values.
residual_length (`int`, *optional*, defaults to 128):
Maximum capacity for the original precision cache
"""
def __init__(self, backend: str, config: PretrainedConfig, nbits: int=4, axis_key: int=0, axis_value: int=0, q_group_size: int=64, residual_length: int=128):
if backend == 'quanto':
layer_class = QuantoQuantizedLayer
elif backend == 'hqq':
layer_class = HQQQuantizedLayer
else:
raise ValueError(f'Unknown quantization backend `{backend}`')
config = config.get_text_config(decoder=True)
layers = [layer_class(nbits, axis_key, axis_value, q_group_size, residual_length) for _ in range(config.num_hidden_layers)]
super().__init__(layers=layers)
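A minimal construction sketch, assuming the `optimum-quanto` package is installed so the `"quanto"` backend layers can be built; the model name is illustrative:

```python
from transformers import AutoConfig
from transformers.cache_utils import QuantizedCache

config = AutoConfig.from_pretrained("Qwen/Qwen2-0.5B-Instruct")

# Keys/values stay in full precision up to `residual_length`, then get quantized per channel
past_key_values = QuantizedCache(
    backend="quanto", config=config, nbits=4, q_group_size=64, residual_length=128
)
print(len(past_key_values))  # one quantized layer per hidden layer in the config
```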
|
class QuantizedCache(Cache):
'''
A quantizer cache similar to what is described in the
[KIVI: A Tuning-Free Asymmetric 2bit Quantization for KV Cache paper](https://huggingface.co/papers/2402.02750).
It allows the model to generate longer sequence length without allocating too much memory for keys and values
by applying quantization.
The cache has two types of storage, one for original precision and one for the
quantized cache. A `residual length` is set as a maximum capacity for the original precision cache. When the
length goes beyond maximum capacity, the original precision cache is discarded and moved into the quantized cache.
The quantization is done per-channel with a set `q_group_size` for both keys and values, in contrast to what was
described in the paper.
See `Cache` for details on common methods that are implemented by all cache classes.
Args:
backend (`str`):
        The quantization backend to use. One of `("quanto", "hqq")`.
config (`PretrainedConfig`):
The config of the model for which this Cache will be used.
nbits (`int`, *optional*, defaults to 4):
The number of bits for quantization.
axis_key (`int`, *optional*, defaults to 0):
The axis on which to quantize the keys.
axis_value (`int`, *optional*, defaults to 0):
The axis on which to quantize the values.
q_group_size (`int`, *optional*, defaults to 64):
Quantization is done per-channel according to a set `q_group_size` for both keys and values.
residual_length (`int`, *optional*, defaults to 128):
Maximum capacity for the original precision cache
'''
def __init__(self, backend: str, config: PretrainedConfig, nbits: int=4, axis_key: int=0, axis_value: int=0, q_group_size: int=64, residual_length: int=128):
pass
| 2
| 1
| 14
| 1
| 12
| 1
| 2
| 0.29
| 1
| 8
| 1
| 2
| 5
| 9
| 5
| 36
| 88
| 12
| 59
| 24
| 47
| 17
| 45
| 18
| 39
| 5
| 3
| 2
| 11
|
193
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/cache_utils.py
|
transformers.cache_utils.QuantoQuantizedCache
|
from .configuration_utils import PretrainedConfig
class QuantoQuantizedCache(QuantizedCache):
def __init__(self, config: PretrainedConfig, nbits: int=4, axis_key: int=0, axis_value: int=0, q_group_size: int=64, residual_length: int=128):
logger.warning_once("`QuantoQuantizedCache` is deprecated and will be removed in version v4.59 Use `QuantizedCache(backend='quanto', ...)` instead.")
super().__init__('quanto', config, nbits, axis_key, axis_value, q_group_size, residual_length)
|
class QuantoQuantizedCache(QuantizedCache):
def __init__(self, config: PretrainedConfig, nbits: int=4, axis_key: int=0, axis_value: int=0, q_group_size: int=64, residual_length: int=128):
pass
| 2
| 0
| 11
| 2
| 9
| 1
| 3
| 0.79
| 1
| 4
| 1
| 0
| 3
| 2
| 3
| 39
| 64
| 15
| 28
| 11
| 22
| 22
| 24
| 11
| 18
| 7
| 4
| 2
| 10
|
194
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/cache_utils.py
|
transformers.cache_utils.SinkCache
|
class SinkCache(Cache):
"""
It is now a `custom_generate` repository on the Hub: https://huggingface.co/transformers-community/sink_cache.
See [these docs](https://huggingface.co/docs/transformers/generation_strategies#custom-decoding-methods) for
    general `custom_generate` usage.
"""
def __init__(self, **kwargs) -> None:
raise NotImplementedError('`SinkCache` has been moved as a `custom_generate` repository on the Hub: https://huggingface.co/transformers-community/sink_cache. See the repository for usage examples.')
|
class SinkCache(Cache):
'''
It is now a `custom_generate` repository on the Hub: https://huggingface.co/transformers-community/sink_cache.
See [these docs](https://huggingface.co/docs/transformers/generation_strategies#custom-decoding-methods) for
    general `custom_generate` usage.
'''
def __init__(self, **kwargs) -> None:
pass
| 2
| 1
| 20
| 2
| 14
| 5
| 3
| 0.6
| 1
| 5
| 0
| 0
| 6
| 8
| 7
| 24
| 185
| 28
| 99
| 47
| 80
| 59
| 72
| 36
| 64
| 11
| 2
| 3
| 19
|
195
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/cache_utils.py
|
transformers.cache_utils.SlidingWindowCache
|
from .configuration_utils import PretrainedConfig
class SlidingWindowCache(StaticCache):
def __init__(self, config: PretrainedConfig, max_cache_len: int, *args, **kwargs):
logger.warning_once('`SlidingWindowCache` is deprecated and will be removed in version v4.59 Use `StaticCache(...)` instead which will correctly infer the type of each layer.')
super().__init__(config=config, max_cache_len=max_cache_len)
|
class SlidingWindowCache(StaticCache):
def __init__(self, config: PretrainedConfig, max_cache_len: int, *args, **kwargs):
pass
| 2
| 0
| 22
| 2
| 18
| 2
| 3
| 0.7
| 1
| 8
| 0
| 0
| 4
| 1
| 4
| 27
| 149
| 23
| 74
| 29
| 54
| 52
| 47
| 14
| 42
| 4
| 3
| 2
| 10
|
196
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/cache_utils.py
|
transformers.cache_utils.StaticCache
|
from .configuration_utils import PretrainedConfig
class StaticCache(Cache):
"""
Static Cache class to be used with `torch.compile(model)` and `torch.export()`. It will check the `config`
for potential hybrid cache structure, and initialize each layer accordingly.
See `Cache` for details on common methods that are implemented by all cache classes.
Args:
config (`PretrainedConfig`):
The config of the model for which this Cache will be used. It will be used to check for sliding
or hybrid layer structure, and initialize each layer accordingly.
max_cache_len (`int`):
The maximum number of tokens that this Cache should hold.
offloading (`bool`, *optional*, defaults to `False`):
Whether to perform offloading of the layers to `cpu`, to save GPU memory.
offload_only_non_sliding (`bool`, *optional*, defaults to `True`):
If `offloading` is `True`, this further decides if only the non-sliding layers will be offloaded (because
usually the sliding layers are small in size, so there is no need to offload them, and skipping it is faster).
Example:
```python
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, StaticCache
>>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
>>> inputs = tokenizer(text="My name is Llama", return_tensors="pt")
>>> # Prepare a cache class and pass it to model's forward
>>> # Leave empty space for 10 new tokens, which can be used when calling forward iteratively 10 times to generate
>>> max_generated_length = inputs.input_ids.shape[1] + 10
>>> past_key_values = StaticCache(config=model.config, max_cache_len=max_generated_length)
>>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)
>>> outputs.past_key_values # access cache filled with key/values from generation
StaticCache()
```
"""
def __init__(self, config: PretrainedConfig, max_cache_len: int, offloading: bool=False, offload_only_non_sliding: bool=True, **kwargs):
config = config.get_text_config(decoder=True)
layer_types = getattr(config, 'layer_types', None)
if layer_types is None:
if getattr(config, 'sliding_window', None) is not None:
layer_types = ['sliding_attention' for _ in range(config.num_hidden_layers)]
elif getattr(config, 'attention_chunk_size', None) is not None:
layer_types = ['chunked_attention' for _ in range(config.num_hidden_layers)]
else:
layer_types = ['full_attention' for _ in range(config.num_hidden_layers)]
if hasattr(config, 'num_kv_shared_layers'):
layer_types = layer_types[:-config.num_kv_shared_layers]
layers = []
for layer_type in layer_types:
if layer_type == 'sliding_attention':
layer = StaticSlidingWindowLayer(max_cache_len=max_cache_len, sliding_window=config.sliding_window)
elif layer_type == 'chunked_attention':
layer = StaticSlidingWindowLayer(max_cache_len=max_cache_len, sliding_window=config.attention_chunk_size)
else:
layer = StaticLayer(max_cache_len=max_cache_len)
layers.append(layer)
super().__init__(layers=layers, offloading=offloading, offload_only_non_sliding=offload_only_non_sliding)
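As a complement to the docstring example above, a short sketch where `generate` allocates the static cache itself via `cache_implementation="static"`; the model name is illustrative:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
inputs = tokenizer("Static caches have a fixed, pre-allocated length", return_tensors="pt")

# `generate` builds a StaticCache sized for the prompt plus max_new_tokens
outputs = model.generate(**inputs, cache_implementation="static", max_new_tokens=10)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```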
|
class StaticCache(Cache):
'''
Static Cache class to be used with `torch.compile(model)` and `torch.export()`. It will check the `config`
for potential hybrid cache structure, and initialize each layer accordingly.
See `Cache` for details on common methods that are implemented by all cache classes.
Args:
config (`PretrainedConfig`):
The config of the model for which this Cache will be used. It will be used to check for sliding
or hybrid layer structure, and initialize each layer accordingly.
max_cache_len (`int`):
The maximum number of tokens that this Cache should hold.
offloading (`bool`, *optional*, defaults to `False`):
Whether to perform offloading of the layers to `cpu`, to save GPU memory.
offload_only_non_sliding (`bool`, *optional*, defaults to `True`):
If `offloading` is `True`, this further decides if only the non-sliding layers will be offloaded (because
usually the sliding layers are small in size, so there is no need to offload them, and skipping it is faster).
Example:
```python
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, StaticCache
>>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
>>> inputs = tokenizer(text="My name is Llama", return_tensors="pt")
>>> # Prepare a cache class and pass it to model's forward
>>> # Leave empty space for 10 new tokens, which can be used when calling forward iteratively 10 times to generate
>>> max_generated_length = inputs.input_ids.shape[1] + 10
>>> past_key_values = StaticCache(config=model.config, max_cache_len=max_generated_length)
>>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)
>>> outputs.past_key_values # access cache filled with key/values from generation
StaticCache()
```
'''
def __init__(self, config: PretrainedConfig, max_cache_len: int, offloading: bool=False, offload_only_non_sliding: bool=True, **kwargs):
pass
| 2
| 1
| 22
| 2
| 15
| 6
| 3
| 0.72
| 1
| 7
| 0
| 2
| 6
| 8
| 6
| 23
| 186
| 24
| 94
| 42
| 70
| 68
| 63
| 25
| 56
| 9
| 2
| 2
| 20
|
197
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/commands/__init__.py
|
transformers.commands.BaseTransformersCLICommand
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
@staticmethod
@abstractmethod
def register_subcommand(parser: ArgumentParser):
raise NotImplementedError()
@abstractmethod
def run(self):
raise NotImplementedError()
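A minimal sketch of a concrete subcommand built on this abstract class; `HelloCommand`, the `hello` subcommand, and its factory are hypothetical names, not part of the library:

```python
from argparse import ArgumentParser

from transformers.commands import BaseTransformersCLICommand


def hello_command_factory(args):
    # Factory stored via set_defaults(func=...), mirroring the pattern used by the real commands
    return HelloCommand(args.name)


class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # `parser` is the subparsers action handed over by the CLI, as in the commands below
        hello_parser = parser.add_parser("hello")
        hello_parser.add_argument("--name", type=str, default="world")
        hello_parser.set_defaults(func=hello_command_factory)

    def __init__(self, name: str):
        self.name = name

    def run(self):
        print(f"Hello, {self.name}!")
```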
|
class BaseTransformersCLICommand(ABC):
@staticmethod
@abstractmethod
def register_subcommand(parser: ArgumentParser):
pass
@abstractmethod
def run(self):
pass
| 6
| 0
| 2
| 0
| 2
| 0
| 1
| 0
| 1
| 2
| 0
| 11
| 1
| 0
| 2
| 22
| 9
| 1
| 8
| 5
| 2
| 0
| 5
| 3
| 2
| 1
| 4
| 0
| 2
|
198
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/commands/add_fast_image_processor.py
|
transformers.commands.add_fast_image_processor.AddFastImageProcessorCommand
|
from argparse import ArgumentParser, Namespace
from . import BaseTransformersCLICommand
class AddFastImageProcessorCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
add_fast_image_processor_parser = parser.add_parser('add-fast-image-processor')
add_fast_image_processor_parser.add_argument('--model-name', type=str, required=True, help="The name of the folder containing the model's implementation.")
add_fast_image_processor_parser.set_defaults(func=add_new_model_like_command_factory)
def __init__(self, model_name: str, *args):
self.model_name = model_name
def run(self):
add_fast_image_processor(model_name=self.model_name)
|
class AddFastImageProcessorCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
pass
def __init__(self, model_name: str, *args):
pass
def run(self):
pass
| 5
| 0
| 4
| 0
| 4
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 1
| 3
| 25
| 17
| 2
| 15
| 7
| 10
| 0
| 9
| 6
| 5
| 1
| 5
| 0
| 3
|
199
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/commands/add_new_model_like.py
|
transformers.commands.add_new_model_like.AddNewModelLikeCommand
|
from . import BaseTransformersCLICommand
from pathlib import Path
from argparse import ArgumentParser, Namespace
class AddNewModelLikeCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
add_new_model_like_parser = parser.add_parser('add-new-model-like')
add_new_model_like_parser.add_argument('--path_to_repo', type=str, help='When not using an editable install, the path to the Transformers repo.')
add_new_model_like_parser.set_defaults(func=add_new_model_like_command_factory)
def __init__(self, path_to_repo=None, *args):
self.old_model_infos, self.new_lowercase_name, self.new_model_paper_name, self.filenames_to_add, self.create_fast_image_processor = get_user_input()
self.path_to_repo = path_to_repo
def run(self):
if self.path_to_repo is not None:
global TRANSFORMERS_PATH
global REPO_PATH
REPO_PATH = Path(self.path_to_repo)
TRANSFORMERS_PATH = REPO_PATH / 'src' / 'transformers'
create_new_model_like(old_model_infos=self.old_model_infos, new_lowercase_name=self.new_lowercase_name, new_model_paper_name=self.new_model_paper_name, filenames_to_add=self.filenames_to_add, create_fast_image_processor=self.create_fast_image_processor)
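A minimal sketch of how the `set_defaults(func=...)` wiring above is typically dispatched; `build_parser` and `main` are hypothetical glue, not the actual CLI entry point:

```python
from argparse import ArgumentParser

from transformers.commands.add_fast_image_processor import AddFastImageProcessorCommand
from transformers.commands.add_new_model_like import AddNewModelLikeCommand


def build_parser() -> ArgumentParser:
    parser = ArgumentParser("Transformers CLI (sketch)")
    subparsers = parser.add_subparsers(help="transformers-cli command helpers")
    # Each command registers its own subparser and stores a factory via set_defaults(func=...)
    AddNewModelLikeCommand.register_subcommand(subparsers)
    AddFastImageProcessorCommand.register_subcommand(subparsers)
    return parser


def main():
    args = build_parser().parse_args()   # assumes a subcommand was provided
    command = args.func(args)            # the factory builds the command object from parsed args
    command.run()
```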
|
class AddNewModelLikeCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
pass
def __init__(self, path_to_repo=None, *args):
pass
def run(self):
pass
| 5
| 0
| 15
| 1
| 13
| 0
| 2
| 0.02
| 1
| 4
| 1
| 0
| 2
| 6
| 3
| 25
| 48
| 5
| 42
| 16
| 35
| 1
| 24
| 14
| 18
| 2
| 5
| 2
| 5
|