added final checkpoint

- config.json +27 -0
- merges.txt +0 -0
- model.safetensors +3 -0
- model_architecture.txt +44 -0
- special_tokens_map.json +15 -0
- tokenizer.json +0 -0
- tokenizer_config.json +16 -0
- training_args.bin +3 -0
- vocab.json +0 -0
config.json
ADDED
@@ -0,0 +1,27 @@
+{
+  "_name_or_path": "/rgroup/dsig/llm-team/experiments/mlm-fine-tuning/tmp/models/timestamp_20250304_15-54-56/nasa-impact/nasa-smd-ibm-v0.1/checkpoint-41059",
+  "architectures": [
+    "RobertaForMaskedLM"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 1026,
+  "model_type": "roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.47.1",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 50265
+}
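This config describes a RoBERTa-base-sized masked LM: 12 layers, 12 attention heads, hidden size 768, vocab size 50265. The path in _name_or_path is local to the training machine, so the checkpoint path below is a placeholder. A minimal loading sketch using the standard transformers API:

    from transformers import AutoConfig, AutoModelForMaskedLM

    # Read the hyperparameters recorded in config.json
    config = AutoConfig.from_pretrained("path/to/checkpoint")  # placeholder path
    print(config.hidden_size, config.num_hidden_layers)        # 768 12

    # Instantiate the model and load the checkpoint weights
    model = AutoModelForMaskedLM.from_pretrained("path/to/checkpoint")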
merges.txt
ADDED
The diff for this file is too large to render. See raw diff.
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:286a86f7f004b067bbc3a927cecc331fd8a3605a0af35ea182a80a8daddad878
+size 500386812
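The weights themselves live in Git LFS; this pointer file records only the object's sha256 and size (500386812 bytes, consistent with roughly 125M float32 parameters, i.e. a RoBERTa-base masked LM). A minimal sketch for checking a downloaded copy against the pointer:

    import hashlib

    # Recompute the LFS object id of the downloaded weights file
    sha = hashlib.sha256()
    with open("model.safetensors", "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
            sha.update(chunk)

    expected = "286a86f7f004b067bbc3a927cecc331fd8a3605a0af35ea182a80a8daddad878"
    print(sha.hexdigest() == expected)  # True if the download is intact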
model_architecture.txt
ADDED
@@ -0,0 +1,44 @@
+RobertaForMaskedLM(
+  (roberta): RobertaModel(
+    (embeddings): RobertaEmbeddings(
+      (word_embeddings): Embedding(50265, 768, padding_idx=1)
+      (position_embeddings): Embedding(1026, 768, padding_idx=1)
+      (token_type_embeddings): Embedding(1, 768)
+      (LayerNorm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
+      (dropout): Dropout(p=0.1, inplace=False)
+    )
+    (encoder): RobertaEncoder(
+      (layer): ModuleList(
+        (0-11): 12 x RobertaLayer(
+          (attention): RobertaAttention(
+            (self): RobertaSdpaSelfAttention(
+              (query): Linear(in_features=768, out_features=768, bias=True)
+              (key): Linear(in_features=768, out_features=768, bias=True)
+              (value): Linear(in_features=768, out_features=768, bias=True)
+              (dropout): Dropout(p=0.1, inplace=False)
+            )
+            (output): RobertaSelfOutput(
+              (dense): Linear(in_features=768, out_features=768, bias=True)
+              (LayerNorm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
+              (dropout): Dropout(p=0.1, inplace=False)
+            )
+          )
+          (intermediate): RobertaIntermediate(
+            (dense): Linear(in_features=768, out_features=3072, bias=True)
+            (intermediate_act_fn): GELUActivation()
+          )
+          (output): RobertaOutput(
+            (dense): Linear(in_features=3072, out_features=768, bias=True)
+            (LayerNorm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
+            (dropout): Dropout(p=0.1, inplace=False)
+          )
+        )
+      )
+    )
+  )
+  (lm_head): RobertaLMHead(
+    (dense): Linear(in_features=768, out_features=768, bias=True)
+    (layer_norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
+    (decoder): Linear(in_features=768, out_features=50265, bias=True)
+  )
+)
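This dump is the standard PyTorch module repr of the loaded model. A minimal sketch for reproducing it and counting parameters (placeholder checkpoint path):

    from transformers import AutoModelForMaskedLM

    model = AutoModelForMaskedLM.from_pretrained("path/to/checkpoint")
    print(model)  # prints the module tree shown above

    # Total parameter count; ~125M for this RoBERTa-base configuration
    print(f"{sum(p.numel() for p in model.parameters()):,}")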
special_tokens_map.json
ADDED
@@ -0,0 +1,15 @@
+{
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
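Note that the mask token is declared with lstrip: true, so <mask> absorbs the whitespace before it, which is what fill-mask inference expects. A minimal usage sketch (placeholder checkpoint path and example sentence):

    from transformers import pipeline

    # The fill-mask pipeline uses the <mask> token defined above
    fill = pipeline("fill-mask", model="path/to/checkpoint")
    for pred in fill("The instrument measured the <mask> spectrum."):
        print(pred["token_str"], round(pred["score"], 3))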
tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
tokenizer_config.json
ADDED
@@ -0,0 +1,16 @@
+{
+  "add_prefix_space": false,
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "errors": "replace",
+  "mask_token": "<mask>",
+  "model_max_length": 1000000000000000019884624838656,
+  "name_or_path": "nasa_eb_tokenizer",
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "special_tokens_map_file": null,
+  "tokenizer_class": "RobertaTokenizer",
+  "trim_offsets": true,
+  "unk_token": "<unk>"
+}
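The huge model_max_length value is the transformers sentinel meaning no limit was recorded, so the truncation length should be set explicitly. Given max_position_embeddings of 1026 and RoBERTa's two reserved positions for the padding offset, the usable context is 1024 tokens. A minimal sketch (placeholder path):

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("path/to/checkpoint")
    # model_max_length is a sentinel here, so cap sequences explicitly:
    # 1026 position embeddings - 2 reserved positions = 1024 usable tokens
    enc = tok("a long document ...", truncation=True, max_length=1024)
    print(len(enc["input_ids"]))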
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f1e44d2ad0904cb73142d06890d88099e3f9a9859761602d65745016c9e9e2e
+size 5432
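training_args.bin is the pickled TrainingArguments object that the Trainer saves alongside each checkpoint. A minimal inspection sketch; because it is a Python pickle, weights_only=False is needed on recent PyTorch versions and the file should only be loaded from a trusted source:

    import torch

    # Recover the TrainingArguments used for this run
    args = torch.load("training_args.bin", weights_only=False)
    print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)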
vocab.json
ADDED
The diff for this file is too large to render. See raw diff.