YWZBrandon committed · verified
Commit 2cda56d · Parent(s): 9ff0ef8

Training in progress, step 500
config.json CHANGED
@@ -32,5 +32,5 @@
   "torch_dtype": "float32",
   "transformers_version": "4.46.3",
   "use_cache": false,
-  "vocab_size": 129256
+  "vocab_size": 128256
 }
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b68beffbaa23e375c7678b78f6390368ed42a0a1d3ad14a75a5343a0c89ae5a5
-size 4910391944
+oid sha256:d32065d6879d85752d296c783c52aae278420fc7b4c40c0632ad7eea17ae04ec
+size 4998767360
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:22f06013eaf2cd4914e092cf820e629c3a695ee1e0f0b8c06fd00445aeceea85
+oid sha256:b0a7837498bbe3416608f3fef4392c965358cbcd35d822f178ad70e18e7636b9
 size 4932808968
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:332ed21b1b29d3c86c0dc65937d7a4a6c6ac7f4ec57147faee0756ef1ad47bfb
-size 3020115584
+oid sha256:a7b9e96e0668ea364b6a3bdaf60d7b7c693a815a906acb8d95a4944b3dd08fe4
+size 2919452168
model.safetensors.index.json CHANGED
@@ -1,6 +1,6 @@
 {
   "metadata": {
-    "total_size": 12863287296
+    "total_size": 12850999296
   },
   "weight_map": {
     "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
@@ -124,7 +124,7 @@
     "model.layers.20.input_layernorm.weight": "model-00003-of-00003.safetensors",
     "model.layers.20.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
     "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.20.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
     "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
@@ -240,7 +240,7 @@
     "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
     "model.layers.8.input_layernorm.weight": "model-00002-of-00003.safetensors",
     "model.layers.8.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
     "model.layers.8.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
     "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:43b2afbd0d4209cab99d1ef597fbfeb8596a726dbd4582c3d86056c0b76e0cef
-size 17393973
+oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b
+size 17209920
tokenizer_config.json CHANGED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9548f8a0e6bfbae7dab801d4a36a30e5309f624c0f87ee4f8cf083ca4a488646
+oid sha256:f21e58a98925988401beb6ce25f466074aa004f3cb63819a7e3dfc641d28b3c6
 size 5752