Add new SentenceTransformer model

- 1_Pooling/config.json +10 -0
- README.md +393 -0
- config.json +30 -0
- config_sentence_transformers.json +10 -0
- model.safetensors +3 -0
- modules.json +20 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +37 -0
- tokenizer.json +0 -0
- tokenizer_config.json +58 -0
- vocab.txt +0 -0
1_Pooling/config.json
ADDED
@@ -0,0 +1,10 @@
{
  "word_embedding_dimension": 384,
  "pooling_mode_cls_token": true,
  "pooling_mode_mean_tokens": false,
  "pooling_mode_max_tokens": false,
  "pooling_mode_mean_sqrt_len_tokens": false,
  "pooling_mode_weightedmean_tokens": false,
  "pooling_mode_lasttoken": false,
  "include_prompt": true
}
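With `pooling_mode_cls_token` enabled and every other mode off, the sentence embedding is simply the final hidden state of the first (`[CLS]`) token. A minimal PyTorch sketch of that selection, using a dummy tensor in place of real encoder output:

```python
import torch

# Dummy encoder output with shape (batch, seq_len, word_embedding_dimension).
token_embeddings = torch.randn(2, 16, 384)

# CLS pooling: keep only the first token's hidden state per sequence.
sentence_embeddings = token_embeddings[:, 0]
print(sentence_embeddings.shape)  # torch.Size([2, 384])
```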
README.md
ADDED
@@ -0,0 +1,393 @@
---
tags:
- sentence-transformers
- sentence-similarity
- feature-extraction
- generated_from_trainer
- dataset_size:2000
- loss:CoSENTLoss
base_model: avsolatorio/GIST-small-Embedding-v0
widget:
- source_sentence: do vivid seats tickets work?
  sentences:
  - Charlotte-Mecklenburg Schools will be closed for students on Friday due to the
    forecast of severe weather. ... CMS staff members work with city and county leaders
    to receive the most up-to-date information about road and weather conditions.
  - Tickets are $40 per ticket and $400 for a table of ten. Tickets are available
    for purchase when you register for the show.
  - This service is currently offered free of charge by the bank. You can get the
    last 'Available' balance of your account (by an SMS) by giving a Missed Call to
    18008431122. You can get the Mini Statement (by an SMS) for last 5 transactions
    in your account by giving a Missed Call to 18008431133. 1.
- source_sentence: is alexa compatible with tv?
  sentences:
  - To fix this Echo red light, start with the restart of the router and Amazon Echo.
    In case, the restart process doesn't work, check for the device and app update
    in Alexa app. If it's available, click the 'Update' button for compatibility reason.
  - Ligament - A small band of dense, white, fibrous elastic tissue. Ligaments connect
    the ends of bones together in order to form a joint. Tendon - A tough, flexible
    band of fibrous connective tissue that connects muscles to bones.
  - There are 610 calories in a 1 bowl serving of El Pollo Loco Original Pollo Bowl.
- source_sentence: can you play fortnite save the world on mac?
  sentences:
  - '[''In the Music app on your Mac, click iTunes Store in the sidebar. ... '', ''Click
    Purchased (below Quick Links) near the top right of the iTunes Store window.'',
    ''Click Music near the top right of the page that appears. ... '', ''To download
    an item, click its Download button .'']'
  - Essential Oils in the Second and Third Trimesters. "In the second and third trimesters,
    some essential oils are safe to use, as your baby is more developed," Edwards
    adds. These include lavender, chamomile, and ylang ylang—all of which calm, relax,
    and aid sleep.
  - ADR holders do not have to transact the trade in the foreign currency or worry
    about exchanging currency on the forex market. ... ADRs list on either the New
    York Stock Exchange (NYSE), American Stock Exchange (AMEX), or the Nasdaq, but
    they are also sold over-the-counter (OTC).
- source_sentence: how long does money take to transfer boi?
  sentences:
  - 'When will it take more than one working day? It will take more than one working
    day to reach your payee''s bank when: You make a payment online after 3.30pm in
    the Republic of Ireland or after 4.30pm in Northern Ireland and Great Britain
    on a working day. Your payment will begin to process on the next working day.'
  - If you had bought just one share of Microsoft at the IPO, you would now have 288
    shares after all the splits. Those shares would be worth $44,505 at the current
    stock quote of $154.53. A $5,000 investment would have purchased 238 shares at
    the IPO price.
  - FKM is the American standard ASTM short form name for Fluro-Elastomer. ... VITON™
    is a registered trademark of Du Pont performance elastomers, the original developers
    of the rubber. However, the Viton is also used as a general name for the material,
    no matter who the manufacturer is.
- source_sentence: how long is a texas vehicle inspection report good for?
  sentences:
  - '[''Aerospace engineer.'', ''Automotive engineer.'', ''CAD technician.'', ''Contracting
    civil engineer.'', ''Control and instrumentation engineer.'', ''Maintenance engineer.'',
    ''Mechanical engineer.'', ''Nuclear engineer.'']'
  - A key difference is that it's simpler to unlock a credit lock than it is to “thaw”
    a credit freeze. But a freeze may afford legal protections that a lock doesn't.
    ... The credit bureaus sometimes promote their credit lock services, which can
    carry a monthly fee, alongside their credit freeze options, which are free.
  - If your car fails its MOT you can only continue to drive it if the previous year's
    MOT is still valid - which might occur if you submitted the car for its test two
    weeks early. You can still drive it away from the testing centre or garage if
    no 'dangerous' problems were identified during the MOT.
pipeline_tag: sentence-similarity
library_name: sentence-transformers
---

# SentenceTransformer based on avsolatorio/GIST-small-Embedding-v0

This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [avsolatorio/GIST-small-Embedding-v0](https://huggingface.co/avsolatorio/GIST-small-Embedding-v0). It maps sentences & paragraphs to a 384-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.

## Model Details

### Model Description
- **Model Type:** Sentence Transformer
- **Base model:** [avsolatorio/GIST-small-Embedding-v0](https://huggingface.co/avsolatorio/GIST-small-Embedding-v0) <!-- at revision 75e62fd210b9fde790430e0b2f040b0b00a021b1 -->
- **Maximum Sequence Length:** 512 tokens
- **Output Dimensionality:** 384 dimensions
- **Similarity Function:** Cosine Similarity
<!-- - **Training Dataset:** Unknown -->
<!-- - **Language:** Unknown -->
<!-- - **License:** Unknown -->

### Model Sources

- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)

### Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 512, 'do_lower_case': True}) with Transformer model: BertModel
  (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
  (2): Normalize()
)
```
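The same three-stage pipeline can be reproduced with plain `transformers` operations. This is a minimal sketch of what the modules compute, not the library's internal code, and it assumes the repo id used in the usage example below:

```python
import torch
import torch.nn.functional as F
from transformers import AutoModel, AutoTokenizer

repo = "moshew/gist_small_ft_gooaq_v3"  # repo id from the usage example below
tokenizer = AutoTokenizer.from_pretrained(repo)
encoder = AutoModel.from_pretrained(repo)

batch = tokenizer(["do vivid seats tickets work?"], padding=True,
                  truncation=True, max_length=512, return_tensors="pt")
with torch.no_grad():
    hidden = encoder(**batch).last_hidden_state  # (0) Transformer: (batch, seq_len, 384)

cls = hidden[:, 0]                               # (1) Pooling: CLS token only
embeddings = F.normalize(cls, p=2, dim=1)        # (2) Normalize: unit L2 norm
print(embeddings.shape)                          # torch.Size([1, 384])
```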

## Usage

### Direct Usage (Sentence Transformers)

First install the Sentence Transformers library:

```bash
pip install -U sentence-transformers
```

Then you can load this model and run inference.
```python
from sentence_transformers import SentenceTransformer

# Download from the 🤗 Hub
model = SentenceTransformer("moshew/gist_small_ft_gooaq_v3")
# Run inference
sentences = [
    'how long is a texas vehicle inspection report good for?',
    "If your car fails its MOT you can only continue to drive it if the previous year's MOT is still valid - which might occur if you submitted the car for its test two weeks early. You can still drive it away from the testing centre or garage if no 'dangerous' problems were identified during the MOT.",
    "['Aerospace engineer.', 'Automotive engineer.', 'CAD technician.', 'Contracting civil engineer.', 'Control and instrumentation engineer.', 'Maintenance engineer.', 'Mechanical engineer.', 'Nuclear engineer.']",
]
embeddings = model.encode(sentences)
print(embeddings.shape)
# [3, 384]

# Get the similarity scores for the embeddings
similarities = model.similarity(embeddings, embeddings)
print(similarities.shape)
# [3, 3]
```
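Because the embeddings are L2-normalized, cosine similarity reduces to a dot product, which makes ranking cheap. A small semantic-search sketch; the query and candidate texts are illustrative, borrowed from the widget examples above:

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("moshew/gist_small_ft_gooaq_v3")

query = "is alexa compatible with tv?"
candidates = [
    "To fix this Echo red light, start with the restart of the router and Amazon Echo.",
    "There are 610 calories in a 1 bowl serving of El Pollo Loco Original Pollo Bowl.",
]

query_embedding = model.encode([query])
candidate_embeddings = model.encode(candidates)
scores = model.similarity(query_embedding, candidate_embeddings)[0]

# Print candidates from most to least similar to the query.
for score, text in sorted(zip(scores.tolist(), candidates), reverse=True):
    print(f"{score:.3f}  {text}")
```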

<!--
### Direct Usage (Transformers)

<details><summary>Click to see the direct usage in Transformers</summary>

</details>
-->

<!--
### Downstream Usage (Sentence Transformers)

You can finetune this model on your own dataset.

<details><summary>Click to expand</summary>

</details>
-->

<!--
### Out-of-Scope Use

*List how the model may foreseeably be misused and address what users ought not to do with the model.*
-->

<!--
## Bias, Risks and Limitations

*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
-->

<!--
### Recommendations

*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
-->

## Training Details

### Training Dataset

#### Unnamed Dataset

* Size: 2,000 training samples
* Columns: <code>sentence1</code>, <code>sentence2</code>, and <code>label</code>
* Approximate statistics based on the first 1000 samples:
  |         | sentence1 | sentence2 | label |
  |:--------|:----------|:----------|:------|
  | type    | string    | string    | float |
  | details | <ul><li>min: 8 tokens</li><li>mean: 12.05 tokens</li><li>max: 23 tokens</li></ul> | <ul><li>min: 13 tokens</li><li>mean: 59.84 tokens</li><li>max: 124 tokens</li></ul> | <ul><li>min: 0.0</li><li>mean: 0.5</li><li>max: 1.0</li></ul> |
* Samples:
  | sentence1 | sentence2 | label |
  |:----------|:----------|:------|
  | <code>what is the difference between rapid rise yeast and bread machine yeast?</code> | <code>Though there are some minor differences in shape and nutrients, Rapid-Rise Yeast is (pretty much) the same as Instant Yeast and Bread Machine Yeast. ... Also, Rapid-Rise Yeast is a little more potent than Active Dry Yeast and can be mixed in with your dry ingredients directly.</code> | <code>1.0</code> |
  | <code>what is the difference between rapid rise yeast and bread machine yeast?</code> | <code>Fermentation recycles NAD+, and produces 2 ATPs. In lactic acid fermentation, pyruvate from glycolysis changes to lactic acid. ... In alcoholic fermentation, pyruvate changes to alcohol and carbon dioxide. This type of fermentation is carried out by yeasts and some bacteria.</code> | <code>0.0</code> |
  | <code>are light kits universal for ceiling fans?</code> | <code>Not all Universal Light Kits are actually Universal. They can be universal to only that manufacturer. ... Casablanca and Hunter Ceiling Fan Light Kits are universal only to their own fans.</code> | <code>1.0</code> |
* Loss: [<code>CoSENTLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) with these parameters:
  ```json
  {
      "scale": 20.0,
      "similarity_fct": "pairwise_cos_sim"
  }
  ```
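For reference, reproducing a fine-tuning run with this loss could look roughly like the sketch below. The toy rows are placeholders mirroring the `sentence1`/`sentence2`/`label` schema above, not the actual training data:

```python
from datasets import Dataset
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses

# Placeholder rows in the sentence1/sentence2/label format described above.
train_dataset = Dataset.from_dict({
    "sentence1": ["are light kits universal for ceiling fans?",
                  "are light kits universal for ceiling fans?"],
    "sentence2": ["Not all Universal Light Kits are actually Universal.",
                  "Fermentation recycles NAD+, and produces 2 ATPs."],
    "label": [1.0, 0.0],
})

model = SentenceTransformer("avsolatorio/GIST-small-Embedding-v0")
loss = losses.CoSENTLoss(model, scale=20.0)  # matches the loss parameters above

trainer = SentenceTransformerTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
```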

### Training Hyperparameters
#### Non-Default Hyperparameters

- `per_device_train_batch_size`: 16
- `per_device_eval_batch_size`: 16
- `num_train_epochs`: 1
- `warmup_ratio`: 0.1
- `seed`: 12
- `bf16`: True
- `dataloader_num_workers`: 4

#### All Hyperparameters
<details><summary>Click to expand</summary>

- `overwrite_output_dir`: False
- `do_predict`: False
- `eval_strategy`: no
- `prediction_loss_only`: True
- `per_device_train_batch_size`: 16
- `per_device_eval_batch_size`: 16
- `per_gpu_train_batch_size`: None
- `per_gpu_eval_batch_size`: None
- `gradient_accumulation_steps`: 1
- `eval_accumulation_steps`: None
- `torch_empty_cache_steps`: None
- `learning_rate`: 5e-05
- `weight_decay`: 0.0
- `adam_beta1`: 0.9
- `adam_beta2`: 0.999
- `adam_epsilon`: 1e-08
- `max_grad_norm`: 1.0
- `num_train_epochs`: 1
- `max_steps`: -1
- `lr_scheduler_type`: linear
- `lr_scheduler_kwargs`: {}
- `warmup_ratio`: 0.1
- `warmup_steps`: 0
- `log_level`: passive
- `log_level_replica`: warning
- `log_on_each_node`: True
- `logging_nan_inf_filter`: True
- `save_safetensors`: True
- `save_on_each_node`: False
- `save_only_model`: False
- `restore_callback_states_from_checkpoint`: False
- `no_cuda`: False
- `use_cpu`: False
- `use_mps_device`: False
- `seed`: 12
- `data_seed`: None
- `jit_mode_eval`: False
- `use_ipex`: False
- `bf16`: True
- `fp16`: False
- `fp16_opt_level`: O1
- `half_precision_backend`: auto
- `bf16_full_eval`: False
- `fp16_full_eval`: False
- `tf32`: None
- `local_rank`: 0
- `ddp_backend`: None
- `tpu_num_cores`: None
- `tpu_metrics_debug`: False
- `debug`: []
- `dataloader_drop_last`: False
- `dataloader_num_workers`: 4
- `dataloader_prefetch_factor`: None
- `past_index`: -1
- `disable_tqdm`: False
- `remove_unused_columns`: True
- `label_names`: None
- `load_best_model_at_end`: False
- `ignore_data_skip`: False
- `fsdp`: []
- `fsdp_min_num_params`: 0
- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
- `tp_size`: 0
- `fsdp_transformer_layer_cls_to_wrap`: None
- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
- `deepspeed`: None
- `label_smoothing_factor`: 0.0
- `optim`: adamw_torch
- `optim_args`: None
- `adafactor`: False
- `group_by_length`: False
- `length_column_name`: length
- `ddp_find_unused_parameters`: None
- `ddp_bucket_cap_mb`: None
- `ddp_broadcast_buffers`: False
- `dataloader_pin_memory`: True
- `dataloader_persistent_workers`: False
- `skip_memory_metrics`: True
- `use_legacy_prediction_loop`: False
- `push_to_hub`: False
- `resume_from_checkpoint`: None
- `hub_model_id`: None
- `hub_strategy`: every_save
- `hub_private_repo`: None
- `hub_always_push`: False
- `gradient_checkpointing`: False
- `gradient_checkpointing_kwargs`: None
- `include_inputs_for_metrics`: False
- `include_for_metrics`: []
- `eval_do_concat_batches`: True
- `fp16_backend`: auto
- `push_to_hub_model_id`: None
- `push_to_hub_organization`: None
- `mp_parameters`: 
- `auto_find_batch_size`: False
- `full_determinism`: False
- `torchdynamo`: None
- `ray_scope`: last
- `ddp_timeout`: 1800
- `torch_compile`: False
- `torch_compile_backend`: None
- `torch_compile_mode`: None
- `include_tokens_per_second`: False
- `include_num_input_tokens_seen`: False
- `neftune_noise_alpha`: None
- `optim_target_modules`: None
- `batch_eval_metrics`: False
- `eval_on_start`: False
- `use_liger_kernel`: False
- `eval_use_gather_object`: False
- `average_tokens_across_devices`: False
- `prompts`: None
- `batch_sampler`: batch_sampler
- `multi_dataset_batch_sampler`: proportional

</details>

### Training Logs
| Epoch | Step | Training Loss |
|:-----:|:----:|:-------------:|
| 0.008 | 1    | 3.5339        |


### Framework Versions
- Python: 3.11.12
- Sentence Transformers: 4.1.0
- Transformers: 4.51.3
- PyTorch: 2.6.0+cu124
- Accelerate: 1.5.2
- Datasets: 3.5.0
- Tokenizers: 0.21.1

## Citation

### BibTeX

#### Sentence Transformers
```bibtex
@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/1908.10084",
}
```

#### CoSENTLoss
```bibtex
@online{kexuefm-8847,
    title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},
    author={Su Jianlin},
    year={2022},
    month={Jan},
    url={https://kexue.fm/archives/8847},
}
```

<!--
## Glossary

*Clearly define terms in order to be accessible across audiences.*
-->

<!--
## Model Card Authors

*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
-->

<!--
## Model Card Contact

*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
-->
config.json
ADDED
@@ -0,0 +1,30 @@
{
  "architectures": [
    "BertModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "classifier_dropout": null,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 384,
  "id2label": {
    "0": "LABEL_0"
  },
  "initializer_range": 0.02,
  "intermediate_size": 1536,
  "label2id": {
    "LABEL_0": 0
  },
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
  "transformers_version": "4.51.3",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 30522
}
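These values describe a BERT-small geometry: 12 layers, 12 attention heads, 384-dimensional hidden states. A quick sanity check after download, sketched with `transformers.AutoConfig` and the repo id assumed from the README's usage example:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("moshew/gist_small_ft_gooaq_v3")
# Should print the geometry declared in this config.json.
print(config.model_type, config.num_hidden_layers,
      config.num_attention_heads, config.hidden_size)  # bert 12 12 384
```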
config_sentence_transformers.json
ADDED
@@ -0,0 +1,10 @@
{
  "__version__": {
    "sentence_transformers": "4.1.0",
    "transformers": "4.51.3",
    "pytorch": "2.6.0+cu124"
  },
  "prompts": {},
  "default_prompt_name": null,
  "similarity_fn_name": "cosine"
}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d3716606fb287bc20efce7a52f228bdc55a98656af72d17b7885226def7c2cce
size 133462128
modules.json
ADDED
@@ -0,0 +1,20 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  },
  {
    "idx": 2,
    "name": "2",
    "path": "2_Normalize",
    "type": "sentence_transformers.models.Normalize"
  }
]
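Each entry names a `sentence_transformers.models` class and the subfolder holding its configuration (the Transformer weights sit at the repo root). A sketch of assembling an equivalent Transformer, Pooling, Normalize stack by hand, here starting from the base checkpoint rather than this repo:

```python
from sentence_transformers import SentenceTransformer, models

# Rebuild the three modules listed above, in order.
word_embedding = models.Transformer("avsolatorio/GIST-small-Embedding-v0", max_seq_length=512)
pooling = models.Pooling(word_embedding.get_word_embedding_dimension(),
                         pooling_mode_cls_token=True,
                         pooling_mode_mean_tokens=False)
normalize = models.Normalize()

model = SentenceTransformer(modules=[word_embedding, pooling, normalize])
print(model)  # mirrors the architecture printout in the README
```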
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
{
  "max_seq_length": 512,
  "do_lower_case": true
}
special_tokens_map.json
ADDED
@@ -0,0 +1,37 @@
{
  "cls_token": {
    "content": "[CLS]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "[MASK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "[PAD]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "[SEP]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "[UNK]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,58 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "[PAD]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "100": {
      "content": "[UNK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "101": {
      "content": "[CLS]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "102": {
      "content": "[SEP]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "103": {
      "content": "[MASK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "clean_up_tokenization_spaces": true,
  "cls_token": "[CLS]",
  "do_basic_tokenize": true,
  "do_lower_case": true,
  "extra_special_tokens": {},
  "mask_token": "[MASK]",
  "model_max_length": 512,
  "never_split": null,
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "unk_token": "[UNK]"
}
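A short check that the tokenizer behaves as configured (lowercasing, `[CLS]`/`[SEP]` wrapping, 512-token limit), sketched with the repo id assumed from the README's usage example:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("moshew/gist_small_ft_gooaq_v3")

ids = tokenizer("Do vivid seats tickets work?")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))
# Expect something like ['[CLS]', 'do', 'vivid', ...]: lowercased per
# do_lower_case and wrapped in the special tokens defined above.
print(tokenizer.model_max_length)  # 512
```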
vocab.txt
ADDED
The diff for this file is too large to render.