Update README.md
README.md CHANGED
@@ -113,8 +113,8 @@ from sentence_transformers import CrossEncoder, util
 from transformers import AutoTokenizer, TFAutoModel

 # Load the bi-encoder
-tokenizer = AutoTokenizer.from_pretrained("/
-model = TFAutoModel.from_pretrained("/
+tokenizer = AutoTokenizer.from_pretrained("CiscoAITeam/SecureBERT2.0-biencoder")
+model = TFAutoModel.from_pretrained("CiscoAITeam/SecureBERT2.0-biencoder")

 # Encode queries and documents
 query_embedding = model(tokenizer("Example query", return_tensors="tf"))[0]

@@ -173,7 +173,6 @@ print(similarity)
 - Datasets: 3.6.0
 - Tokenizers: 0.21.1

-## Citation

 ## Reference
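The first hunk fills in the bi-encoder loading lines, and the surrounding README context (`print(similarity)`) suggests the example goes on to score a query against a document. Below is a minimal sketch of how that completed snippet might look, assuming mean pooling over token embeddings and cosine similarity for scoring; the `embed` helper, the pooling choice, and the sample document text are illustrative assumptions, not taken from the model card.

```python
# Sketch only: completes the README snippet under assumed mean pooling.
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModel

model_id = "CiscoAITeam/SecureBERT2.0-biencoder"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = TFAutoModel.from_pretrained(model_id)

def embed(text: str) -> tf.Tensor:
    # Tokenize and run the bi-encoder; last_hidden_state is (1, seq_len, hidden).
    inputs = tokenizer(text, return_tensors="tf", truncation=True)
    hidden = model(inputs).last_hidden_state
    # Mask out padding tokens before averaging (assumed pooling strategy).
    mask = tf.cast(tf.expand_dims(inputs["attention_mask"], -1), hidden.dtype)
    return tf.reduce_sum(hidden * mask, axis=1) / tf.reduce_sum(mask, axis=1)

query_embedding = embed("Example query")
doc_embedding = embed("Example security advisory text")  # placeholder document

# Cosine similarity between L2-normalized embeddings.
similarity = tf.reduce_sum(
    tf.nn.l2_normalize(query_embedding, axis=-1)
    * tf.nn.l2_normalize(doc_embedding, axis=-1),
    axis=-1,
)
print(float(similarity[0]))
```

Mask-weighted mean pooling is a common default for bi-encoders, but the model card may prescribe a different pooling scheme or a sentence-transformers wrapper; check it before relying on this sketch.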