Benchmark configuration for Haystack model distillation: a roberta-base student is distilled from a deepset/roberta-large-squad2 teacher on SQuAD 2.0, with grid-search candidates for the distillation loss weight and temperature.
{
    "student_model": {
        "model_name_or_path": "roberta-base",
        "batch_size": 80
    },
    "teacher_model": {
        "model_name_or_path": "deepset/roberta-large-squad2",
        "batch_size": 512
    },
    "distillation_settings": {
        "distillation_loss": "kl_div",
        "distillation_loss_weight": [0.75, 1],
        "temperature": [5, 10]
    },
    "training_settings": {
        "n_epochs": 2,
        "max_seq_len": 384,
        "learning_rate": 3e-5
    },
    "dataset": "squad2",
    "download_folder": "dataset/squad2",
    "evaluate_teacher": true,
    "evaluate_student_without_distillation": true,
    "evaluate_student_with_distillation": true
}
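
The list-valued fields under "distillation_settings" are grid-search candidates: the benchmark is meant to run once per combination of "distillation_loss_weight" and "temperature". Below is a minimal sketch of how such a config could be loaded and expanded into individual runs; the filename "distillation_benchmark_config.json" and the run loop are illustrative assumptions, not the repository's actual benchmark runner.

import itertools
import json

# Load the benchmark config shown above (hypothetical filename).
with open("distillation_benchmark_config.json") as f:
    config = json.load(f)

student = config["student_model"]
teacher = config["teacher_model"]
distill = config["distillation_settings"]
training = config["training_settings"]

# Expand the grid-search fields into one run per combination of
# distillation loss weight and temperature.
grid = itertools.product(distill["distillation_loss_weight"], distill["temperature"])
for loss_weight, temperature in grid:
    print(
        f"run: student={student['model_name_or_path']} "
        f"teacher={teacher['model_name_or_path']} "
        f"loss={distill['distillation_loss']} "
        f"loss_weight={loss_weight} temperature={temperature} "
        f"epochs={training['n_epochs']} lr={training['learning_rate']}"
    )

With the values above this yields four runs (loss weights 0.75 and 1 crossed with temperatures 5 and 10), each of which would train the student against the teacher's softened logits and, per the "evaluate_*" flags, be compared with the teacher and with a student trained without distillation.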