import os
import random

import numpy as np

from haystack import logging

logger = logging.getLogger(__name__)


def set_all_seeds(seed: int, deterministic_cudnn: bool = False) -> None:
    """
    Set multiple seeds to make runs reproducible.

    Important: Enabling `deterministic_cudnn` gives you full reproducibility with CUDA,
    but might slow down your training (see https://pytorch.org/docs/stable/notes/randomness.html#cudnn)!

    :param seed: number to use as seed
    :param deterministic_cudnn: Enable for full reproducibility when using CUDA. Caution: might slow down training.
    """
    # Seed Python's built-in RNG and NumPy.
    random.seed(seed)
    np.random.seed(seed)
    # PYTHONHASHSEED only affects hash randomization in newly spawned Python processes.
    os.environ["PYTHONHASHSEED"] = str(seed)

    # torch is an optional dependency, so seed it only if it is installed.
    try:
        import torch

        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        if deterministic_cudnn:
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False

    except (ImportError, ModuleNotFoundError) as exc:
        logger.info("Could not set PyTorch seed because torch is not installed. Exception: {exception}", exception=exc)
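

if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the upstream file): call
    # set_all_seeds once at startup, before constructing models or shuffling
    # data, so Python, NumPy, and (if installed) PyTorch all start from the
    # same seed. The seed value 42 below is an arbitrary example.
    set_all_seeds(seed=42, deterministic_cudnn=True)
    # With a fixed seed, these draws are identical across runs.
    print(random.random(), np.random.rand(3))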