diff --git a/.github/workflows/slow.yml b/.github/workflows/slow.yml index 097dcbdda..5231d9bb6 100644 --- a/.github/workflows/slow.yml +++ b/.github/workflows/slow.yml @@ -16,6 +16,7 @@ env: PYTHON_VERSION: "3.9" HATCH_VERSION: "1.14.1" HAYSTACK_MPS_ENABLED: false + HAYSTACK_XPU_ENABLED: false on: workflow_dispatch: # Activate this workflow manually diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 46c02b5b5..bec43f08c 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -401,6 +401,9 @@ jobs: needs: unit-tests runs-on: windows-latest timeout-minutes: 30 + env: + HAYSTACK_XPU_ENABLED: false + steps: - uses: actions/checkout@v4 diff --git a/haystack/utils/device.py b/haystack/utils/device.py index 256ca14eb..38653a421 100644 --- a/haystack/utils/device.py +++ b/haystack/utils/device.py @@ -28,6 +28,7 @@ class DeviceType(Enum): GPU = "cuda" DISK = "disk" MPS = "mps" + XPU = "xpu" def __str__(self): return self.value @@ -126,6 +127,16 @@ class Device: """ return Device(DeviceType.MPS) + @staticmethod + def xpu() -> "Device": + """ + Create a generic Intel XPU (Intel GPU) device. + + :returns: + The XPU device. + """ + return Device(DeviceType.XPU) + @staticmethod def from_str(string: str) -> "Device": """ @@ -482,7 +493,7 @@ def _get_default_device() -> Device: Return the default device for Haystack. Precedence: - GPU > MPS > CPU. If PyTorch is not installed, only CPU is available. + GPU > XPU > MPS > CPU. If PyTorch is not installed, only CPU is available. :returns: The default device. 
@@ -496,12 +507,21 @@ def _get_default_device() -> Device: and os.getenv("HAYSTACK_MPS_ENABLED", "true") != "false" ) has_cuda = torch.cuda.is_available() + has_xpu = ( + hasattr(torch, "xpu") + and hasattr(torch.xpu, "is_available") + and torch.xpu.is_available() + and os.getenv("HAYSTACK_XPU_ENABLED", "true") != "false" + ) except ImportError: has_mps = False has_cuda = False + has_xpu = False if has_cuda: return Device.gpu() + elif has_xpu: + return Device.xpu() elif has_mps: return Device.mps() else: diff --git a/test/utils/test_device.py b/test/utils/test_device.py index 88cd68d5c..997de6ee1 100644 --- a/test/utils/test_device.py +++ b/test/utils/test_device.py @@ -22,12 +22,14 @@ def test_device_creation(): assert Device.cpu().type == DeviceType.CPU assert Device.gpu().type == DeviceType.GPU assert Device.mps().type == DeviceType.MPS + assert Device.xpu().type == DeviceType.XPU assert Device.disk().type == DeviceType.DISK assert Device.from_str("cpu") == Device.cpu() assert Device.from_str("cuda:1") == Device.gpu(1) assert Device.from_str("disk") == Device.disk() assert Device.from_str("mps:0") == Device(DeviceType.MPS, 0) + assert Device.from_str("xpu:0") == Device(DeviceType.XPU, 0) with pytest.raises(ValueError, match="Device id must be >= 0"): Device.gpu(-1) @@ -115,23 +117,38 @@ def test_component_device_multiple(): assert multiple.first_device == ComponentDevice.from_single(Device.cpu()) +@patch("torch.xpu.is_available") @patch("torch.backends.mps.is_available") @patch("torch.cuda.is_available") -def test_component_device_resolution(torch_cuda_is_available, torch_backends_mps_is_available): +def test_component_device_resolution(torch_cuda_is_available, torch_backends_mps_is_available, torch_xpu_is_available): assert ComponentDevice.resolve_device(ComponentDevice.from_single(Device.cpu()))._single_device == Device.cpu() torch_cuda_is_available.return_value = True assert ComponentDevice.resolve_device(None)._single_device == Device.gpu(0) 
torch_cuda_is_available.return_value = False + torch_xpu_is_available.return_value = True + torch_backends_mps_is_available.return_value = False + assert ComponentDevice.resolve_device(None)._single_device == Device.xpu() + + torch_cuda_is_available.return_value = False + torch_xpu_is_available.return_value = False torch_backends_mps_is_available.return_value = True assert ComponentDevice.resolve_device(None)._single_device == Device.mps() torch_cuda_is_available.return_value = False + torch_xpu_is_available.return_value = False torch_backends_mps_is_available.return_value = False assert ComponentDevice.resolve_device(None)._single_device == Device.cpu() torch_cuda_is_available.return_value = False + torch_xpu_is_available.return_value = False torch_backends_mps_is_available.return_value = True os.environ["HAYSTACK_MPS_ENABLED"] = "false" assert ComponentDevice.resolve_device(None)._single_device == Device.cpu() + + torch_cuda_is_available.return_value = False + torch_xpu_is_available.return_value = True + os.environ["HAYSTACK_XPU_ENABLED"] = "false" + torch_backends_mps_is_available.return_value = False + assert ComponentDevice.resolve_device(None)._single_device == Device.cpu()