Improve multi-gpu handling at the limit

Still not complete. The prediction logic needs further refinement so it
understands each discrete GPU's available space and can work out how many
layers fit on each one. Since a single layer can't be split across multiple
GPUs, we can't treat the combined free space as one logical block.

Daniel Hiltgen
2024-05-18 12:34:31 -07:00
parent 206797bda4
commit 6fd04ca922
11 changed files with 390 additions and 90 deletions
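
The constraint in the commit message is the crux of the change: a layer
either fits wholly on one GPU or it does not load at all. Below is a minimal
Go sketch of that per-GPU fitting idea, assuming a hypothetical uniform
layer size; it illustrates the reasoning, not Ollama's actual estimator.

package main

import "fmt"

// layersPerGPU treats each GPU's free memory as a separate bin: whole
// layers only, so free space across GPUs cannot be pooled.
func layersPerGPU(freeMemory []uint64, layerSize uint64) []int {
	fits := make([]int, len(freeMemory))
	for i, free := range freeMemory {
		fits[i] = int(free / layerSize) // integer division: no partial layers
	}
	return fits
}

func main() {
	// Two GPUs with 3 GiB free each and 2 GiB layers: one layer fits on
	// each. Pooling the 6 GiB as one block would wrongly predict three.
	free := []uint64{3 << 30, 3 << 30}
	fmt.Println(layersPerGPU(free, 2<<30)) // [1 1]
}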


@@ -27,7 +27,7 @@ const (
 	GPUTotalMemoryFileGlob = "mem_banks/*/properties" // size_in_bytes line
 	// Direct Rendering Manager sysfs location
-	DRMDeviceDirGlob   = "/sys/class/drm/card[0-9]/device"
+	DRMDeviceDirGlob   = "/sys/class/drm/card*/device"
 	DRMTotalMemoryFile = "mem_info_vram_total"
 	DRMUsedMemoryFile  = "mem_info_vram_used"
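
A quick illustration (not part of the commit) of why the glob widens:
card[0-9] matches exactly one digit, so an eleventh GPU exposed as card10
would be skipped, while card* accepts any suffix.

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	for _, name := range []string{"card0", "card9", "card10"} {
		narrow, _ := filepath.Match("card[0-9]", name) // old pattern
		wide, _ := filepath.Match("card*", name)       // new pattern
		fmt.Printf("%-7s old=%-5v new=%v\n", name, narrow, wide)
	}
}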


@@ -246,10 +246,6 @@ func GetGPUInfo() GpuInfoList {
 		return GpuInfoList{cpus[0].GpuInfo}
 	}
-	// TODO - implement
-	// TODO refine the discovery to only gather total memory
 	// On windows we bundle the nvidia library one level above the runner dir
 	depPath := ""
 	if runtime.GOOS == "windows" && envconfig.RunnersDir != "" {
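
To make the surviving comment concrete: "one level above the runner dir" is
simply the parent directory. A hedged sketch, where the directory layout and
the runners path are assumptions, not code from this commit:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	runnersDir := `C:\Program Files\Ollama\runners` // hypothetical layout
	depPath := filepath.Dir(runnersDir)             // one level above it
	// Prints C:\Program Files\Ollama when built for Windows.
	fmt.Println(depPath)
}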


@@ -44,14 +44,14 @@ type CPUInfo struct {
 type CudaGPUInfo struct {
 	GpuInfo
-	index int // device index
+	index int // nolint: unused
 }
 type CudaGPUInfoList []CudaGPUInfo
 type RocmGPUInfo struct {
 	GpuInfo
-	usedFilepath string // linux
-	index        int    // device index on windows
+	usedFilepath string // nolint: unused
+	index        int    // nolint: unused
 }
 type RocmGPUInfoList []RocmGPUInfo
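
The directive pattern above is worth a note: fields touched only by one
platform's code path trip the linter's unused check everywhere else, and an
inline nolint comment suppresses that single finding. A small illustration
(this struct is a stand-in, not the commit's file; golangci-lint's canonical
form is the spaceless //nolint:unused):

package main

import "fmt"

type rocmGPU struct {
	usedFilepath string //nolint:unused // read on Linux only
	index        int    //nolint:unused // used on Windows only
}

func main() {
	var g rocmGPU // fields deliberately left unreferenced here
	fmt.Printf("%+v\n", g)
}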