llama: update vendor code to commit ba1cb19c (#8101)

Author: Jeffrey Morgan
Date: 2024-12-14 14:55:51 -08:00
Committed by: GitHub
Parent: 60f75560a2
Commit: 7a81daf026
273 changed files with 3194 additions and 1900 deletions


@@ -86,7 +86,7 @@ GPU_COMPILER_CUFLAGS = \
 -D_GNU_SOURCE \
 -D_XOPEN_SOURCE=600 \
 -DUSE_PROF_API=1 \
--std=gnu++14 \
+-std=gnu++17 \
 -x hip \
 -mllvm=-amdgpu-early-inline-all=true \
 -mllvm=-amdgpu-function-calls=false \

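This hunk raises the language standard for the HIP/ROCm build from gnu++14 to gnu++17 to match the updated vendor code. As a minimal sketch (not taken from the vendored sources), a translation unit like the one below fails fast if the flag never reaches the compiler and exercises two constructs that only exist at C++17:

    // Sketch only: verifies the effective language level and uses C++17 features.
    #if __cplusplus < 201703L
    #error "expected C++17 (-std=gnu++17 or newer)"
    #endif

    #include <optional>
    #include <utility>

    // Structured bindings are C++17-only; this does not compile under gnu++14.
    static int sum_pair(const std::pair<int, int> &p) {
        auto [a, b] = p;
        return a + b;
    }

    // std::optional is also a C++17 addition.
    static std::optional<int> maybe_sum(bool have_data) {
        return have_data ? std::optional<int>(sum_pair({2, 3})) : std::nullopt;
    }

    int main() {
        return maybe_sum(true).value_or(0) == 5 ? 0 : 1;
    }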

@@ -115,19 +115,19 @@ GGML_FILES= \
 ggml/src/ggml-backend-impl.h \
 ggml/include/ggml-alloc.h \
 ggml/src/ggml-alloc.c \
-ggml/src/ggml-aarch64.h \
-ggml/src/ggml-aarch64.c \
 ggml/include/ggml-blas.h \
 ggml/include/ggml-cpp.h \
 ggml/src/ggml-threading.cpp \
 ggml/src/ggml-blas/ggml-blas.cpp \
 ggml/src/ggml-cpu/ggml-cpu.c \
-ggml/src/ggml-cpu/ggml-cpu-aarch64.c \
 ggml/src/ggml-cpu/ggml-cpu.cpp \
 ggml/src/ggml-cpu/ggml-cpu-aarch64.h \
+ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp \
 ggml/src/ggml-cpu/ggml-cpu-quants.h \
 ggml/src/ggml-cpu/ggml-cpu-quants.c \
 ggml/src/ggml-cpu/ggml-cpu-impl.h \
+ggml/src/ggml-cpu/ggml-cpu-traits.h \
+ggml/src/ggml-cpu/ggml-cpu-traits.cpp \
 ggml/src/ggml-cpu/amx/amx.h \
 ggml/src/ggml-cpu/amx/amx.cpp \
 ggml/src/ggml-cpu/amx/mmq.cpp \


@@ -23,7 +23,7 @@ ifeq ($(OS),windows)
 else ifeq ($(OS),linux)
 # On linux, nvcc requires avx512 -> -mavx512f -mavx512dq -mavx512bw
 GPU_VECTOR_FLAGS=$(if $(filter avx512,$(GPU_RUNNER_CPU_FLAGS)),avx512f avx512dq avx512bw) $(filter-out avx512,$(GPU_RUNNER_CPU_FLAGS))
-GPU_COMPILER_EXTRA_FLAGS = -fPIC -Wno-unused-function -std=c++11
+GPU_COMPILER_EXTRA_FLAGS = -fPIC -Wno-unused-function -std=c++17
 GPU_LIBS = $(sort $(wildcard $(addsuffix *.$(SHARED_EXT).*,$(addprefix $(GPU_LIB_DIR)/$(SHARED_PREFIX),$(GPU_RUNNER_LIBS_SHORT)))))
 GPU_COMPILER_CFLAGS = $(CFLAGS) -Xcompiler -fPIC -D_GNU_SOURCE
 GPU_COMPILER_CXXFLAGS = $(CXXFLAGS) -Xcompiler -fPIC -D_GNU_SOURCE

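The CUDA path on Linux gets the same standard bump, moving the extra compiler flags from -std=c++11 to -std=c++17, which nvcc has accepted since CUDA 11.0. A small illustrative sketch of a construct that needs the new flag (the dequantize helper is invented for this example, not taken from ggml):

    #include <cstdint>
    #include <cstdio>
    #include <type_traits>

    // `if constexpr` and the *_v trait shorthands are C++17 features; under
    // the old -std=c++11 flag this would need tag dispatch or SFINAE instead.
    template <typename T>
    float dequantize(T v, float scale) {
        if constexpr (std::is_integral_v<T>) {
            return static_cast<float>(v) * scale;  // integer inputs get scaled
        } else {
            return static_cast<float>(v);          // floating point passes through
        }
    }

    int main() {
        std::printf("%.2f %.2f\n", dequantize(std::int8_t{64}, 0.25f), dequantize(1.5f, 0.25f));
        return 0;
    }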

@@ -17,7 +17,7 @@ GPU_RUNNER_LIBS = $(wildcard $(addsuffix .$(SHARED_EXT).*,$(addprefix $(GPU_LIB_
 GPU_RUNNER_SRCS := \
 $(filter-out $(wildcard llama/ggml-cuda/fattn*.cu),$(wildcard llama/ggml-cuda/*.cu)) \
 $(wildcard llama/ggml-cuda/template-instances/mmq*.cu) \
-llama/ggml.c llama/ggml-backend.cpp llama/ggml-alloc.c llama/ggml-quants.c llama/sgemm.cpp llama/ggml-aarch64.c llama/ggml-threading.cpp
+llama/ggml.c llama/ggml-backend.cpp llama/ggml-alloc.c llama/ggml-quants.c llama/sgemm.cpp llama/ggml-threading.cpp
 GPU_RUNNER_HDRS := \
 $(wildcard llama/ggml-cuda/*.cuh)