From 1ec208083cb896dd8772a2f962151fa3d4a74e36 Mon Sep 17 00:00:00 2001
From: SAMI
Date: Wed, 5 Feb 2025 14:45:40 +0700
Subject: [PATCH] llava: add quantization for the visual projector LLAVA,
 Qwen2VL (#11644)

* Added quantization for visual projector
* Added README
* Fixed the clip quantize implementation in the file
* Fixed the gcc warning regarding minor linting
* Removed trailing whitespace
---
 examples/llava/CMakeLists.txt        |  7 ++++
 examples/llava/README-quantize.md    | 44 +++++++++++++++++++++
 examples/llava/clip-quantize-cli.cpp | 59 ++++++++++++++++++++++++++++
 examples/llava/clip.cpp              |  8 ++--
 4 files changed, 113 insertions(+), 5 deletions(-)
 create mode 100644 examples/llava/README-quantize.md
 create mode 100644 examples/llava/clip-quantize-cli.cpp

diff --git a/examples/llava/CMakeLists.txt b/examples/llava/CMakeLists.txt
index 3ce0d60c8..319effd19 100644
--- a/examples/llava/CMakeLists.txt
+++ b/examples/llava/CMakeLists.txt
@@ -50,3 +50,10 @@ set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-qwen2vl-cli)
 install(TARGETS ${TARGET} RUNTIME)
 target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_17)
+
+set(TARGET llama-llava-clip-quantize-cli)
+add_executable(${TARGET} clip-quantize-cli.cpp)
+set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-llava-clip-quantize-cli)
+install(TARGETS ${TARGET} RUNTIME)
+target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
diff --git a/examples/llava/README-quantize.md b/examples/llava/README-quantize.md
new file mode 100644
index 000000000..b931513ab
--- /dev/null
+++ b/examples/llava/README-quantize.md
@@ -0,0 +1,44 @@
+# Quantizing CLIP Visual Projector
+
+This tool quantizes the CLIP visual projector model. Quantization reduces the precision of the model's weights, which can significantly decrease the model size and improve inference speed, often with minimal impact on quality.
+
+## Usage
+
+To quantize a CLIP visual projector model, use the following command:
+
+```sh
+./bin/llama-llava-clip-quantize-cli /path/to/ggml-model-f32.gguf /path/to/ggml-model-quantized.gguf <type>
+```
+
+After quantization, the visual projector can be used freely with the existing LLaVA CLI tools (LLaVA, Qwen2VL, etc.).
+
+### Arguments
+
+- `/path/to/ggml-model-f32.gguf`: The path to the input model file in FP32 or FP16 format.
+- `/path/to/ggml-model-quantized.gguf`: The path where the quantized model will be saved.
+- `<type>`: The quantization type to apply, given as an integer corresponding to one of the quantization types defined in `enum ggml_type`.
+
+### Quantization Types
+
+The following quantization types are supported, based on the `enum ggml_type` definition:
+
+- `2` - `q4_0`: 4-bit quantization with one scale value per block.
+- `3` - `q4_1`: 4-bit quantization with a scale and a minimum value per block.
+- `6` - `q5_0`: 5-bit quantization with one scale value per block.
+- `7` - `q5_1`: 5-bit quantization with a scale and a minimum value per block.
+- `8` - `q8_0`: 8-bit quantization with one scale value per block.
+
+### Example
+
+To quantize a model using the `q4_0` quantization type, run:
+
+```sh
+./bin/llama-llava-clip-quantize-cli /path/to/ggml-model-f32.gguf /path/to/ggml-model-quantized.gguf 2
+```
+
+This command will generate a quantized model at `/path/to/ggml-model-quantized.gguf` using the `q4_0` quantization method.
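+
+For illustration, the quantized projector might then be used with the LLaVA CLI as follows (a sketch; the language-model and image paths are placeholders, and any LLaVA-compatible language model can stand in for them):
+
+```sh
+# run the multimodal CLI, passing the quantized projector via --mmproj
+./bin/llama-llava-cli -m /path/to/llava-language-model.gguf \
+    --mmproj /path/to/ggml-model-quantized.gguf \
+    --image /path/to/image.jpg \
+    -p "Describe the image."
+```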
+
+## Notes
+
+- Quantization can lead to a loss in model accuracy, depending on the chosen quantization type. It is recommended to evaluate the quantized model's performance on your specific task to ensure it meets your requirements.
+- The quantized model will typically be smaller and faster to run, making it more suitable for deployment in resource-constrained environments.
diff --git a/examples/llava/clip-quantize-cli.cpp b/examples/llava/clip-quantize-cli.cpp
new file mode 100644
index 000000000..566506954
--- /dev/null
+++ b/examples/llava/clip-quantize-cli.cpp
@@ -0,0 +1,59 @@
+#include "arg.h"
+#include "base64.hpp"
+#include "log.h"
+#include "common.h"
+#include "sampling.h"
+#include "clip.h"
+#include "llava.h"
+#include "llama.h"
+#include "ggml.h"
+
+static void print_usage(int argc, char ** argv) {
+    (void) argc;
+
+    fprintf(stderr, "usage: %s /path/to/ggml-model-f32.gguf /path/to/ggml-model-quantized.gguf type\n", argv[0]);
+    fprintf(stderr, "  type = 2 - q4_0\n");
+    fprintf(stderr, "  type = 3 - q4_1\n");
+    fprintf(stderr, "  type = 6 - q5_0\n");
+    fprintf(stderr, "  type = 7 - q5_1\n");
+    fprintf(stderr, "  type = 8 - q8_0\n");
+}
+
+int main(int argc, char ** argv) {
+    if (argc != 4) {
+        print_usage(argc, argv);
+        return 1;
+    }
+
+    const std::string fname_inp = argv[1];
+    const std::string fname_out = argv[2];
+
+    const int itype = atoi(argv[3]);
+
+    const int64_t t_main_start_us = ggml_time_us();
+
+    int64_t t_quantize_us = 0;
+
+    // quantize the model
+    {
+        const int64_t t_start_us = ggml_time_us();
+
+        if (!clip_model_quantize(fname_inp.c_str(), fname_out.c_str(), itype)) {
+            fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str());
+            return 1;
+        }
+
+        t_quantize_us = ggml_time_us() - t_start_us;
+    }
+
+    // report timing
+    {
+        const int64_t t_main_end_us = ggml_time_us();
+
+        printf("\n");
+        printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us / 1000.0f);
+        printf("%s:    total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us) / 1000.0f);
+    }
+
+    return 0;
+}
diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
index 7367d44cb..271cf2a2a 100644
--- a/examples/llava/clip.cpp
+++ b/examples/llava/clip.cpp
@@ -2745,10 +2745,8 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
 }
 
 bool clip_model_quantize(const char * fname_inp, const char * fname_out, const int itype) {
-    ggml_type type = GGML_TYPE_Q4_1;
-    assert(itype < GGML_TYPE_COUNT);
-    type = static_cast<ggml_type>(itype);
+    ggml_type type = static_cast<ggml_type>(itype);
 
     auto * ctx_clip = clip_model_load(fname_inp, 2);
 
@@ -2801,8 +2799,8 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
             }
         }
 
-        // quantize only 2D tensors
-        quantize &= (ggml_n_dims(cur) == 2);
+        // quantize only 2D tensors whose first dimension is larger than the block size
+        quantize &= (ggml_n_dims(cur) == 2) && cur->ne[0] > ggml_blck_size(type);
 
         if (quantize) {
             new_type = type;
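
As a rough end-to-end sketch of the workflow this patch enables (the `build` directory and the projector filenames below are assumptions, not taken from the patch):

```sh
# configure, then build only the new quantize target added in the CMakeLists.txt hunk
cmake -B build
cmake --build build --target llama-llava-clip-quantize-cli

# quantize an f16 visual projector to q4_1 (type 3)
./build/bin/llama-llava-clip-quantize-cli mmproj-model-f16.gguf mmproj-model-q4_1.gguf 3
```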