mirror of https://github.com/ggerganov/llama.cpp.git

llama : refactor llama_context, llama_kv_cache, llm_build_context

* graph : don't mutate the KV cache during defrag
* context : reduce virtuals + remove test function
* context : move interface implementation to source file + factory
* graph : move KV cache build functions to llama_context impl
* graph : remove model reference from build_pooling
* graph : remove llama_model reference
* kv_cache : provide rope factors
* graph : rework inputs to use only unique_ptr, remove attn input abstraction
* context : remove llama_context_i abstraction
* context : clean-up
* graph : clean-up
* llama : remove redundant keywords (struct, enum)
* model : adapt gemma3
* graph : restore same attention ops as on master
* llama : remove TODO + fix indent
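The "rework inputs to use only unique_ptr" step above replaces a virtual attention-input abstraction with graph inputs that are owned exclusively by the graph result. A minimal sketch of that ownership pattern follows; the names (graph_input_i, graph_input_pos, graph_result, add_input) are illustrative stand-ins based on the commit's description, not the actual llama.cpp types:

// Sketch of the unique_ptr-owned graph-input pattern; names are
// assumptions for illustration, not llama.cpp API.
#include <cstdint>
#include <memory>
#include <vector>

// Common interface: each input knows how to fill its data
// right before the graph is evaluated.
struct graph_input_i {
    virtual ~graph_input_i() = default;
    virtual void set_input(const std::vector<int32_t> & tokens) = 0;
};

// One concrete input: per-token positions.
struct graph_input_pos : public graph_input_i {
    std::vector<int32_t> pos;

    void set_input(const std::vector<int32_t> & tokens) override {
        pos.resize(tokens.size());
        for (size_t i = 0; i < tokens.size(); ++i) {
            pos[i] = (int32_t) i;
        }
    }
};

// The graph result owns its inputs exclusively via unique_ptr -
// no shared state and no separate attention-input abstraction.
struct graph_result {
    std::vector<std::unique_ptr<graph_input_i>> inputs;

    template <typename T>
    T * add_input(std::unique_ptr<T> inp) {
        T * ptr = inp.get();
        inputs.emplace_back(std::move(inp));
        return ptr;
    }

    void set_inputs(const std::vector<int32_t> & tokens) {
        for (auto & inp : inputs) {
            inp->set_input(tokens);
        }
    }
};

int main() {
    graph_result res;
    graph_input_pos * pos = res.add_input(std::make_unique<graph_input_pos>());
    res.set_inputs({ 15043, 3186, 29889 });
    return pos->pos.size() == 3 ? 0 : 1;
}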
16 lines · 380 B · C++
#include "llama-io.h"
|
|
|
|
void llama_io_write_i::write_string(const std::string & str) {
|
|
uint32_t str_size = str.size();
|
|
|
|
write(&str_size, sizeof(str_size));
|
|
write(str.data(), str_size);
|
|
}
|
|
|
|
void llama_io_read_i::read_string(std::string & str) {
|
|
uint32_t str_size;
|
|
read_to(&str_size, sizeof(str_size));
|
|
|
|
str.assign((const char *) read(str_size), str_size);
|
|
}
|
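Together, write_string and read_string define a simple length-prefixed wire format: a uint32_t byte count followed by the raw character data. A minimal round-trip sketch using buffer-backed implementations of the write/read interfaces; io_write_buffer, io_read_buffer, and their members are assumptions for this sketch, not the actual llama.cpp classes:

// Illustrative buffer-backed I/O, mirroring the length-prefixed
// string helpers above. Class and member names are assumptions.
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <string>
#include <vector>

struct io_write_buffer {
    std::vector<uint8_t> buf;

    void write(const void * src, size_t size) {
        const uint8_t * p = (const uint8_t *) src;
        buf.insert(buf.end(), p, p + size);
    }

    // mirrors llama_io_write_i::write_string: size prefix, then bytes
    void write_string(const std::string & str) {
        uint32_t str_size = str.size();
        write(&str_size, sizeof(str_size));
        write(str.data(), str_size);
    }
};

struct io_read_buffer {
    const uint8_t * ptr;
    size_t remaining;

    // hand out a pointer into the buffer and advance past it
    const uint8_t * read(size_t size) {
        const uint8_t * res = ptr;
        ptr       += size;
        remaining -= size;
        return res;
    }

    void read_to(void * dst, size_t size) {
        std::memcpy(dst, read(size), size);
    }

    // mirrors llama_io_read_i::read_string
    void read_string(std::string & str) {
        uint32_t str_size;
        read_to(&str_size, sizeof(str_size));
        str.assign((const char *) read(str_size), str_size);
    }
};

int main() {
    io_write_buffer out;
    out.write_string("hello llama");

    io_read_buffer in = { out.buf.data(), out.buf.size() };
    std::string s;
    in.read_string(s);

    std::printf("%s\n", s.c_str()); // prints: hello llama
    return 0;
}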