Mirror of https://github.com/ggerganov/llama.cpp.git, synced 2025-04-22 07:56:05 +00:00

llama : functions -> methods (#11110)

* llama : add struct llama_vocab to the API (#11156)
* hparams : move vocab params to llama_vocab (#11159)
* vocab : more pimpl (#11165)
* vocab : minor tokenization optimizations (#11160)
* lora : update API names (#11167)
* llama : update API names to use correct prefix (#11174)
* vocab : llama_vocab_add_[be]os -> llama_vocab_get_add_[be]os (#11174)
* vocab : llama_vocab_n_vocab -> llama_vocab_n_tokens (#11174)

Co-authored-by: Diego Devesa <slarengh@gmail.com>
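To make the renames concrete, here is a minimal sketch of the post-#11174 vocab accessors. Model loading is omitted, and llama_model_get_vocab is assumed to be available from the public llama.h API; the old names in the comments are the ones the commit message lists.

#include "llama.h"

#include <cstdio>

void print_vocab_info(const struct llama_model * model) {
    const struct llama_vocab * vocab = llama_model_get_vocab(model);

    // formerly llama_vocab_n_vocab
    const int32_t n_tokens = llama_vocab_n_tokens(vocab);

    // formerly llama_vocab_add_bos
    const bool add_bos = llama_vocab_get_add_bos(vocab);

    printf("vocab size: %d, add BOS: %s\n", n_tokens, add_bos ? "yes" : "no");
}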
33 lines · 754 B · C++
#pragma once

// TODO: rename llama-sampling.h/.cpp to llama-sampler.h/.cpp ?

#include "llama.h"

#include <vector>

struct llama_vocab;
struct llama_grammar;

// sampler chain

struct llama_sampler_chain {
    llama_sampler_chain_params params;

    std::vector<struct llama_sampler *> samplers;

    // timing

    mutable int64_t t_sample_us; // total time spent sampling, in microseconds

    mutable int32_t n_sample;    // number of sampled tokens
};

struct llama_sampler * llama_sampler_init_dry_testing(
        int32_t context_size,
        float   dry_multiplier,
        float   dry_base,
        int32_t dry_allowed_length,
        int32_t dry_penalty_last_n,
        const std::vector<std::vector<llama_token>> & seq_breakers);
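Since llama_sampler_init_dry_testing is only declared in this header, here is a minimal sketch of how it might be exercised from a test. It assumes this header is llama-sampling.h (per the TODO comment above), uses the public sampler-chain API from llama.h (llama_sampler_chain_default_params, llama_sampler_chain_init, llama_sampler_chain_add, llama_sampler_free), and all token ids and parameter values are purely illustrative.

#include "llama-sampling.h"

#include <vector>

int main() {
    // Sequence breakers as raw token ids; real code would tokenize strings
    // such as "\n" first. The ids below are placeholders.
    const std::vector<std::vector<llama_token>> seq_breakers = {
        { 13 },
        { 345, 678 },
    };

    // Arguments: context_size, dry_multiplier, dry_base,
    // dry_allowed_length, dry_penalty_last_n, seq_breakers.
    struct llama_sampler * dry = llama_sampler_init_dry_testing(
        4096, 0.8f, 1.75f, 2, 256, seq_breakers);

    // Samplers are normally owned by a chain; freeing the chain frees them.
    struct llama_sampler * chain = llama_sampler_chain_init(llama_sampler_chain_default_params());
    llama_sampler_chain_add(chain, dry);

    llama_sampler_free(chain);
    return 0;
}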