Merge pull request #4143 from ollama/mxyng/final-response

omit prompt and generation settings from the final response
Michael Yang
2024-05-03 17:39:49 -07:00
committed by GitHub


@@ -1186,8 +1186,6 @@ struct llama_server_context
             {"model", params.model_alias},
             {"tokens_predicted", slot.n_decoded},
             {"tokens_evaluated", slot.n_prompt_tokens},
-            {"generation_settings", get_formated_generation(slot)},
-            {"prompt", slot.prompt},
             {"truncated", slot.truncated},
             {"stopped_eos", slot.stopped_eos},
             {"stopped_word", slot.stopped_word},