$options = [
    // Generation parameters
    'temperature' => 0.7,           // Controls randomness (typically 0.0 to 1.0; some providers accept up to 2.0)
    'max_tokens' => 1000,           // Maximum number of tokens to generate
    'top_p' => 0.95,                // Nucleus sampling parameter
    'frequency_penalty' => 0.0,     // Penalizes tokens in proportion to how often they have appeared
    'presence_penalty' => 0.0,      // Penalizes tokens that have already appeared, encouraging new topics
    'stream' => false,              // Enable streaming responses
    'stop' => ["\n\n", "User:"],    // Stop sequences that end generation
    // Provider-specific options
    'top_k' => 40,                  // Top-k sampling (supported by some providers)
    'response_format' => [          // OpenAI-specific format control
        'type' => 'json_object',
    ],
    // Additional provider-specific options...
];
$inference = new Inference();

$response = $inference->with(
    messages: 'Write a short poem about programming.',
    options: $options,
)->toText();
// @doctest id="ee36"
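
// A minimal follow-up sketch, separate from the snippet above: it reuses only
// the API already shown (with(...)->toText()) to make a second, more
// deterministic call with JSON-mode output. The prompt and parameter values
// below are illustrative assumptions, not part of the original example.
$jsonOptions = [
    'temperature' => 0.0,                            // deterministic output
    'max_tokens' => 200,
    'response_format' => ['type' => 'json_object'],  // request a JSON object
];

$jsonResponse = (new Inference())->with(
    messages: 'List three programming languages as a JSON object with a "languages" key.',
    options: $jsonOptions,
)->toText();

// Assumes toText() returns the generated text as a plain string.
echo $jsonResponse . PHP_EOL;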