Skip to content

Commit fec8f6d

Browse files
IAmStoxe and nsarrazin
authored
Fix system message handling to preserve user-configured system prompts (huggingface#1764)
Previously, when a preprompt was provided, it would completely replace any user-configured system message instead of being prepended to it. Co-authored-by: Nathan Sarrazin <[email protected]>
1 parent c95b148 commit fec8f6d

File tree

1 file changed

+28
-7
lines changed

1 file changed

+28
-7
lines changed

src/lib/server/endpoints/openai/endpointOai.ts

Lines changed: 28 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -198,25 +198,42 @@ export async function endpointOai(
198198
toolResults,
199199
conversationId,
200200
}) => {
201+
// Format messages for the chat API, handling multimodal content if supported
201202
let messagesOpenAI: OpenAI.Chat.Completions.ChatCompletionMessageParam[] =
202203
await prepareMessages(messages, imageProcessor, !model.tools && model.multimodal);
203204

204-
if (messagesOpenAI?.[0]?.role !== "system") {
205-
messagesOpenAI = [{ role: "system", content: "" }, ...messagesOpenAI];
206-
}
205+
// Check if a system message already exists as the first message
206+
const hasSystemMessage = messagesOpenAI.length > 0 && messagesOpenAI[0]?.role === "system";
207207

208-
if (messagesOpenAI?.[0]) {
209-
messagesOpenAI[0].content = preprompt ?? "";
208+
if (hasSystemMessage) {
209+
// System message exists - preserve user configuration
210+
if (preprompt !== undefined) {
211+
// Prepend preprompt to existing system message if preprompt exists
212+
const userSystemPrompt = messagesOpenAI[0].content || "";
213+
messagesOpenAI[0].content =
214+
preprompt + (userSystemPrompt ? "\n\n" + userSystemPrompt : "");
215+
}
216+
// If no preprompt, user's system message remains unchanged
217+
} else {
218+
// No system message exists - create a new one with preprompt or empty string
219+
messagesOpenAI = [{ role: "system", content: preprompt ?? "" }, ...messagesOpenAI];
210220
}
211221

212-
// if system role is not supported, convert first message to a user message.
213-
if (!model.systemRoleSupported && messagesOpenAI?.[0]?.role === "system") {
222+
// Handle models that don't support system role by converting to user message
223+
// This maintains compatibility with older or non-standard models
224+
if (
225+
!model.systemRoleSupported &&
226+
messagesOpenAI.length > 0 &&
227+
messagesOpenAI[0]?.role === "system"
228+
) {
214229
messagesOpenAI[0] = {
215230
...messagesOpenAI[0],
216231
role: "user",
217232
};
218233
}
219234

235+
// Format tool results for the API to provide context for follow-up tool calls
236+
// This creates the full conversation flow needed for multi-step tool interactions
220237
if (toolResults && toolResults.length > 0) {
221238
const toolCallRequests: OpenAI.Chat.Completions.ChatCompletionAssistantMessageParam = {
222239
role: "assistant",
@@ -253,12 +270,14 @@ export async function endpointOai(
253270
messagesOpenAI.push(...responses);
254271
}
255272

273+
// Combine model defaults with request-specific parameters
256274
const parameters = { ...model.parameters, ...generateSettings };
257275
const toolCallChoices = createChatCompletionToolsArray(tools);
258276
const body = {
259277
model: model.id ?? model.name,
260278
messages: messagesOpenAI,
261279
stream: streamingSupported,
280+
// Support two different ways of specifying token limits depending on the model
262281
...(useCompletionTokens
263282
? { max_completion_tokens: parameters?.max_new_tokens }
264283
: { max_tokens: parameters?.max_new_tokens }),
@@ -267,9 +286,11 @@ export async function endpointOai(
267286
top_p: parameters?.top_p,
268287
frequency_penalty: parameters?.repetition_penalty,
269288
presence_penalty: parameters?.presence_penalty,
289+
// Only include tool configuration if tools are provided
270290
...(toolCallChoices.length > 0 ? { tools: toolCallChoices, tool_choice: "auto" } : {}),
271291
};
272292

293+
// Handle both streaming and non-streaming responses with appropriate processors
273294
if (streamingSupported) {
274295
const openChatAICompletion = await openai.chat.completions.create(
275296
body as ChatCompletionCreateParamsStreaming,

0 commit comments

Comments (0)