From 5e786e89cb94dfd9008c4db75e456d69f1365cc0 Mon Sep 17 00:00:00 2001
From: oleg
Date: Wed, 26 Mar 2025 14:26:09 +0100
Subject: [PATCH 1/2] fix(Basic LLM Chain Node): Prevent incorrect wrapping of
 output (#14183)

---
 .../nodes/chains/ChainLLM/ChainLlm.node.ts    |   4 +-
 .../chains/ChainLLM/methods/chainExecutor.ts  |  52 +++++++-
 .../ChainLLM/methods/responseFormatter.ts     |  14 ++-
 .../ChainLLM/test/chainExecutor.test.ts       | 111 +++++++++++++++++-
 .../ChainLLM/test/responseFormatter.test.ts   |  18 ++-
 5 files changed, 174 insertions(+), 25 deletions(-)

diff --git a/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/ChainLlm.node.ts b/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/ChainLlm.node.ts
index 1419e5823cd7e..24ae67b20b6b8 100644
--- a/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/ChainLlm.node.ts
+++ b/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/ChainLlm.node.ts
@@ -34,7 +34,7 @@ export class ChainLlm implements INodeType {
 		icon: 'fa:link',
 		iconColor: 'black',
 		group: ['transform'],
-		version: [1, 1.1, 1.2, 1.3, 1.4, 1.5],
+		version: [1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6],
 		description: 'A simple chain to prompt a large language model',
 		defaults: {
 			name: 'Basic LLM Chain',
@@ -119,7 +119,7 @@ export class ChainLlm implements INodeType {
 			// Process each response and add to return data
 			responses.forEach((response) => {
 				returnData.push({
-					json: formatResponse(response),
+					json: formatResponse(response, this.getNode().typeVersion),
 				});
 			});
 		} catch (error) {
diff --git a/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/methods/chainExecutor.ts b/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/methods/chainExecutor.ts
index 60f2c3fd00972..0f3b5dc120d1e 100644
--- a/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/methods/chainExecutor.ts
+++ b/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/methods/chainExecutor.ts
@@ -1,5 +1,6 @@
 import type { BaseLanguageModel } from '@langchain/core/language_models/base';
-import { StringOutputParser } from '@langchain/core/output_parsers';
+import type { BaseLLMOutputParser } from '@langchain/core/output_parsers';
+import { JsonOutputParser, StringOutputParser } from '@langchain/core/output_parsers';
 import type { ChatPromptTemplate, PromptTemplate } from '@langchain/core/prompts';
 import type { IExecuteFunctions } from 'n8n-workflow';
 
@@ -8,6 +9,46 @@ import { getTracingConfig } from '@utils/tracing';
 import { createPromptTemplate } from './promptUtils';
 import type { ChainExecutionParams } from './types';
 
+/**
+ * Type guard to check if the LLM has a modelKwargs property (OpenAI)
+ */
+export function isModelWithResponseFormat(
+	llm: BaseLanguageModel,
+): llm is BaseLanguageModel & { modelKwargs: { response_format: { type: string } } } {
+	return (
+		'modelKwargs' in llm &&
+		!!llm.modelKwargs &&
+		typeof llm.modelKwargs === 'object' &&
+		'response_format' in llm.modelKwargs
+	);
+}
+
+/**
+ * Type guard to check if the LLM has a format property (Ollama)
+ */
+export function isModelWithFormat(
+	llm: BaseLanguageModel,
+): llm is BaseLanguageModel & { format: string } {
+	return 'format' in llm && typeof llm.format !== 'undefined';
+}
+
+/**
+ * Determines if an LLM is configured to output JSON and returns the appropriate output parser
+ */
+export function getOutputParserForLLM(
+	llm: BaseLanguageModel,
+): BaseLLMOutputParser<string | Record<string, unknown>> {
+	if (isModelWithResponseFormat(llm) && llm.modelKwargs?.response_format?.type === 'json_object') {
+		return new JsonOutputParser();
+	}
+
+	if (isModelWithFormat(llm) && llm.format === 'json') {
+		return new JsonOutputParser();
+	}
+
+	return new StringOutputParser();
+}
+
 /**
  * Creates a simple chain for LLMs without output parsers
  */
@@ -21,11 +62,10 @@ async function executeSimpleChain({
 	llm: BaseLanguageModel;
 	query: string;
 	prompt: ChatPromptTemplate | PromptTemplate;
-}): Promise<string[]> {
-	const chain = prompt
-		.pipe(llm)
-		.pipe(new StringOutputParser())
-		.withConfig(getTracingConfig(context));
+}) {
+	const outputParser = getOutputParserForLLM(llm);
+
+	const chain = prompt.pipe(llm).pipe(outputParser).withConfig(getTracingConfig(context));
 
 	// Execute the chain
 	const response = await chain.invoke({
diff --git a/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/methods/responseFormatter.ts b/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/methods/responseFormatter.ts
index e045f5fab9607..6f1c65b79ee8a 100644
--- a/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/methods/responseFormatter.ts
+++ b/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/methods/responseFormatter.ts
@@ -3,12 +3,10 @@ import type { IDataObject } from 'n8n-workflow';
 /**
  * Formats the response from the LLM chain into a consistent structure
  */
-export function formatResponse(response: unknown): IDataObject {
+export function formatResponse(response: unknown, version: number): IDataObject {
 	if (typeof response === 'string') {
 		return {
-			response: {
-				text: response.trim(),
-			},
+			text: response.trim(),
 		};
 	}
 
@@ -19,7 +17,13 @@ export function formatResponse(response: unknown): IDataObject {
 	}
 
 	if (response instanceof Object) {
-		return response as IDataObject;
+		if (version >= 1.6) {
+			return response as IDataObject;
+		}
+
+		return {
+			text: JSON.stringify(response),
+		};
 	}
 
 	return {
diff --git a/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/test/chainExecutor.test.ts b/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/test/chainExecutor.test.ts
index 7cb3456d89fcd..4ca92c7222561 100644
--- a/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/test/chainExecutor.test.ts
+++ b/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/test/chainExecutor.test.ts
@@ -1,4 +1,5 @@
-import { StringOutputParser } from '@langchain/core/output_parsers';
+import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
+import { JsonOutputParser, StringOutputParser } from '@langchain/core/output_parsers';
 import { ChatPromptTemplate, PromptTemplate } from '@langchain/core/prompts';
 import { FakeLLM, FakeChatModel } from '@langchain/core/utils/testing';
 import { mock } from 'jest-mock-extended';
@@ -8,6 +9,7 @@ import type { N8nOutputParser } from '@utils/output_parsers/N8nOutputParser';
 import * as tracing from '@utils/tracing';
 
 import { executeChain } from '../methods/chainExecutor';
+import * as chainExecutor from '../methods/chainExecutor';
 import * as promptUtils from '../methods/promptUtils';
 
 jest.mock('@utils/tracing', () => ({
@@ -27,6 +29,41 @@ describe('chainExecutor', () => {
 		jest.clearAllMocks();
 	});
 
+	describe('getOutputParserForLLM', () => {
+		it('should return JsonOutputParser for OpenAI-like models with json_object response format', () => {
+			const openAILikeModel = {
+				modelKwargs: {
+					response_format: {
+						type: 'json_object',
+					},
+				},
+			};
+
+			const parser = chainExecutor.getOutputParserForLLM(
+				openAILikeModel as unknown as BaseChatModel,
+			);
+			expect(parser).toBeInstanceOf(JsonOutputParser);
+		});
+
+		it('should return JsonOutputParser for Ollama models with json format', () => {
+			const ollamaLikeModel = {
+				format: 'json',
+			};
+
+			const parser = chainExecutor.getOutputParserForLLM(
+				ollamaLikeModel as unknown as BaseChatModel,
+			);
+			expect(parser).toBeInstanceOf(JsonOutputParser);
+		});
+
+		it('should return StringOutputParser for models without JSON format settings', () => {
+			const regularModel = new FakeLLM({});
+
+			const parser = chainExecutor.getOutputParserForLLM(regularModel);
+			expect(parser).toBeInstanceOf(StringOutputParser);
+		});
+	});
+
 	describe('executeChain', () => {
 		it('should execute a simple chain without output parsers', async () => {
 			const fakeLLM = new FakeLLM({ response: 'Test response' });
@@ -219,5 +256,77 @@ describe('chainExecutor', () => {
 
 			expect(result).toEqual(['Test chat response']);
 		});
+
+		it('should use JsonOutputParser for OpenAI models with json_object response format', async () => {
+			const fakeOpenAIModel = new FakeChatModel({});
+			(
+				fakeOpenAIModel as unknown as { modelKwargs: { response_format: { type: string } } }
+			).modelKwargs = {
+				response_format: { type: 'json_object' },
+			};
+
+			const mockPromptTemplate = new PromptTemplate({
+				template: '{query}',
+				inputVariables: ['query'],
+			});
+
+			const mockChain = {
+				invoke: jest.fn().mockResolvedValue('{"result": "json data"}'),
+			};
+
+			const withConfigMock = jest.fn().mockReturnValue(mockChain);
+			const pipeOutputParserMock = jest.fn().mockReturnValue({
+				withConfig: withConfigMock,
+			});
+
+			mockPromptTemplate.pipe = jest.fn().mockReturnValue({
+				pipe: pipeOutputParserMock,
+			});
+
+			(promptUtils.createPromptTemplate as jest.Mock).mockResolvedValue(mockPromptTemplate);
+
+			await executeChain({
+				context: mockContext,
+				itemIndex: 0,
+				query: 'Hello',
+				llm: fakeOpenAIModel,
+			});
+
+			expect(pipeOutputParserMock).toHaveBeenCalledWith(expect.any(JsonOutputParser));
+		});
+
+		it('should use JsonOutputParser for Ollama models with json format', async () => {
+			const fakeOllamaModel = new FakeChatModel({});
+			(fakeOllamaModel as unknown as { format: string }).format = 'json';
+
+			const mockPromptTemplate = new PromptTemplate({
+				template: '{query}',
+				inputVariables: ['query'],
+			});
+
+			const mockChain = {
+				invoke: jest.fn().mockResolvedValue('{"result": "json data"}'),
+			};
+
+			const withConfigMock = jest.fn().mockReturnValue(mockChain);
+			const pipeOutputParserMock = jest.fn().mockReturnValue({
+				withConfig: withConfigMock,
+			});
+
+			mockPromptTemplate.pipe = jest.fn().mockReturnValue({
+				pipe: pipeOutputParserMock,
+			});
+
+			(promptUtils.createPromptTemplate as jest.Mock).mockResolvedValue(mockPromptTemplate);
+
+			await executeChain({
+				context: mockContext,
+				itemIndex: 0,
+				query: 'Hello',
+				llm: fakeOllamaModel,
+			});
+
+			expect(pipeOutputParserMock).toHaveBeenCalledWith(expect.any(JsonOutputParser));
+		});
 	});
 });
diff --git a/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/test/responseFormatter.test.ts b/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/test/responseFormatter.test.ts
index ec8cf598584bc..08bc587d05c1a 100644
--- a/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/test/responseFormatter.test.ts
+++ b/packages/@n8n/nodes-langchain/nodes/chains/ChainLLM/test/responseFormatter.test.ts
@@ -3,38 +3,34 @@ import { formatResponse } from '../methods/responseFormatter';
 describe('responseFormatter', () => {
 	describe('formatResponse', () => {
 		it('should format string responses', () => {
-			const result = formatResponse('Test response');
+			const result = formatResponse('Test response', 1.6);
 			expect(result).toEqual({
-				response: {
-					text: 'Test response',
-				},
+				text: 'Test response',
 			});
 		});
 
 		it('should trim string responses', () => {
-			const result = formatResponse(' Test response with whitespace ');
+			const result = formatResponse(' Test response with whitespace ', 1.6);
 			expect(result).toEqual({
-				response: {
-					text: 'Test response with whitespace',
-				},
+				text: 'Test response with whitespace',
 			});
 		});
 
 		it('should handle array responses', () => {
 			const testArray = [{ item: 1 }, { item: 2 }];
-			const result = formatResponse(testArray);
+			const result = formatResponse(testArray, 1.6);
 			expect(result).toEqual({ data: testArray });
 		});
 
 		it('should handle object responses', () => {
 			const testObject = { key: 'value', nested: { key: 'value' } };
-			const result = formatResponse(testObject);
+			const result = formatResponse(testObject, 1.6);
 			expect(result).toEqual(testObject);
 		});
 
 		it('should handle primitive non-string responses', () => {
 			const testNumber = 42;
-			const result = formatResponse(testNumber);
+			const result = formatResponse(testNumber, 1.6);
 			expect(result).toEqual({
 				response: {
 					text: 42,
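
Not part of the diff above: a minimal sketch of the parser selection this patch introduces, assuming the package's relative import paths. The model literals and the file name are hypothetical stand-ins for the chat model that n8n connects at runtime.

	// parser-selection-sketch.ts (hypothetical file name)
	import type { BaseLanguageModel } from '@langchain/core/language_models/base';
	import { JsonOutputParser, StringOutputParser } from '@langchain/core/output_parsers';

	import { getOutputParserForLLM } from './methods/chainExecutor';

	// OpenAI-style JSON mode is detected via modelKwargs.response_format.
	const openAIStyle = {
		modelKwargs: { response_format: { type: 'json_object' } },
	} as unknown as BaseLanguageModel;
	console.log(getOutputParserForLLM(openAIStyle) instanceof JsonOutputParser); // true

	// Ollama-style JSON mode is detected via the top-level format property.
	const ollamaStyle = { format: 'json' } as unknown as BaseLanguageModel;
	console.log(getOutputParserForLLM(ollamaStyle) instanceof JsonOutputParser); // true

	// No JSON hint on the model: previous behavior, a plain string parser.
	const plainModel = {} as unknown as BaseLanguageModel;
	console.log(getOutputParserForLLM(plainModel) instanceof StringOutputParser); // true

Either JSON branch pipes the raw completion through JsonOutputParser, so the chain yields a parsed object instead of a JSON string that formatResponse would then wrap as text.
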
From 658da95dde9301d6b0ff6f7d720e6376ab07f5a4 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Wed, 26 Mar 2025 14:55:06 +0100
Subject: [PATCH 2/2] :rocket: Release 1.85.3 (#14188)

Co-authored-by: CharlieKolb <13814565+CharlieKolb@users.noreply.github.com>
---
 CHANGELOG.md                               | 9 +++++++++
 package.json                               | 2 +-
 packages/@n8n/nodes-langchain/package.json | 2 +-
 packages/cli/package.json                  | 2 +-
 4 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8f194c2fd1e35..53c0e3ca5f957 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,12 @@
+## [1.85.3](https://github.com/n8n-io/n8n/compare/n8n@1.85.2...n8n@1.85.3) (2025-03-26)
+
+
+### Bug Fixes
+
+* **Basic LLM Chain Node:** Prevent incorrect wrapping of output ([#14183](https://github.com/n8n-io/n8n/issues/14183)) ([5e786e8](https://github.com/n8n-io/n8n/commit/5e786e89cb94dfd9008c4db75e456d69f1365cc0))
+
+
+
 ## [1.85.2](https://github.com/n8n-io/n8n/compare/n8n@1.85.1...n8n@1.85.2) (2025-03-25)
 
 
diff --git a/package.json b/package.json
index c812075f1c74d..ad14c665f7b3e 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "n8n-monorepo",
-  "version": "1.85.2",
+  "version": "1.85.3",
   "private": true,
   "engines": {
     "node": ">=20.15",
diff --git a/packages/@n8n/nodes-langchain/package.json b/packages/@n8n/nodes-langchain/package.json
index ac1b71a34deeb..03da44f69d5c5 100644
--- a/packages/@n8n/nodes-langchain/package.json
+++ b/packages/@n8n/nodes-langchain/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@n8n/n8n-nodes-langchain",
-  "version": "1.85.1",
+  "version": "1.85.2",
   "description": "",
   "main": "index.js",
   "scripts": {
diff --git a/packages/cli/package.json b/packages/cli/package.json
index aed25e0390853..9bf3f87b44be5 100644
--- a/packages/cli/package.json
+++ b/packages/cli/package.json
@@ -1,6 +1,6 @@
 {
   "name": "n8n",
-  "version": "1.85.2",
+  "version": "1.85.3",
   "description": "n8n Workflow Automation Tool",
   "main": "dist/index",
   "types": "dist/index.d.ts",
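
Also outside the patches: a sketch of how the typeVersion gate in formatResponse plays out, assuming the same relative import path; the 1.5 and 1.6 values are the node typeVersions wired up in ChainLlm.node.ts, and the file name is hypothetical.

	// format-response-sketch.ts (hypothetical file name)
	import { formatResponse } from './methods/responseFormatter';

	// Strings are trimmed and returned unwrapped as { text }.
	console.log(formatResponse('  hello  ', 1.6)); // { text: 'hello' }

	// Parsed JSON objects pass through untouched on typeVersion 1.6...
	console.log(formatResponse({ answer: 42 }, 1.6)); // { answer: 42 }

	// ...while nodes still on 1.5 or older keep a stringified { text } shape,
	// so existing workflows do not silently change their output structure.
	console.log(formatResponse({ answer: 42 }, 1.5)); // { text: '{"answer":42}' }

The release commit in patch 2 ships this fix as 1.85.3; nodes created before the bump stay on their stored typeVersion and keep the old output shape.
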