Commit b17650a

chore: test auto-streaming in a langgraph
1 parent 8858be7 commit b17650a

File tree: 5 files changed, +148 -6 lines changed

packages/langchain/package.json

Lines changed: 3 additions & 0 deletions

@@ -39,5 +39,8 @@
     "@sap-cloud-sdk/connectivity": "^4.1.2",
     "@sap-cloud-sdk/util": "^4.1.2",
     "uuid": "^13.0.0"
+  },
+  "devDependencies": {
+    "@langchain/langgraph": "^1.0.1"
   }
 }

packages/langchain/src/openai/chat.test.ts

Lines changed: 59 additions & 0 deletions

@@ -3,6 +3,12 @@ import { apiVersion } from '@sap-ai-sdk/foundation-models/internal.js';
 import { toJsonSchema } from '@langchain/core/utils/json_schema';
 import { getSchemaDescription } from '@langchain/core/utils/types';
 import { jest } from '@jest/globals';
+import {
+  START,
+  END,
+  MessagesAnnotation,
+  StateGraph
+} from '@langchain/langgraph';
 import { addNumbersTool, joke } from '../../../../test-util/tools.js';
 import {
   mockClientCredentialsGrantCall,
@@ -609,5 +615,58 @@ describe('Chat client', () => {
     });
     expect(finalOutput).toMatchSnapshot();
   });
+  it('streams when invoked in a streaming langgraph', async () => {
+    mockInference(
+      {
+        data: {
+          messages: [
+            {
+              role: 'user',
+              content: 'Hello!'
+            }
+          ],
+          stream: true,
+          stream_options: {
+            include_usage: true
+          }
+        }
+      },
+      {
+        data: mockResponseStream,
+        status: 200
+      },
+      endpoint
+    );
+    jest.spyOn(AzureOpenAiChatClient.prototype, '_streamResponseChunks');
+    // Simulate a minimal streaming langgraph-like workflow
+    const llm = new AzureOpenAiChatClient({ modelName: 'gpt-4o' });
+
+    // Simulate a node function that calls the model using invoke
+    const callModel = async (state: { messages: any }) => {
+      const messages = await llm.invoke(state.messages);
+      return { messages };
+    };
+
+    // Define a new graph
+    const workflow = new StateGraph(MessagesAnnotation)
+      // Define the (single) node in the graph
+      .addNode('model', callModel)
+      .addEdge(START, 'model')
+      .addEdge('model', END);
+
+    const app = workflow.compile();
+    const stream = await app.stream(
+      { messages: [{ role: 'user', content: 'Hello!' }] },
+      // langgraph will only enable streaming in a granular streaming mode
+      { streamMode: 'messages' as const }
+    );
+
+    let finalOutput;
+    for await (const chunk of stream) {
+      finalOutput =
+        finalOutput !== undefined ? finalOutput.concat(chunk) : chunk;
+    }
+    expect(llm._streamResponseChunks).toHaveBeenCalled();
+  });
 });
 });
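
For orientation, the new test asserts auto-streaming: the graph node only calls invoke(), yet streaming the compiled graph with streamMode: 'messages' routes the request through _streamResponseChunks, which is what the jest.spyOn expectation checks. Below is a minimal standalone sketch of the same pattern outside Jest; it is an illustration, not part of the commit, and it assumes that AzureOpenAiChatClient is imported from the published @sap-ai-sdk/langchain package and that LangGraph's 'messages' mode yields [messageChunk, metadata] tuples.

import {
  START,
  END,
  MessagesAnnotation,
  StateGraph
} from '@langchain/langgraph';
import { AzureOpenAiChatClient } from '@sap-ai-sdk/langchain';

async function main(): Promise<void> {
  const llm = new AzureOpenAiChatClient({ modelName: 'gpt-4o' });

  // The node calls invoke(); whether the underlying request streams is
  // decided by how the surrounding graph is consumed.
  const callModel = async (state: typeof MessagesAnnotation.State) => ({
    messages: await llm.invoke(state.messages)
  });

  const app = new StateGraph(MessagesAnnotation)
    .addNode('model', callModel)
    .addEdge(START, 'model')
    .addEdge('model', END)
    .compile();

  // 'messages' is the granular mode; the default modes emit whole state
  // updates rather than token-level chunks.
  const stream = await app.stream(
    { messages: [{ role: 'user', content: 'Hello!' }] },
    { streamMode: 'messages' as const }
  );

  for await (const chunk of stream) {
    // Assumed tuple shape: index 0 is the message chunk carrying content.
    const [message] = chunk as unknown as [{ content?: unknown }, unknown];
    if (typeof message?.content === 'string') {
      process.stdout.write(message.content);
    }
  }
}

main().catch(console.error);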

packages/langchain/src/orchestration/client.test.ts

Lines changed: 65 additions & 0 deletions

@@ -1,6 +1,12 @@
 import { constructCompletionPostRequest } from '@sap-ai-sdk/orchestration/internal.js';
 import { jest } from '@jest/globals';
 import nock from 'nock';
+import {
+  START,
+  END,
+  MessagesAnnotation,
+  StateGraph
+} from '@langchain/langgraph';
 import { type AIMessageChunk } from '@langchain/core/messages';
 import {
   mockClientCredentialsGrantCall,
@@ -705,4 +711,63 @@ describe('orchestration service client', () => {
     expect(finalOutput).toMatchSnapshot();
   });
 });
+
+  it('streams when invoked in a streaming langgraph', async () => {
+    mockInference(
+      {
+        data: constructCompletionPostRequest(
+          {
+            ...config,
+            promptTemplating: {
+              ...config.promptTemplating,
+              prompt: {
+                template: messages
+              }
+            }
+          },
+          { messages: [] },
+          true
+        )
+      },
+      {
+        data: mockResponseStream,
+        status: 200
+      },
+      endpoint
+    );
+    jest.spyOn(OrchestrationClient.prototype, '_streamResponseChunks');
+
+    const llm = new OrchestrationClient(config);
+
+    // Define the function that calls the model
+    const callModel = async (state: typeof MessagesAnnotation.State) => {
+      const response = await llm.invoke(state.messages);
+      // Update message history with response:
+      return { messages: response };
+    };
+
+    // Define a new graph
+    const workflow = new StateGraph(MessagesAnnotation)
+      // Define the (single) node in the graph
+      .addNode('model', callModel)
+      .addEdge(START, 'model')
+      .addEdge('model', END);
+
+    const app = workflow.compile();
+    const stream = await app.stream(
+      {
+        messages
+      },
+      // langgraph will only enable streaming in a granular streaming mode
+      { streamMode: 'messages' as const }
+    );

+    let finalOutput;
+    for await (const chunk of stream) {
+      finalOutput =
+        finalOutput !== undefined ? finalOutput.concat(chunk) : chunk;
+    }
+
+    expect(llm._streamResponseChunks).toHaveBeenCalled();
+  });
 });
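
The orchestration variant uses the same graph wiring; the config and messages fixtures it relies on are defined earlier in client.test.ts and are not part of this diff. The sketch below shows how the pattern might look in application code. It is not taken from the commit: the fixtures are stubbed with declare, streamThroughGraph is a hypothetical name, OrchestrationClient is assumed to come from the published @sap-ai-sdk/langchain package, and the tuple shape of the streamed items is assumed from LangGraph's 'messages' mode.

import {
  START,
  END,
  MessagesAnnotation,
  StateGraph
} from '@langchain/langgraph';
import { OrchestrationClient } from '@sap-ai-sdk/langchain';

// Stand-ins for the fixtures defined elsewhere in client.test.ts; their
// concrete values are not shown in this commit.
declare const config: ConstructorParameters<typeof OrchestrationClient>[0];
declare const messages: { role: 'user'; content: string }[];

export async function streamThroughGraph(): Promise<string> {
  const llm = new OrchestrationClient(config);

  // invoke() inside the node; the graph's streamMode decides whether the
  // underlying orchestration call is streamed.
  const callModel = async (state: typeof MessagesAnnotation.State) => ({
    messages: await llm.invoke(state.messages)
  });

  const app = new StateGraph(MessagesAnnotation)
    .addNode('model', callModel)
    .addEdge(START, 'model')
    .addEdge('model', END)
    .compile();

  const stream = await app.stream(
    { messages },
    { streamMode: 'messages' as const }
  );

  // Same accumulation idea as the test: flatten the emitted items into one
  // array, then keep only string content (metadata entries contribute '').
  let collected: { content?: unknown }[] = [];
  for await (const chunk of stream) {
    collected = collected.concat(chunk as unknown as { content?: unknown }[]);
  }
  return collected
    .map(item => (typeof item?.content === 'string' ? item.content : ''))
    .join('');
}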

pnpm-lock.yaml

Lines changed: 4 additions & 0 deletions
Some generated files are not rendered by default.

sample-code/src/langchain-orchestration.ts

Lines changed: 17 additions & 6 deletions

@@ -207,7 +207,10 @@ export async function invokeLangGraphChain(): Promise<string> {
 export async function invokeLangGraphChainStream(): Promise<string> {
   const app = createLangGraphApp();

-  const config = { configurable: { thread_id: uuidv4() } };
+  const config = {
+    configurable: { thread_id: uuidv4() },
+    streamMode: 'messages' as const
+  };
   const input = [
     {
       role: 'user',
@@ -216,10 +219,14 @@ export async function invokeLangGraphChainStream(): Promise<string> {
   ];
   const stream = await app.stream({ messages: input }, config);

-  let firstResponse = '';
+  let firstResponse;
   for await (const chunk of stream) {
-    firstResponse += chunk.model?.messages.content;
+    firstResponse =
+      firstResponse === undefined ? chunk : firstResponse.concat(chunk);
   }
+  const firstResponseStr = firstResponse!
+    .map(chunk => chunk?.content ?? '')
+    .join('');

   const input2 = [
     {
@@ -229,12 +236,16 @@ export async function invokeLangGraphChainStream(): Promise<string> {
   ];
   const stream2 = await app.stream({ messages: input2 }, config);

-  let secondResponse = '';
+  let secondResponse;
   for await (const chunk of stream2) {
-    secondResponse += chunk.model?.messages.content;
+    secondResponse =
+      secondResponse === undefined ? chunk : secondResponse.concat(chunk);
   }
+  const secondResponseStr = secondResponse!
+    .map(chunk => chunk?.content ?? '')
+    .join('');

-  return `${firstResponse}\n\n${secondResponse}`;
+  return `${firstResponseStr}\n\n${secondResponseStr}`;
 }

 /**
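
The rewrite of invokeLangGraphChainStream follows from the new streamMode: 'messages' setting: the stream no longer yields per-node state updates (where chunk.model?.messages.content made sense), so the sample now collects the raw items and flattens them to text afterwards. As a hypothetical helper that is not part of the sample code, the flattening step could be expressed as a standalone function, again assuming each streamed item is a [messageChunk, metadata] tuple:

// Hypothetical helper: turn a LangGraph stream opened with
// streamMode: 'messages' into plain text. Each streamed item is assumed to be
// a [messageChunk, metadata] tuple, so only parts with string content count.
async function collectStreamText(
  stream: AsyncIterable<unknown>
): Promise<string> {
  let text = '';
  for await (const item of stream) {
    for (const part of item as { content?: unknown }[]) {
      if (typeof part?.content === 'string') {
        text += part.content;
      }
    }
  }
  return text;
}

With such a helper, firstResponseStr could be computed as await collectStreamText(await app.stream({ messages: input }, config)), which is roughly equivalent to the concat/map/join sequence in the diff.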

0 commit comments
