
Commit 87ac9ad

Authored and committed by tomfrenken
add sample code
1 parent 71ab5dc commit 87ac9ad

File tree: 9 files changed, +79 -20 lines

packages/langchain/src/index.ts

Lines changed: 2 additions & 0 deletions

@@ -7,3 +7,5 @@ export type {
   AzureOpenAiEmbeddingModelParams,
   AzureOpenAiChatCallOptions
 } from './openai/index.js';
+export { OrchestrationClient } from './orchestration/index.js';
+export type { OrchestrationCallOptions } from './orchestration/index.js';

packages/langchain/src/orchestration/chat.ts

Lines changed: 5 additions & 10 deletions

@@ -18,17 +18,15 @@ import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager
 import type { OrchestrationCallOptions } from './types.js';
 import type { HttpDestinationOrFetchOptions } from '@sap-cloud-sdk/connectivity';
 
-// TODO: Update all docs
-
 /**
- * LangChain chat client for Azure OpenAI consumption on SAP BTP.
+ * The Orchestration client.
  */
 export class OrchestrationClient extends BaseChatModel<
   OrchestrationCallOptions,
   OrchestrationMessageChunk
 > {
   constructor(
-    // Omit streaming until supported
+    // TODO: Omit streaming until supported
     public orchestrationConfig: Omit<OrchestrationModuleConfig, 'streaming'>,
     public langchainOptions: BaseChatModelParams = {},
     public deploymentConfig?: ResourceGroupConfig,

@@ -45,8 +43,8 @@ export class OrchestrationClient extends BaseChatModel<
    * Decisions:
    * bind only supports ParsedCallOptions, we don't support arbitrary LLM options, only tool calls & default BaseLanguageModelCallOptions, e.g. stop ✅
    * this aligns with other vendors' client designs (e.g. openai, google) ✅
-   * top of the array (array[array.length - 1]) contains the current message, everything before then is history.
-   * Module results are part of our own message type, which extends AI Message to work with all other langchain functionality.
+   * inputParams are a separate call option, history = history
+   * Module results are part of our own message type, which extends AI Message to work with all other langchain functionality. ✅.
    *
    * For timeout, we need to apply our own middleware, it is not handled by langchain. ✅.
    */

@@ -71,7 +69,6 @@ export class OrchestrationClient extends BaseChatModel<
         signal: options.signal
       },
       () => {
-        // consider this.tools & this.stop property, merge it with template orchestration config
        const { inputParams } = options;
         const mergedOrchestrationConfig =
           this.mergeOrchestrationConfig(options);

@@ -88,8 +85,6 @@ export class OrchestrationClient extends BaseChatModel<
        };
         return orchestrationClient.chatCompletion(
           {
-            // how to handle tools here? doesn't really exist as input in orchestration as message history
-            // make template a call option, to merge it ??
            messagesHistory,
             inputParams
           },

@@ -100,7 +95,7 @@ export class OrchestrationClient extends BaseChatModel<
 
     const content = res.getContent();
 
-    // we currently do not support streaming
+    // TODO: Add streaming as soon as we support it
     await runManager?.handleLLMNewToken(
       typeof content === 'string' ? content : ''
     );
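
A minimal usage sketch of the constructor and call-options design shown above, not part of this commit: the orchestration config goes into the constructor, while inputParams travel as a call option. The resourceGroup key and the maxRetries option are assumed shapes for ResourceGroupConfig and BaseChatModelParams.

import { OrchestrationClient } from '@sap-ai-sdk/langchain';

// Construct the client; streaming is omitted from the config until it is supported.
const client = new OrchestrationClient(
  {
    llm: { model_name: 'gpt-35-turbo', model_params: {} },
    templating: {
      template: [{ role: 'user', content: 'Answer briefly: {{?question}}' }]
    }
  },
  { maxRetries: 2 }, // LangChain BaseChatModelParams (assumed option)
  { resourceGroup: 'default' } // assumed ResourceGroupConfig shape
);

// inputParams is a separate call option; the invoke input becomes the message history.
const answer = await client.invoke('Hello!', {
  inputParams: { question: 'What does the orchestration service do?' }
});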
packages/langchain/src/orchestration/index.ts

Lines changed: 5 additions & 0 deletions

@@ -0,0 +1,5 @@
+export * from './chat.js';
+export * from './orchestration-message.js';
+export * from './orchestration-message-chunk.js';
+export * from './types.js';
+export * from './util.js';

packages/langchain/src/orchestration/orchestration-message-chunk.ts

Lines changed: 2 additions & 1 deletion

@@ -3,7 +3,8 @@ import type { AIMessageChunkFields } from '@langchain/core/messages';
 import type { ModuleResults } from '@sap-ai-sdk/orchestration';
 
 /**
- * TODO: Add docs.
+ * An AI Message Chunk containing module results and request ID.
+ * @internal
  */
 export class OrchestrationMessageChunk extends AIMessageChunk {
   module_results: ModuleResults;

packages/langchain/src/orchestration/orchestration-message.ts

Lines changed: 2 additions & 1 deletion

@@ -3,7 +3,8 @@ import type { AIMessageFields } from '@langchain/core/messages';
 import type { ModuleResults } from '@sap-ai-sdk/orchestration';
 
 /**
- * TODO: Add docs.
+ * An AI Message containing module results and request ID.
+ * @internal
  */
 export class OrchestrationMessage extends AIMessage {
   module_results: ModuleResults;

packages/langchain/src/orchestration/types.ts

Lines changed: 1 addition & 1 deletion

@@ -3,7 +3,7 @@ import type { BaseChatModelCallOptions } from '@langchain/core/language_models/c
 import type { CustomRequestConfig } from '@sap-ai-sdk/core';
 
 /**
- * TODO: Add docs.
+ * Options for an orchestration call.
  */
 export type OrchestrationCallOptions = Pick<
   BaseChatModelCallOptions,

packages/langchain/src/orchestration/util.ts

Lines changed: 6 additions & 7 deletions

@@ -19,20 +19,19 @@ import type {
  * Checks if the object is a {@link Template}.
  * @param object - The object to check.
  * @returns True if the object is a {@link Template}.
+ * @internal
  */
 export function isTemplate(object: TemplatingModuleConfig): object is Template {
   return 'template' in object;
 }
 
 /**
- * Maps {@link BaseMessage} to {@link AzureOpenAiChatMessage}.
+ * Maps {@link BaseMessage} to {@link ChatMessage}.
  * @param message - The message to map.
- * @returns The {@link AzureOpenAiChatMessage}.
+ * @returns The {@link ChatMessage}.
  */
 // TODO: Add mapping of refusal property, once LangChain base class supports it natively.
-function mapBaseMessageToAzureOpenAiChatMessage(
-  message: BaseMessage
-): ChatMessage {
+function mapBaseMessageToChatMessage(message: BaseMessage): ChatMessage {
   switch (message.getType()) {
     case 'ai':
       return mapAiMessageToAzureOpenAiAssistantMessage(message);

@@ -97,15 +96,15 @@ function mapSystemMessageToAzureOpenAiSystemMessage(
 }
 
 /**
- * TODO: adjust
  * Maps LangChain messages to orchestration messages.
  * @param messages - The LangChain messages to map.
  * @returns The orchestration messages mapped from LangChain messages.
+ * @internal
  */
 export function mapLangchainMessagesToOrchestrationMessages(
   messages: BaseMessage[]
 ): ChatMessage[] {
-  return messages.map(mapBaseMessageToAzureOpenAiChatMessage);
+  return messages.map(mapBaseMessageToChatMessage);
 }
 
 /**
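
For orientation, a small sketch of what the mapping above is expected to produce; the role/content shape of the orchestration ChatMessage is assumed here and not spelled out in this diff.

import { HumanMessage, SystemMessage } from '@langchain/core/messages';

const history = [
  new SystemMessage('You are a helpful assistant.'),
  new HumanMessage('Hi there!')
];

// mapLangchainMessagesToOrchestrationMessages(history) should yield roughly:
// [
//   { role: 'system', content: 'You are a helpful assistant.' },
//   { role: 'user', content: 'Hi there!' }
// ]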
sample-code/src/langchain-orchestration.ts

Lines changed: 42 additions & 0 deletions

@@ -0,0 +1,42 @@
+import { StringOutputParser } from '@langchain/core/output_parsers';
+import { OrchestrationClient } from '@sap-ai-sdk/langchain';
+import type { BaseLanguageModelInput } from '@langchain/core/language_models/base';
+import type { Runnable } from '@langchain/core/runnables';
+import type { OrchestrationCallOptions } from '@sap-ai-sdk/langchain';
+
+/**
+ * Ask GPT for an introduction of SAP Cloud SDK, as part of a chain.
+ * @returns The answer from the model.
+ */
+export async function invokeChain(): Promise<string> {
+  const orchestrationConfig = {
+    // define the language model to be used
+    llm: {
+      model_name: 'gpt-35-turbo',
+      model_params: {}
+    },
+    // define the template
+    templating: {
+      template: [
+        {
+          role: 'user',
+          content: 'Give me a long introduction of {{?input}}'
+        }
+      ]
+    }
+  };
+
+  const callOptions = { inputParams: { input: 'SAP Cloud SDK' } };
+
+  // initialize the client
+  const client = new OrchestrationClient(orchestrationConfig);
+
+  // create an output parser
+  const parser = new StringOutputParser();
+
+  // chain together template, client, and parser
+  const llmChain = client.pipe(parser) as Runnable<BaseLanguageModelInput, string, OrchestrationCallOptions>;
+
+  // invoke the chain
+  return llmChain.invoke('My Message History', callOptions);
+}
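
The Runnable cast above keeps OrchestrationCallOptions as the chain's call-option type, so the inputParams in callOptions still type-check after piping into the string parser. A sketch of invoking the client directly, without the parser, assuming the standard LangChain invoke contract (not part of this commit):

// Direct invocation; the result is an AI message whose content holds the model reply.
const response = await client.invoke('My Message History', callOptions);
console.log(response.content);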

sample-code/src/server.ts

Lines changed: 14 additions & 0 deletions

@@ -42,6 +42,9 @@ import {
   invoke,
   invokeToolChain
 } from './langchain-azure-openai.js';
+import {
+  invokeChain as invokeChainOrchestration
+} from './langchain-orchestration.js';
 import {
   createCollection,
   createDocumentsWithTimestamp,

@@ -416,6 +419,17 @@ app.get('/langchain/invoke-chain', async (req, res) => {
   }
 });
 
+app.get('/langchain/invoke-chain-orchestration', async (req, res) => {
+  try {
+    res.send(await invokeChainOrchestration());
+  } catch (error: any) {
+    console.error(error);
+    res
+      .status(500)
+      .send('Yikes, vibes are off apparently 😬 -> ' + error.request.data);
+  }
+});
+
 app.get('/langchain/invoke-rag-chain', async (req, res) => {
   try {
     res.send(await invokeRagChain());
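
Once the sample server is running, the new route can be tried with a plain HTTP GET; host and port below are assumptions about the local setup, not taken from this commit.

// Sketch: call the new endpoint and print the chain's string answer.
const res = await fetch('http://localhost:8080/langchain/invoke-chain-orchestration');
console.log(await res.text());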
