11import { BaseChatModel } from '@langchain/core/language_models/chat_models' ;
22import { OrchestrationClient as OrchestrationClientBase } from '@sap-ai-sdk/orchestration' ;
33import { AsyncCaller } from '@langchain/core/utils/async_caller' ;
4- import { mapLangchainMessagesToOrchestrationMessages , mapOutputToChatResult } from './util.js' ;
4+ import { resilience } from '@sap-cloud-sdk/resilience' ;
5+ import {
6+ isTemplate ,
7+ mapLangchainMessagesToOrchestrationMessages ,
8+ mapOutputToChatResult
9+ } from './util.js' ;
10+ import type { CustomRequestConfig } from '@sap-ai-sdk/core' ;
511import type { OrchestrationMessageChunk } from './orchestration-message-chunk.js' ;
612import type { ChatResult } from '@langchain/core/outputs' ;
713import type { OrchestrationModuleConfig } from '@sap-ai-sdk/orchestration' ;
814import type { BaseChatModelParams } from '@langchain/core/language_models/chat_models' ;
915import type { ResourceGroupConfig } from '@sap-ai-sdk/ai-api' ;
1016import type { BaseMessage } from '@langchain/core/messages' ;
1117import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager' ;
12- import type {
13- OrchestrationCallOptions
14- } from './types.js' ;
18+ import type { OrchestrationCallOptions } from './types.js' ;
1519import type { HttpDestinationOrFetchOptions } from '@sap-cloud-sdk/connectivity' ;
1620
1721// TODO: Update all docs
1822
/**
 * LangChain chat client for the orchestration service of SAP AI Core,
 * for consumption on SAP BTP.
 */
22- export class OrchestrationClient extends BaseChatModel < OrchestrationCallOptions , OrchestrationMessageChunk > {
26+ export class OrchestrationClient extends BaseChatModel <
27+ OrchestrationCallOptions ,
28+ OrchestrationMessageChunk
29+ > {
  /**
   * Creates a LangChain-compatible chat client backed by the orchestration service.
   * @param orchestrationConfig - Orchestration module configuration. `streaming` is
   *   omitted from the accepted type because streaming is not supported yet.
   * @param langchainOptions - Standard LangChain chat model parameters
   *   (e.g. `maxConcurrency`, `maxRetries`); forwarded to the {@link BaseChatModel}
   *   base constructor.
   * @param deploymentConfig - Optional resource group configuration used when
   *   resolving the orchestration deployment.
   * @param destination - Optional destination or fetch options for reaching the
   *   orchestration service.
   */
  constructor(
    // Omit streaming until supported
    public orchestrationConfig: Omit<OrchestrationModuleConfig, 'streaming'>,
    public langchainOptions: BaseChatModelParams = {},
    public deploymentConfig?: ResourceGroupConfig,
    public destination?: HttpDestinationOrFetchOptions
  ) {
    super(langchainOptions);
  }
@@ -36,12 +43,12 @@ export class OrchestrationClient extends BaseChatModel<OrchestrationCallOptions,
3643
  /**
   * Decisions:
   * bind only supports ParsedCallOptions; we don't support arbitrary LLM options, only tool calls & default BaseLanguageModelCallOptions, e.g. stop ✅
   * this aligns with other vendors' client designs (e.g. openai, google) ✅
   * The top of the array (array[array.length - 1]) contains the current message; everything before it is history.
   * Module results are part of our own message type, which extends AI Message to work with all other langchain functionality.
   *
   * For timeout, we need to apply our own middleware; it is not handled by langchain. ✅
   */
4653
4754 override async _generate (
@@ -50,29 +57,44 @@ export class OrchestrationClient extends BaseChatModel<OrchestrationCallOptions,
5057 runManager ?: CallbackManagerForLLMRun
5158 ) : Promise < ChatResult > {
5259 let caller = this . caller ;
53- if ( options . maxConcurrency ) {
54- const { maxConcurrency, maxRetries, onFailedAttempt } = this . langchainOptions ;
55- caller = new AsyncCaller (
56- { maxConcurrency : maxConcurrency ?? options . maxConcurrency ,
57- maxRetries ,
58- onFailedAttempt
59- }
60- ) ;
60+ if ( options . maxConcurrency ) {
61+ const { maxConcurrency, maxRetries, onFailedAttempt } =
62+ this . langchainOptions ;
63+ caller = new AsyncCaller ( {
64+ maxConcurrency : maxConcurrency ?? options . maxConcurrency ,
65+ maxRetries ,
66+ onFailedAttempt
67+ } ) ;
6168 }
6269 const res = await caller . callWithOptions (
6370 {
6471 signal : options . signal
6572 } ,
6673 ( ) => {
67- // consider this.tools & this.stop property, merge it ith template orchestration config
68- const orchestrationClient = new OrchestrationClientBase ( this . orchestrationConfig , this . deploymentConfig , this . destination ) ;
69- const { messageHistory, inputParams } = mapLangchainMessagesToOrchestrationMessages ( messages ) ;
70- return orchestrationClient . chatCompletion ( {
71- // how to handle tools here? doesn't really exist as input in orchestration as message history
72- // make template a call option, to merge it ??
73- messagesHistory,
74- inputParams
75- } , options . customRequestConfig ) ;
74+ // consider this.tools & this.stop property, merge it with template orchestration config
75+ const { inputParams } = options ;
76+ const mergedOrchestrationConfig =
77+ this . mergeOrchestrationConfig ( options ) ;
78+ const orchestrationClient = new OrchestrationClientBase (
79+ mergedOrchestrationConfig ,
80+ this . deploymentConfig ,
81+ this . destination
82+ ) ;
83+ const messagesHistory =
84+ mapLangchainMessagesToOrchestrationMessages ( messages ) ;
85+ const customRequestConfig : CustomRequestConfig = {
86+ ...options . customRequestConfig ,
87+ middleware : resilience ( { timeout : options . timeout } )
88+ } ;
89+ return orchestrationClient . chatCompletion (
90+ {
91+ // how to handle tools here? doesn't really exist as input in orchestration as message history
92+ // make template a call option, to merge it ??
93+ messagesHistory,
94+ inputParams
95+ } ,
96+ customRequestConfig
97+ ) ;
7698 }
7799 ) ;
78100
@@ -85,5 +107,35 @@ export class OrchestrationClient extends BaseChatModel<OrchestrationCallOptions,
85107
86108 return mapOutputToChatResult ( res . data ) ;
87109 }
88- }
89110
111+ private mergeOrchestrationConfig (
112+ options : typeof this . ParsedCallOptions
113+ ) : OrchestrationModuleConfig {
114+ const { tools = [ ] , stop = [ ] } = options ;
115+ return {
116+ ...this . orchestrationConfig ,
117+ llm : {
118+ ...this . orchestrationConfig . llm ,
119+ model_params : {
120+ ...this . orchestrationConfig . llm . model_params ,
121+ ...( stop . length && {
122+ stop : [
123+ ...( this . orchestrationConfig . llm . model_params ?. stop || [ ] ) ,
124+ ...stop
125+ ]
126+ } )
127+ }
128+ } ,
129+ templating : {
130+ ...this . orchestrationConfig . templating ,
131+ ...( isTemplate ( this . orchestrationConfig . templating ) &&
132+ tools . length && {
133+ tools : [
134+ ...( this . orchestrationConfig . templating . tools || [ ] ) ,
135+ ...tools
136+ ]
137+ } )
138+ }
139+ } ;
140+ }
141+ }
0 commit comments