1
- const https = require ( ' https' ) ;
1
+ const https = require ( " https" ) ;
2
2
3
// Example dummy function hard coded to return the same weather for each
// known city. In production, this could be your backend API or an external API.
function getCurrentWeather(location, unit = "fahrenheit") {
  const query = location.toLowerCase();

  // Guard clauses: each known city short-circuits with its canned report.
  if (query.includes("tokyo")) {
    return JSON.stringify({ location: "Tokyo", temperature: "10", unit: "celsius" });
  }
  if (query.includes("san francisco")) {
    return JSON.stringify({
      location: "San Francisco",
      temperature: "72",
      unit: "fahrenheit",
    });
  }
  if (query.includes("paris")) {
    return JSON.stringify({ location: "Paris", temperature: "22", unit: "fahrenheit" });
  }

  // Unknown city: echo the requested location/unit with a placeholder temperature.
  return JSON.stringify({ location, temperature: "unknown", unit });
}
14
-
15
24
/**
 * Sends one chat-completions request to the OpenAI HTTPS API and resolves
 * with the parsed JSON response body.
 *
 * Extracted helper: the original duplicated this request/Promise wrapper
 * verbatim for both round trips.
 *
 * @param {string} requestData - JSON-encoded chat-completions request payload.
 * @returns {Promise<object>} Parsed API response.
 *   Rejects on transport errors or if the body is not valid JSON.
 */
function postChatCompletions(requestData) {
  const options = {
    hostname: "api.openai.com",
    path: "/v1/chat/completions",
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: "Bearer sk-xxxxxxxxx", // Replace with your OpenAI API key
    },
  };

  return new Promise((resolve, reject) => {
    const req = https.request(options, (res) => {
      let data = "";

      res.on("data", (chunk) => {
        data += chunk;
      });

      res.on("end", () => {
        try {
          resolve(JSON.parse(data));
        } catch (err) {
          // Reject instead of letting JSON.parse throw inside the 'end'
          // handler, where the error would escape the Promise unhandled.
          reject(new Error("Invalid JSON in API response", { cause: err }));
        }
      });
    });

    req.on("error", (error) => {
      reject(error);
    });

    req.write(requestData);
    req.end();
  });
}

/**
 * Runs one tool-calling round trip with the chat-completions API:
 * 1. Sends the user message plus the tool schema.
 * 2. If the model requested tool calls, executes them locally, appends the
 *    results as "tool" messages, and asks the model for a final answer.
 *
 * @returns {Promise<object>} The final API response. When the model does not
 *   request any tool call, the first response is returned directly (bug fix:
 *   the original fell through and resolved with undefined, crashing callers
 *   that read response.choices).
 */
async function runConversation() {
  const messages = [
    {
      role: "user",
      content: "What's the weather like in San Francisco, Tokyo, and Paris?",
    },
  ];
  const tools = [
    {
      type: "function",
      function: {
        name: "get_current_weather",
        description: "Get the current weather in a given location",
        parameters: {
          type: "object",
          properties: {
            location: {
              type: "string",
              description: "The city and state, e.g. San Francisco, CA",
            },
            unit: { type: "string", enum: ["celsius", "fahrenheit"] },
          },
          required: ["location"],
        },
      },
    },
  ];

  const response = await postChatCompletions(
    JSON.stringify({
      model: "gpt-3.5-turbo",
      messages: messages,
      tools: tools,
      tool_choice: "auto", // auto is default, but we'll be explicit
    })
  );

  const responseMessage = response.choices[0].message;

  if (!responseMessage.tool_calls) {
    // No tool call requested: the model's direct answer is the final answer.
    return response;
  }

  // Map tool names advertised to the model onto local implementations.
  const availableFunctions = {
    get_current_weather: getCurrentWeather,
  };

  // Extend the conversation with the assistant's tool-call request...
  messages.push(responseMessage);

  // ...and with one "tool" message per requested call. getCurrentWeather is
  // synchronous, so the original's Promise.all over an async map was
  // unnecessary — a plain loop preserves call order.
  for (const toolCall of responseMessage.tool_calls) {
    const functionName = toolCall.function.name;
    const functionToCall = availableFunctions[functionName];
    const functionArgs = JSON.parse(toolCall.function.arguments);

    messages.push({
      tool_call_id: toolCall.id,
      role: "tool",
      name: functionName,
      content: functionToCall(functionArgs.location, functionArgs.unit),
    });
  }

  // Second round trip: let the model phrase a final answer from tool output.
  return postChatCompletions(
    JSON.stringify({
      model: "gpt-3.5-turbo",
      messages: messages,
    })
  );
}
126
148
127
149
// Script entry point: run the conversation and print the model's final text.
runConversation()
  .then((response) => {
    // Optional chaining guards against undefined resolutions and API error
    // payloads that carry no `choices` array, logging undefined instead of
    // crashing inside the .then handler.
    const messageContent = response?.choices?.[0]?.message?.content;
    console.log(messageContent);
  })
  .catch((error) => {
    console.error(error);
  });
0 commit comments