
Commit 5cfd4b5

update time complexity
1 parent 3b6eba6 commit 5cfd4b5

1 file changed: 36 additions, 26 deletions


pages/api/examples/time-complexity.ts

@@ -3,6 +3,19 @@ import { getSession } from "next-auth/react"
 import type { NextApiRequest, NextApiResponse } from "next"
 import rateLimit from "../../../utils/rate-limit"
 import { env } from "process"
+import { MongoClient } from "mongodb"
+
+const options = {
+  useUnifiedTopology: true,
+  useNewUrlParser: true,
+}
+const client = new MongoClient(process.env.MONGO_URI!)
+
+interface Userpromt {
+  input: string
+  output: string
+  createdAt: string
+}
 
 const limiter = rateLimit({
   interval: 60 * 1000, // 60 seconds
@@ -11,27 +24,21 @@ const limiter = rateLimit({
 
 const { Configuration, OpenAIApi } = require("openai")
 
-const configuration = new Configuration({
-  apiKey: env.OPENAI_API_KEY,
-})
-const openai = new OpenAIApi(configuration)
-
 export default async (req: NextApiRequest, res: NextApiResponse) => {
-  await limiter.check(res, 4, "CACHE_TOKEN") // 8 requests per minute
+  await limiter.check(res, 20, "CACHE_TOKEN") // 20 requests per minute
+
+  let configuration = new Configuration({
+    apiKey: process.env.OPENAI_API_KEY,
+  })
+  let openai = new OpenAIApi(configuration)
 
   const session = await getSession({ req })
 
   //console.log(req.body)
   //console.log(req.body.textup)
   //console.log(req.body.selectedOption.value)
 
-  console.log(
-    "#### Generate a fucntion in " +
-      " that does the following: " +
-      req.body.textup +
-      " \n \n ### " +
-      "\n\n"
-  )
+  console.log(session)
 
   console.log("content length", req.body.textup.length)
   if (req.body.textup.length > 1000) {
@@ -45,8 +52,8 @@ export default async (req: NextApiRequest, res: NextApiResponse) => {
   const { user } = session
 
   openai
-    .createCompletion("content-filter-alpha",{
-      //text-davinci-002,
+    .createCompletion({
+      model: "content-filter-alpha",
      prompt: "<|endoftext|>" + req.body.textup + "\n--\nLabel:",
      temperature: 0,
      max_tokens: 1,
@@ -60,26 +67,29 @@ export default async (req: NextApiRequest, res: NextApiResponse) => {
 
   console.log("usermail:", user?.email)
 
-  // add sending user id to the request
+  configuration = new Configuration({
+    apiKey: process.env.OPENAI_API_KEY_CODEX,
+  })
+  openai = new OpenAIApi(configuration)
+
   openai
-    .createCompletion("text-curie-001",{
-      //text-davinci-002,
+    .createCompletion({
+      model: "code-davinci-002",
      prompt:
-        "What is the time complexity of this function? " +
-        " \n### \n \n " +
+        " Time complexity Big O Notation of: \n " +
        req.body.textup +
-        " \n \n The time complexity is " +
-        "\n\n",
-      temperature: 0.7,
+        " \n Big O: ",
+      temperature: 0 ,
      max_tokens: 250,
      top_p: 1,
-      frequency_penalty: 0,
-      presence_penalty: 0,
+      frequency_penalty: 0.7,
+      presence_penalty: 0.2,
      user: user?.email,
    })
-    .then((response: any) => {
+    .then(async(response: any) => {
      console.log(response.data.choices[0].text)
      //res.status(200).json(response.data)
+      console.log("Response:", response.data.choices[0])
      try {
        res.status(200).json({ data: response.data.choices[0].text })
      } catch (err) {
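
The hunks above introduce a MongoClient, an unused options object, and a Userpromt interface, but nothing in this diff writes to MongoDB yet, and the reworked code-davinci-002 request is spread across several hunks. Below is a hypothetical consolidation of how those pieces could fit together; the analyzeAndStore helper and the "examples" / "userprompts" database and collection names are illustrative assumptions, not code from this repository.

// Hypothetical consolidation (not part of this commit): run the
// code-davinci-002 time-complexity request in object form and persist the
// prompt/response pair with the MongoClient and Userpromt interface added above.
import { MongoClient } from "mongodb"

const { Configuration, OpenAIApi } = require("openai")

interface Userpromt {
  input: string
  output: string
  createdAt: string
}

const client = new MongoClient(process.env.MONGO_URI!)

async function analyzeAndStore(textup: string, email?: string): Promise<string> {
  // Same request shape as the updated handler above.
  const configuration = new Configuration({
    apiKey: process.env.OPENAI_API_KEY_CODEX,
  })
  const openai = new OpenAIApi(configuration)

  const response = await openai.createCompletion({
    model: "code-davinci-002",
    prompt: " Time complexity Big O Notation of: \n " + textup + " \n Big O: ",
    temperature: 0,
    max_tokens: 250,
    top_p: 1,
    frequency_penalty: 0.7,
    presence_penalty: 0.2,
    user: email,
  })
  const output = response.data.choices[0].text ?? ""

  // Persist the exchange; database and collection names are assumptions,
  // since the commit constructs the client but never writes with it.
  await client.connect()
  await client
    .db("examples")
    .collection<Userpromt>("userprompts")
    .insertOne({
      input: textup,
      output,
      createdAt: new Date().toISOString(),
    })

  return output
}

In recent versions of the MongoDB Node driver, calling connect() on an already-connected client is harmless, so the helper can be invoked per request; a production route would more likely reuse a cached connection instead of connecting each time.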
