
Commit 6570946

update translation
1 parent 556b433 commit 6570946

File tree

1 file changed: +37 -21 lines

pages/api/examples/protected.ts

Lines changed: 37 additions & 21 deletions
@@ -3,6 +3,19 @@ import { getSession } from "next-auth/react"
 import type { NextApiRequest, NextApiResponse } from "next"
 import rateLimit from "../../../utils/rate-limit"
 import { env } from "process"
+import { MongoClient } from "mongodb"
+
+const options = {
+  useUnifiedTopology: true,
+  useNewUrlParser: true,
+}
+const client = new MongoClient(process.env.MONGO_URI!)
+
+interface Userpromt {
+  input: string
+  output: string
+  createdAt: string
+}
 
 const limiter = rateLimit({
   interval: 60 * 1000, // 60 seconds
@@ -11,20 +24,22 @@ const limiter = rateLimit({
 
 const { Configuration, OpenAIApi } = require("openai")
 
-const configuration = new Configuration({
-  apiKey: env.OPENAI_API_KEY,
-})
-const openai = new OpenAIApi(configuration)
-
 export default async (req: NextApiRequest, res: NextApiResponse) => {
-  await limiter.check(res, 4, "CACHE_TOKEN") // 8 requests per minute
+  await limiter.check(res, 20, "CACHE_TOKEN") // 20 requests per minute
+
+  let configuration = new Configuration({
+    apiKey: process.env.OPENAI_API_KEY,
+  })
+  let openai = new OpenAIApi(configuration)
 
   const session = await getSession({ req })
 
   //console.log(req.body)
   //console.log(req.body.textup)
   //console.log(req.body.selectedOption.value)
 
+  console.log(session)
+
   console.log("content length", req.body.textup.length)
   if (req.body.textup.length > 1000) {
     res.status(400).json({
@@ -37,8 +52,8 @@ export default async (req: NextApiRequest, res: NextApiResponse) => {
   const { user } = session
 
   openai
-    .createCompletion("content-filter-alpha",{
-      //text-davinci-002,
+    .createCompletion({
+      model: "content-filter-alpha",
       prompt: "<|endoftext|>" + req.body.textup + "\n--\nLabel:",
       temperature: 0,
       max_tokens: 1,
@@ -52,28 +67,29 @@ export default async (req: NextApiRequest, res: NextApiResponse) => {
 
     console.log("usermail:", user?.email)
 
-    // add sending user id to the request
+    configuration = new Configuration({
+      apiKey: process.env.OPENAI_API_KEY_CODEX,
+    })
+    openai = new OpenAIApi(configuration)
+
     openai
-      .createCompletion("text-curie-001",{
-        //text-davinci-002,
+      .createCompletion({
+        model: "code-davinci-002",
         prompt:
-          "Translate this function into " +
-          req.body.selectedOption.value +
-          " \n \n \n " +
-          req.body.textup +
-          " \n \n " +
-          req.body.selectedOption.value +
-          ": \n\n",
-        temperature: 0.7,
+          "Code: " + req.body.textup +
+
+          "\n Translation to " + req.body.selectedOption.value + ":\n",
+        temperature: 0.4,
         max_tokens: 250,
         top_p: 1,
-        frequency_penalty: 0,
+        frequency_penalty: 0.3,
        presence_penalty: 0,
        user: user?.email,
      })
-      .then((response: any) => {
+      .then(async(response: any) => {
        console.log(response.data.choices[0].text)
        //res.status(200).json(response.data)
+        console.log("Response:", response.data.choices[0])
        try {
          res.status(200).json({ data: response.data.choices[0].text })
        } catch (err) {
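
For context on how the updated route is consumed: the handler reads req.body.textup (rejected with a 400 when longer than 1000 characters) and req.body.selectedOption.value, and on success responds with { data: <completion text> }. Below is a minimal client-side sketch under those assumptions; the helper name and the example target language are illustrative and not part of the commit, and the call only works for a signed-in NextAuth session within the 20-requests-per-minute limit.

// Hypothetical client-side helper; the helper name and target value are assumptions.
async function translateSnippet(code: string, target: string): Promise<string> {
  const res = await fetch("/api/examples/protected", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    // The handler reads req.body.textup and req.body.selectedOption.value.
    body: JSON.stringify({ textup: code, selectedOption: { value: target } }),
  })
  if (!res.ok) {
    // e.g. 400 when textup exceeds 1000 characters, or when the
    // 20-requests-per-minute limiter rejects the call.
    throw new Error(`request failed with status ${res.status}`)
  }
  const { data } = (await res.json()) as { data: string }
  return data
}

// Example usage (target language value is an assumption):
// const js = await translateSnippet("def add(a, b): return a + b", "JavaScript")

The commit also adds a MongoClient instance and a Userpromt interface at module scope without wiring them into the handler yet. A hedged sketch of how a prompt/response pair might eventually be persisted with that client follows; the database and collection names are assumptions.

// Hypothetical persistence step using the client and Userpromt interface added
// in this commit; "translator" and "prompts" are assumed names.
async function saveUserpromt(input: string, output: string): Promise<void> {
  await client.connect() // ensure the shared client is connected before writing
  await client
    .db("translator")
    .collection<Userpromt>("prompts")
    .insertOne({ input, output, createdAt: new Date().toISOString() })
}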
