Commit 2efc5ff

Commit message: chore
1 parent 5029545 · commit 2efc5ff

57 files changed: +952 additions, −326 deletions

Note: large commits have some content hidden by default, so only a subset of the 57 changed files appears below.

‎scripts/10/0.sh‎

Lines changed: 6 additions & 13 deletions
@@ -1,25 +1,18 @@
 device="cuda:0"
 
-for i in 0 1 2; do
-for model_name in studio-ousia/luke-japanese-large-lite studio-ousia/luke-japanese-base-lite; do
-for lr in 1e-5 3e-5 5e-5; do
-for batch_size in 512; do
+for model_name in nlp-waseda/roberta-large-japanese ku-nlp/deberta-v2-large-japanese; do
+for lr in 1e-5 3e-5 5e-5; do
+for batch_size in 64 128 256 512; do
+for dataset_name in nu-snli nu-mnli nu-snli+mnli; do
 poetry run python src/train_sup.py \
---dataset_name jsnli+nu-snli \
+--dataset_name $dataset_name \
 --model_name $model_name \
 --batch_size $batch_size \
 --lr $lr \
+--use_jumanpp \
 --gradient_checkpointing \
 --device $device
 done
-for batch_size in 256 128 64; do
-poetry run python src/train_sup.py \
---dataset_name jsnli+nu-snli \
---model_name $model_name \
---batch_size $batch_size \
---lr $lr \
---device $device
-done
 done
 done
 done
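
For reference, the rewritten grid in scripts/10/0.sh now sweeps 2 models × 3 learning rates × 4 batch sizes × 3 datasets = 72 runs on cuda:0. Below is a sketch of the first invocation the new loop would produce, with every flag taken from the diff above; exact argument handling is up to src/train_sup.py.

poetry run python src/train_sup.py \
    --dataset_name nu-snli \
    --model_name nlp-waseda/roberta-large-japanese \
    --batch_size 64 \
    --lr 1e-5 \
    --use_jumanpp \
    --gradient_checkpointing \
    --device cuda:0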

‎scripts/10/1.sh‎

Lines changed: 5 additions & 13 deletions
@@ -1,25 +1,17 @@
 device="cuda:1"
 
-for i in 0 1 2; do
-for model_name in cl-tohoku/bert-large-japanese ku-nlp/roberta-large-japanese-char-wwm; do
-for lr in 1e-5 3e-5 5e-5; do
-for batch_size in 512; do
+for model_name in studio-ousia/mluke-large-lite xlm-roberta-large; do
+for lr in 1e-5 3e-5 5e-5; do
+for batch_size in 64 128 256 512; do
+for dataset_name in nu-snli nu-mnli nu-snli+mnli; do
 poetry run python src/train_sup.py \
---dataset_name jsnli+nu-snli \
+--dataset_name $dataset_name \
 --model_name $model_name \
 --batch_size $batch_size \
 --lr $lr \
 --gradient_checkpointing \
 --device $device
 done
-for batch_size in 256 128 64; do
-poetry run python src/train_sup.py \
---dataset_name jsnli+nu-snli \
---model_name $model_name \
---batch_size $batch_size \
---lr $lr \
---device $device
-done
 done
 done
 done

‎scripts/10/2.sh‎

Lines changed: 5 additions & 15 deletions
@@ -1,27 +1,17 @@
 device="cuda:2"
 
-for i in 0 1 2; do
-for model_name in nlp-waseda/roberta-large-japanese ku-nlp/deberta-v2-large-japanese; do
-for lr in 1e-5 3e-5 5e-5; do
-for batch_size in 512; do
+for model_name in cl-tohoku/bert-large-japanese studio-ousia/luke-japanese-large-lite; do
+for lr in 1e-5 3e-5 5e-5; do
+for batch_size in 64 128 256 512; do
+for dataset_name in nu-snli nu-mnli nu-snli+mnli; do
 poetry run python src/train_sup.py \
---dataset_name jsnli+nu-snli \
+--dataset_name $dataset_name \
 --model_name $model_name \
 --batch_size $batch_size \
 --lr $lr \
---use_jumanpp \
 --gradient_checkpointing \
 --device $device
 done
-for batch_size in 256 128 64; do
-poetry run python src/train_sup.py \
---dataset_name jsnli+nu-snli \
---model_name $model_name \
---batch_size $batch_size \
---lr $lr \
---use_jumanpp \
---device $device
-done
 done
 done
 done

‎scripts/10/3.sh‎

Lines changed: 5 additions & 13 deletions
@@ -1,25 +1,17 @@
 device="cuda:3"
 
-for i in 0 1 2; do
-for model_name in xlm-roberta-large studio-ousia/mluke-large-lite; do
-for lr in 1e-5 3e-5 5e-5; do
-for batch_size in 512; do
+for model_name in ku-nlp/roberta-large-japanese-char-wwm cl-tohoku/bert-base-japanese-char-v2; do
+for lr in 1e-5 3e-5 5e-5; do
+for batch_size in 64 128 256 512; do
+for dataset_name in nu-snli nu-mnli nu-snli+mnli; do
 poetry run python src/train_sup.py \
---dataset_name jsnli+nu-snli \
+--dataset_name $dataset_name \
 --model_name $model_name \
 --batch_size $batch_size \
 --lr $lr \
 --gradient_checkpointing \
 --device $device
 done
-for batch_size in 256 128 64; do
-poetry run python src/train_sup.py \
---dataset_name jsnli+nu-snli \
---model_name $model_name \
---batch_size $batch_size \
---lr $lr \
---device $device
-done
 done
 done
 done

‎scripts/10/prev/2023-03-21/0.sh‎

Lines changed: 25 additions & 0 deletions
@@ -0,0 +1,25 @@
+device="cuda:0"
+
+for i in 0 1 2; do
+for model_name in studio-ousia/luke-japanese-large-lite studio-ousia/luke-japanese-base-lite; do
+for lr in 1e-5 3e-5 5e-5; do
+for batch_size in 512; do
+poetry run python src/train_sup.py \
+--dataset_name jsnli+nu-snli \
+--model_name $model_name \
+--batch_size $batch_size \
+--lr $lr \
+--gradient_checkpointing \
+--device $device
+done
+for batch_size in 256 128 64; do
+poetry run python src/train_sup.py \
+--dataset_name jsnli+nu-snli \
+--model_name $model_name \
+--batch_size $batch_size \
+--lr $lr \
+--device $device
+done
+done
+done
+done

‎scripts/10/prev/2023-03-21/1.sh‎

Lines changed: 25 additions & 0 deletions
@@ -0,0 +1,25 @@
+device="cuda:1"
+
+for i in 0 1 2; do
+for model_name in cl-tohoku/bert-large-japanese ku-nlp/roberta-large-japanese-char-wwm; do
+for lr in 1e-5 3e-5 5e-5; do
+for batch_size in 512; do
+poetry run python src/train_sup.py \
+--dataset_name jsnli+nu-snli \
+--model_name $model_name \
+--batch_size $batch_size \
+--lr $lr \
+--gradient_checkpointing \
+--device $device
+done
+for batch_size in 256 128 64; do
+poetry run python src/train_sup.py \
+--dataset_name jsnli+nu-snli \
+--model_name $model_name \
+--batch_size $batch_size \
+--lr $lr \
+--device $device
+done
+done
+done
+done

‎scripts/10/prev/2023-03-21/2.sh‎

Lines changed: 27 additions & 0 deletions
@@ -0,0 +1,27 @@
+device="cuda:2"
+
+for i in 0 1 2; do
+for model_name in nlp-waseda/roberta-large-japanese ku-nlp/deberta-v2-large-japanese; do
+for lr in 1e-5 3e-5 5e-5; do
+for batch_size in 512; do
+poetry run python src/train_sup.py \
+--dataset_name jsnli+nu-snli \
+--model_name $model_name \
+--batch_size $batch_size \
+--lr $lr \
+--use_jumanpp \
+--gradient_checkpointing \
+--device $device
+done
+for batch_size in 256 128 64; do
+poetry run python src/train_sup.py \
+--dataset_name jsnli+nu-snli \
+--model_name $model_name \
+--batch_size $batch_size \
+--lr $lr \
+--use_jumanpp \
+--device $device
+done
+done
+done
+done

‎scripts/10/prev/2023-03-21/3.sh‎

Lines changed: 25 additions & 0 deletions
@@ -0,0 +1,25 @@
+device="cuda:3"
+
+for i in 0 1 2; do
+for model_name in xlm-roberta-large studio-ousia/mluke-large-lite; do
+for lr in 1e-5 3e-5 5e-5; do
+for batch_size in 512; do
+poetry run python src/train_sup.py \
+--dataset_name jsnli+nu-snli \
+--model_name $model_name \
+--batch_size $batch_size \
+--lr $lr \
+--gradient_checkpointing \
+--device $device
+done
+for batch_size in 256 128 64; do
+poetry run python src/train_sup.py \
+--dataset_name jsnli+nu-snli \
+--model_name $model_name \
+--batch_size $batch_size \
+--lr $lr \
+--device $device
+done
+done
+done
+done

‎scripts/11/0.sh‎

Lines changed: 6 additions & 21 deletions
@@ -1,30 +1,15 @@
 device="cuda:0"
 
-for i in 0 1 2; do
-for model_name in studio-ousia/luke-japanese-large-lite studio-ousia/luke-japanese-base-lite; do
-for batch_size in 512; do
-for lr in 5e-5 3e-5 1e-5; do
+for model_name in nlp-waseda/roberta-large-japanese ku-nlp/deberta-v2-large-japanese; do
+for lr in 1e-5 3e-5 5e-5; do
+for batch_size in 64 128 256 512; do
+for dataset_name in nu-snli nu-mnli nu-snli+mnli; do
 poetry run python src/train_sup.py \
---dataset_name jsnli \
---model_name $model_name \
---batch_size $batch_size \
---lr $lr \
---gradient_checkpointing \
---device $device
-
-poetry run python src/train_sup.py \
---dataset_name nu-snli \
---model_name $model_name \
---batch_size $batch_size \
---lr $lr \
---gradient_checkpointing \
---device $device
-
-poetry run python src/train_sup.py \
---dataset_name nu-snli+mnli \
+--dataset_name $dataset_name \
 --model_name $model_name \
 --batch_size $batch_size \
 --lr $lr \
+--use_jumanpp \
 --gradient_checkpointing \
 --device $device
 done
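
The consolidated loop in scripts/11/0.sh covers the same 2 × 3 × 4 × 3 = 72 combinations, replacing the three hard-coded invocations (jsnli, nu-snli, nu-snli+mnli) with a dataset_name sweep over nu-snli, nu-mnli, and nu-snli+mnli, and adding --use_jumanpp for the nlp-waseda/ku-nlp models. A quick dry-run sketch for sanity-checking the grid size before launching (the echo and wc are only for checking; they are not part of the committed script):

for model_name in nlp-waseda/roberta-large-japanese ku-nlp/deberta-v2-large-japanese; do
for lr in 1e-5 3e-5 5e-5; do
for batch_size in 64 128 256 512; do
for dataset_name in nu-snli nu-mnli nu-snli+mnli; do
    echo poetry run python src/train_sup.py --dataset_name $dataset_name --model_name $model_name --batch_size $batch_size --lr $lr --use_jumanpp
done; done; done; done | wc -l    # should print 72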

‎scripts/11/1.sh‎

Lines changed: 5 additions & 21 deletions
@@ -1,27 +1,11 @@
 device="cuda:1"
 
-for i in 0 1 2; do
-for model_name in cl-tohoku/bert-large-japanese ku-nlp/roberta-large-japanese-char-wwm; do
-for batch_size in 512; do
-for lr in 5e-5 3e-5 1e-5; do
+for model_name in studio-ousia/mluke-large-lite xlm-roberta-large; do
+for lr in 1e-5 3e-5 5e-5; do
+for batch_size in 64 128 256 512; do
+for dataset_name in nu-snli nu-mnli nu-snli+mnli; do
 poetry run python src/train_sup.py \
---dataset_name jsnli \
---model_name $model_name \
---batch_size $batch_size \
---lr $lr \
---gradient_checkpointing \
---device $device
-
-poetry run python src/train_sup.py \
---dataset_name nu-snli \
---model_name $model_name \
---batch_size $batch_size \
---lr $lr \
---gradient_checkpointing \
---device $device
-
-poetry run python src/train_sup.py \
---dataset_name nu-snli+mnli \
+--dataset_name $dataset_name \
 --model_name $model_name \
 --batch_size $batch_size \
 --lr $lr \

0 commit comments
