-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathevaluate.sh
More file actions
executable file
·139 lines (102 loc) · 14.6 KB
/
evaluate.sh
File metadata and controls
executable file
·139 lines (102 loc) · 14.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
#!/bin/bash
# Append a timestamped error record to error_log.txt.
# Arguments: $1 - line number where the error occurred
#            $2 - the command that failed (typically $BASH_COMMAND)
# Outputs:   one log line appended to ./error_log.txt
log_error() {
  printf '%s - Error in script at line %s: %s\n' "$(date)" "$1" "$2" >> error_log.txt
}
# Log any command that fails, with its line number, via the helper above.
trap 'log_error $LINENO "$BASH_COMMAND"' ERR
# ----------------------------------------------------------------------------------
# It's important to note that:
# - evaluation takes around 10 minutes on an L4 GPU (available on lightning.ai)
# - the baselines don't require any checkpoints other than the automatically downloaded mT5 checkpoint
# - other runs require checkpoints that can be downloaded with the line before the python command
# - the checkpoints of the CG model are 1.2GB and for the other models this is 657MB
# - batch size 1 is required for evaluation
# ----------------------------------------------------------------------------------
# Abort immediately if the expected working directory is missing; without this
# check every subsequent command would silently run from the wrong location.
cd code || { log_error $LINENO "cd code"; echo "Error: cannot cd into ./code" >&2; exit 1; }
### Baselines (don't require further checkpoints):
# python evaluate.py --model CG --dataset demo --evaltrain --prompt titles
# python evaluate.py --model CG --dataset demo --prompt titles
# python evaluate.py --model CG --dataset demo --evaltrain --prompt subtitles
# python evaluate.py --model CG --dataset demo --prompt subtitles
# python evaluate.py --model CG --dataset demo --evaltrain --prompt diversity
# python evaluate.py --model CG --dataset demo --prompt diversity
# python evaluate.py --model CG --dataset demo --evaltrain --prompt pubtime
# python evaluate.py --model CG --dataset demo --prompt pubtime
### Evaluating the trained models
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.0_model_CGc_prompt_titles.pth
# python evaluate.py --batch_size 1 --dataset demo --model CGc --prompt titles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_CGc_prompt_titles.pth
# python evaluate.py --batch_size 1 --dataset demo --model CGc --prompt titles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_CGc_prompt_titles.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.4_model_CGc_prompt_titles.pth
# python evaluate.py --batch_size 1 --dataset demo --model CGc --prompt titles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_CGc_prompt_titles.pth
# python evaluate.py --batch_size 1 --dataset demo --model CGc --prompt titles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_CGc_prompt_titles.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.0_model_CGc_prompt_subtitles.pth
# python evaluate.py --batch_size 1 --dataset demo --model CGc --prompt subtitles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_CGc_prompt_subtitles.pth
# python evaluate.py --batch_size 1 --dataset demo --model CGc --prompt subtitles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_CGc_prompt_subtitles.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.4_model_CGc_prompt_subtitles.pth
# python evaluate.py --batch_size 1 --dataset demo --model CGc --prompt subtitles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_CGc_prompt_subtitles.pth
# python evaluate.py --batch_size 1 --dataset demo --model CGc --prompt subtitles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_CGc_prompt_subtitles.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.0_model_CGc_prompt_diversity.pth
# python evaluate.py --batch_size 1 --dataset demo --model CGc --prompt diversity --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_CGc_prompt_diversity.pth
# python evaluate.py --batch_size 1 --dataset demo --model CGc --prompt diversity --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_CGc_prompt_diversity.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.4_model_CGc_prompt_diversity.pth
# python evaluate.py --batch_size 1 --dataset demo --model CGc --prompt diversity --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_CGc_prompt_diversity.pth
# python evaluate.py --batch_size 1 --dataset demo --model CGc --prompt diversity --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_CGc_prompt_diversity.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.0_model_CGc_prompt_pubtime.pth
# python evaluate.py --batch_size 1 --dataset demo --model CGc --prompt pubtime --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_CGc_prompt_pubtime.pth
# python evaluate.py --batch_size 1 --dataset demo --model CGc --prompt pubtime --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_CGc_prompt_pubtime.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.4_model_CGc_prompt_pubtime.pth
# python evaluate.py --batch_size 1 --dataset demo --model CGc --prompt pubtime --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_CGc_prompt_pubtime.pth
# python evaluate.py --batch_size 1 --dataset demo --model CGc --prompt pubtime --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_CGc_prompt_pubtime.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.0_model_CG_prompt_titles.pth
# python evaluate.py --batch_size 1 --dataset demo --model CG --prompt titles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_CG_prompt_titles.pth
# python evaluate.py --batch_size 1 --dataset demo --model CG --prompt titles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_CG_prompt_titles.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.4_model_CG_prompt_titles.pth
# python evaluate.py --batch_size 1 --dataset demo --model CG --prompt titles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_CG_prompt_titles.pth
# python evaluate.py --batch_size 1 --dataset demo --model CG --prompt titles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_CG_prompt_titles.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.0_model_CG_prompt_subtitles.pth
# python evaluate.py --batch_size 1 --dataset demo --model CG --prompt subtitles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_CG_prompt_subtitles.pth
# python evaluate.py --batch_size 1 --dataset demo --model CG --prompt subtitles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_CG_prompt_subtitles.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.4_model_CG_prompt_subtitles.pth
# python evaluate.py --batch_size 1 --dataset demo --model CG --prompt subtitles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_CG_prompt_subtitles.pth
# python evaluate.py --batch_size 1 --dataset demo --model CG --prompt subtitles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_CG_prompt_subtitles.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.0_model_CG_prompt_diversity.pth
# python evaluate.py --batch_size 1 --dataset demo --model CG --prompt diversity --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_CG_prompt_diversity.pth
# python evaluate.py --batch_size 1 --dataset demo --model CG --prompt diversity --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_CG_prompt_diversity.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.4_model_CG_prompt_diversity.pth
# python evaluate.py --batch_size 1 --dataset demo --model CG --prompt diversity --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_CG_prompt_diversity.pth
# python evaluate.py --batch_size 1 --dataset demo --model CG --prompt diversity --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_CG_prompt_diversity.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.0_model_CG_prompt_pubtime.pth
# python evaluate.py --batch_size 1 --dataset demo --model CG --prompt pubtime --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_CG_prompt_pubtime.pth
# python evaluate.py --batch_size 1 --dataset demo --model CG --prompt pubtime --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_CG_prompt_pubtime.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.4_model_CG_prompt_pubtime.pth
# python evaluate.py --batch_size 1 --dataset demo --model CG --prompt pubtime --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_CG_prompt_pubtime.pth
# python evaluate.py --batch_size 1 --dataset demo --model CG --prompt pubtime --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_CG_prompt_pubtime.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.0_model_QA_prompt_titles.pth
# python evaluate.py --batch_size 1 --dataset demo --model QA --prompt titles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_QA_prompt_titles.pth
# python evaluate.py --batch_size 1 --dataset demo --model QA --prompt titles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_QA_prompt_titles.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.4_model_QA_prompt_titles.pth
# python evaluate.py --batch_size 1 --dataset demo --model QA --prompt titles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_QA_prompt_titles.pth
# python evaluate.py --batch_size 1 --dataset demo --model QA --prompt titles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_QA_prompt_titles.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.0_model_QA_prompt_subtitles.pth
# python evaluate.py --batch_size 1 --dataset demo --model QA --prompt subtitles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_QA_prompt_subtitles.pth
# python evaluate.py --batch_size 1 --dataset demo --model QA --prompt subtitles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_QA_prompt_subtitles.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.4_model_QA_prompt_subtitles.pth
# python evaluate.py --batch_size 1 --dataset demo --model QA --prompt subtitles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_QA_prompt_subtitles.pth
# python evaluate.py --batch_size 1 --dataset demo --model QA --prompt subtitles --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_QA_prompt_subtitles.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.0_model_QA_prompt_diversity.pth
# python evaluate.py --batch_size 1 --dataset demo --model QA --prompt diversity --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_QA_prompt_diversity.pth
# python evaluate.py --batch_size 1 --dataset demo --model QA --prompt diversity --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_QA_prompt_diversity.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.4_model_QA_prompt_diversity.pth
# python evaluate.py --batch_size 1 --dataset demo --model QA --prompt diversity --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_QA_prompt_diversity.pth
# python evaluate.py --batch_size 1 --dataset demo --model QA --prompt diversity --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_QA_prompt_diversity.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.0_model_QA_prompt_pubtime.pth
# python evaluate.py --batch_size 1 --dataset demo --model QA --prompt pubtime --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_QA_prompt_pubtime.pth
# python evaluate.py --batch_size 1 --dataset demo --model QA --prompt pubtime --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.0_model_QA_prompt_pubtime.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_0.0001_lab_0.4_model_QA_prompt_pubtime.pth
# python evaluate.py --batch_size 1 --dataset demo --model QA --prompt pubtime --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_QA_prompt_pubtime.pth
# python evaluate.py --batch_size 1 --dataset demo --model QA --prompt pubtime --from_checkpoint ../checkpoints/model_lr_0.0001_lab_0.4_model_QA_prompt_pubtime.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_5e-05_lab_0.0_model_QA+_prompt_QA+.pth
# python evaluate.py --batch_size 1 --dataset demo --model QA+ --prompt QA+ --from_checkpoint ../checkpoints/model_lr_5e-05_lab_0.0_model_QA+_prompt_QA+.pth
# python evaluate.py --batch_size 1 --dataset demo --model QA+ --prompt QA+ --from_checkpoint ../checkpoints/model_lr_5e-05_lab_0.0_model_QA+_prompt_QA+.pth --evaltrain
# wget --header="Referer: https://huggingface.co/" -P ../checkpoints https://huggingface.co/Wouter01/mT5Ranking/resolve/main/model_lr_5e-05_lab_0.4_model_QA+_prompt_QA+.pth
# python evaluate.py --batch_size 1 --dataset demo --model QA+ --prompt QA+ --from_checkpoint ../checkpoints/model_lr_5e-05_lab_0.4_model_QA+_prompt_QA+.pth
# python evaluate.py --batch_size 1 --dataset demo --model QA+ --prompt QA+ --from_checkpoint ../checkpoints/model_lr_5e-05_lab_0.4_model_QA+_prompt_QA+.pth --evaltrain