diff --git a/eval/eval_article_quality.py b/eval/eval_article_quality.py
index 71869381..bf24f492 100644
--- a/eval/eval_article_quality.py
+++ b/eval/eval_article_quality.py
@@ -170,7 +170,7 @@ def main(args):
                         help='Directory to store the evaluation results. '
                              'Each article evaluation will be saved as separate file named after {topic_name}.json')
     parser.add_argument('--pred-file-name', help='Name of the article file.')
-    parser.add_argument("--prompt-template-path", default="./prompts/eval_prometheus_no_ref.prompt",
+    parser.add_argument("--prompt-template-path", default="./eval_prometheus_no_ref.prompt",
                         help='path to evaluation prometheus prompt template')
     parser.add_argument("--rubric-path", default="./eval_rubric_5.json",
                         help='path to rubric json file')
diff --git a/eval/evaluation_prometheus.py b/eval/evaluation_prometheus.py
index b3d52f5a..3f20a9c4 100644
--- a/eval/evaluation_prometheus.py
+++ b/eval/evaluation_prometheus.py
@@ -85,8 +85,8 @@ def get_grading_dict(responses,
                      topic,
                      tokenizer,
                      model,
-                     prompt_template_path="./prompts/eval_prometheus_no_ref.prompt",
-                     rubric_path="./prompts/eval_rubric_5.json",
+                     prompt_template_path="./eval_prometheus_no_ref.prompt",
+                     rubric_path="./eval_rubric_5.json",
                      disable_sample=False,
                      temperature=0.01,
                      top_p=0.95,
@@ -165,7 +165,7 @@ def main(args):
     parser.add_argument('-o', '--output_path', required=True, help='Path to save the output JSON file')
     parser.add_argument('-t', "--topic", required=True,
                         help="Topic of the script your going to analyze")
-    parser.add_argument("--prompt_template_path", default="./prompts/eval_prometheus_no_ref.prompt",
+    parser.add_argument("--prompt_template_path", default="./eval_prometheus_no_ref.prompt",
                         help='path to evaluation prometheus prompt template')
     parser.add_argument("--rubric_path", default="./prompts/eval_rubric_5.json",
                         help='path to rubric json file')