diff --git a/promptsource/templates/SetFit/SentEval-CR/templates.yaml b/promptsource/templates/SetFit/SentEval-CR/templates.yaml
new file mode 100644
index 000000000..432e7c3ef
--- /dev/null
+++ b/promptsource/templates/SetFit/SentEval-CR/templates.yaml
@@ -0,0 +1,176 @@
+dataset: SentEval-CR
+templates:
+  3cb16f5d-1953-480c-bdea-785aa2d6aa34: !Template
+    answer_choices: Negative ||| Positive
+    id: 3cb16f5d-1953-480c-bdea-785aa2d6aa34
+    jinja: 'Review: {{text}}
+
+      Is the review positive or negative? |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Is_this_review
+    reference: ''
+  f32fb361-f9c5-42fa-b6ae-0af60fd00e6a: !Template
+    answer_choices: No ||| Yes
+    id: f32fb361-f9c5-42fa-b6ae-0af60fd00e6a
+    jinja: 'Based on this review, would the user recommend this product?
+
+      ===
+
+      Review: {{text}}
+
+      Answer: |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: User_recommend_this_product
+    reference: 'Reformulation equivalent to sentiment analysis: would the user
+      recommend this product?'
+  374e0086-077c-4be2-b533-e41d662cff5c: !Template
+    answer_choices: No ||| Yes
+    id: 374e0086-077c-4be2-b533-e41d662cff5c
+    jinja: 'Is this product review positive?
+
+      Review: {{text}}
+
+      Answer: |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Is_this_product_review_positive
+    reference: ''
+  3798de66-71c3-4264-9910-72cc4f6546c0: !Template
+    answer_choices: Yes ||| No
+    id: 3798de66-71c3-4264-9910-72cc4f6546c0
+    jinja: 'Review: {{text}}
+
+      Is this product review negative? |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: Is_this_review_negative
+    reference: ''
+  5a3bd9c8-f95d-4617-bdbd-15a46a810dcc: !Template
+    answer_choices: Negative ||| Positive
+    id: 5a3bd9c8-f95d-4617-bdbd-15a46a810dcc
+    jinja: 'Review: {{text}}
+
+      Does this product review convey a negative or positive sentiment? |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: convey_negative_or_positive_sentiment
+    reference: ''
+  5dda0a63-495f-4c65-8270-d3fb712d027b: !Template
+    answer_choices: Negative ||| Positive
+    id: 5dda0a63-495f-4c65-8270-d3fb712d027b
+    jinja: 'Is there a negative or positive tone to this product review?
+
+      ===
+
+      Review: {{text}}
+
+      Answer: |||
+
+      {{answer_choices[label]}}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: negative_or_positive_tone
+    reference: ''
+  13353be3-aa37-490a-b9cb-253ab119b8e9: !Template
+    answer_choices: dissatisfied ||| satisfied
+    id: 13353be3-aa37-490a-b9cb-253ab119b8e9
+    jinja: 'Here is a review left by a customer on a product. Would you say he was
+      {{answer_choices[1]}} or {{answer_choices[0]}}?
+
+      Review: {{text}}
+
+      |||
+
+      {{answer_choices[label]}} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: user_satisfied
+    reference: ''
+  d0517853-707c-46f6-80ff-2e8904e8657f: !Template
+    answer_choices: decrease ||| increase
+    id: d0517853-707c-46f6-80ff-2e8904e8657f
+    jinja: 'You are considering whether to buy a product. You look at the reviews.
+      Would the following review {{answer_choices[0]}} or {{answer_choices[1]}} the
+      chances of you buying the product?
+
+      Product review: {{text}}
+
+      |||
+
+      {{answer_choices[label]}} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: would_you_buy
+    reference: ''
+  c8aef874-4f2a-4382-9d3d-96fc52d3dba2: !Template
+    answer_choices: unflattering ||| flattering
+    id: c8aef874-4f2a-4382-9d3d-96fc52d3dba2
+    jinja: 'Product review: {{text}}
+
+      Would you say this review depicts the product in a {{answer_choices[1]}} or
+      {{answer_choices[0]}} light?
+
+      |||
+
+      {{answer_choices[label]}} '
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: flattering_or_not
+    reference: ''
diff --git a/promptsource/templates/SetFit/amazon_counterfactual_en/templates.yaml b/promptsource/templates/SetFit/amazon_counterfactual_en/templates.yaml
new file mode 100644
index 000000000..5314fc635
--- /dev/null
+++ b/promptsource/templates/SetFit/amazon_counterfactual_en/templates.yaml
@@ -0,0 +1,114 @@
+dataset: amazon_counterfactual_en
+templates:
+  6eb62aee-a983-4571-9d49-9836e685ee93: !Template
+    answer_choices: Yes ||| No
+    id: 6eb62aee-a983-4571-9d49-9836e685ee93
+    jinja: "{{text}} Is the statement factual? \n|||\n\
+      {{ answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: is_factual
+    reference: ''
+  6fc17a35-e1a3-4b5d-87d5-91d0fdf42d58: !Template
+    answer_choices: Yes ||| No
+    id: 6fc17a35-e1a3-4b5d-87d5-91d0fdf42d58
+    jinja: "{{text}} Does the statement describe a fact? \n|||\n\
+      {{ answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: describe_fact
+    reference: ''
+  e36e9d87-5366-4070-b95d-51ff5a890f4b: !Template
+    answer_choices: non-counterfactual ||| counterfactual
+    id: e36e9d87-5366-4070-b95d-51ff5a890f4b
+    jinja: "{{text}} Is the statement non-counterfactual or counterfactual? \n|||\n\
+      {{ answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: choice_text_before
+    reference: ''
+  044ee01d-a06d-47b6-9872-6c89f785a961: !Template
+    answer_choices: No ||| Yes
+    id: 044ee01d-a06d-47b6-9872-6c89f785a961
+    jinja: "{{text}} Is the statement counterfactual? \n|||\n\
+      {{ answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: is_counterfactual
+    reference: ''
+  20c40ee7-ba1e-4e65-bb55-1c7b4e4cbcf7: !Template
+    answer_choices: No ||| Yes
+    id: 20c40ee7-ba1e-4e65-bb55-1c7b4e4cbcf7
+    jinja: "{{text}} Does the sentence express an event that did not happen? \n|||\n\
+      {{ answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: did_not_happen
+    reference: ''
+  920ace8e-e063-4edf-b4f6-ac1a1e9f8559: !Template
+    answer_choices: Yes ||| No
+    id: 920ace8e-e063-4edf-b4f6-ac1a1e9f8559
+    jinja: "{{text}} Does this describe an actual event? \n|||\n\
+      {{ answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: actual_event
+    reference: ''
+  9e2a56f5-a11d-497c-b02b-2ccc3e760503: !Template
+    answer_choices: Yes ||| No
+    id: 9e2a56f5-a11d-497c-b02b-2ccc3e760503
+    jinja: "{{text}} Does the sentence contain events that did not or cannot take place? \n|||\n\
+      {{ answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: take_place
+    reference: ''
+  40c0007f-78be-43d0-80c8-df22d37ee64b: !Template
+    answer_choices: non-counterfactual ||| counterfactual
+    id: 40c0007f-78be-43d0-80c8-df22d37ee64b
+    jinja: "Is the label for the following sentence non-counterfactual or counterfactual? {{text}} \n|||\n\
+      {{ answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: choice_text_after
+    reference: ''
diff --git a/promptsource/templates/SetFit/enron_spam/templates.yaml b/promptsource/templates/SetFit/enron_spam/templates.yaml
new file mode 100644
index 000000000..6f37e683b
--- /dev/null
+++ b/promptsource/templates/SetFit/enron_spam/templates.yaml
@@ -0,0 +1,75 @@
+dataset: enron_spam
+templates:
+  9a9d877c-aeb1-4808-868d-47ac9627f333: !Template
+    answer_choices: not spam ||| spam
+    id: 9a9d877c-aeb1-4808-868d-47ac9627f333
+    jinja: "What is the spam label for the following email message? {{text}} \n|||\n\
+      {{ answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: spam_label
+    reference: ''
+  59f74c9b-909f-4fe7-b822-334184a51d3f: !Template
+    answer_choices: True ||| False
+    id: 59f74c9b-909f-4fe7-b822-334184a51d3f
+    jinja: "Is this email message considered {{\"ham\"}} (i.e. not spam)? \n{{text}}\n\
+      |||\n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: ham_True_False
+    reference: ''
+  29cc5489-e0d7-4c41-8bc2-a1735f91ca95: !Template
+    answer_choices: ham ||| spam
+    id: 29cc5489-e0d7-4c41-8bc2-a1735f91ca95
+    jinja: 'Is the label for the following email message {{"ham"}} (not spam) or {{"spam"}}?
+      {{text}}
+
+      |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: is_the_label
+    reference: ''
+  58a62a45-4cf7-4d57-aa70-0f8fb0a3043c: !Template
+    answer_choices: not spam ||| spam
+    id: 58a62a45-4cf7-4d57-aa70-0f8fb0a3043c
+    jinja: "Should the following email message be marked as \"spam\" or \"not spam\"\
+      ? {{text}} \n|||\n{{ answer_choices[label] }}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: true
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: marked_as
+    reference: ''
+  5e7a18eb-8692-4312-b496-67d1e64f91fe: !Template
+    answer_choices: False ||| True
+    id: 5e7a18eb-8692-4312-b496-67d1e64f91fe
+    jinja: "Is this email message considered {{\"spam\"}}? \n{{text}}\n|||\n{{answer_choices[label]}}"
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: spam_True_False
+    reference: ''
\ No newline at end of file
diff --git a/promptsource/templates/SetFit/sst5/templates.yaml b/promptsource/templates/SetFit/sst5/templates.yaml
new file mode 100644
index 000000000..30c42ddac
--- /dev/null
+++ b/promptsource/templates/SetFit/sst5/templates.yaml
@@ -0,0 +1,149 @@
+dataset: sst5
+templates:
+  b969303e-d0ab-4ba5-ba0d-9a364b495313: !Template
+    answer_choices: very negative ||| negative ||| neutral ||| positive ||| very positive
+    id: b969303e-d0ab-4ba5-ba0d-9a364b495313
+    jinja: '{{ text }}
+
+      So I would rate it ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: so_i_would
+    reference: ''
+  157770ee-e4d5-4e1b-b4bc-5ddd78e0f057: !Template
+    answer_choices: very negative ||| negative ||| neutral ||| positive ||| very positive
+    id: 157770ee-e4d5-4e1b-b4bc-5ddd78e0f057
+    jinja: '{{ text }}
+
+      ===
+
+      Based on that, my rating is ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: based_on_that
+    reference: ''
+  7c5734b2-c05c-4bb0-a409-63efaed7ec7e: !Template
+    answer_choices: very negative ||| negative ||| neutral ||| positive ||| very positive
+    id: 7c5734b2-c05c-4bb0-a409-63efaed7ec7e
+    jinja: 'Review text:
+
+      {{ text }}
+
+
+      Score: |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: format_star
+    reference: simulating webpage
+  b262b6a5-8b0e-4be2-bf05-1b34ae9ee757: !Template
+    answer_choices: very negative ||| negative ||| neutral ||| positive ||| very positive
+    id: b262b6a5-8b0e-4be2-bf05-1b34ae9ee757
+    jinja: '{{ text }} My opinion of this movie is ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: this_movie
+    reference: ''
+  02b9e30e-b096-4ce9-b621-8ceb1dc24aa6: !Template
+    answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+    id: 02b9e30e-b096-4ce9-b621-8ceb1dc24aa6
+    jinja: 'Review text:
+
+      {{ text }}
+
+
+      Review score (between 1 and 5): |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: format_score
+    reference: Simulating webpage
+  08b34f5d-a195-428a-ad7f-4f6a56790e9c: !Template
+    answer_choices: 1 ||| 2 ||| 3 ||| 4 ||| 5
+    id: 08b34f5d-a195-428a-ad7f-4f6a56790e9c
+    jinja: 'Review: {{text}}
+
+      On a scale of 1 to 5, I would give this movie ||| {{ answer_choices[label]
+      }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: on_a_scale
+    reference: ''
+  7652def8-ce5a-4de6-a381-32f77a9596d5: !Template
+    answer_choices: very negative ||| negative ||| neutral ||| positive ||| very positive
+    id: 7652def8-ce5a-4de6-a381-32f77a9596d5
+    jinja: 'Review text:
+
+      {{ text }}
+
+
+      Review rating: |||
+
+      {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: format_rating
+    reference: It's simulating the format of a webpage.
+  3798de66-71c3-4264-9910-72cc4f6546c2: !Template
+    answer_choices: very negative ||| negative ||| neutral ||| positive ||| very positive
+    id: 3798de66-71c3-4264-9910-72cc4f6546c2
+    jinja: 'How do you feel about the following sentence? {{ text }} ||| {{ answer_choices[label] }}'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: feel
+    reference: ''
+  5dda0a63-495f-4c65-8270-d3fb712d027c: !Template
+    answer_choices: terrible ||| bad ||| okay ||| good ||| great
+    id: 5dda0a63-495f-4c65-8270-d3fb712d027c
+    jinja: '{{ text }} This movie is a very ||| {{ answer_choices[label] }} one'
+    metadata: !TemplateMetadata
+      choices_in_prompt: false
+      languages:
+      - en
+      metrics:
+      - Accuracy
+      original_task: true
+    name: this_movie_is_a_very
+    reference: ''
\ No newline at end of file
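
For reviewers, here is a minimal sketch of how one of these templates renders once the files land, using promptsource's `DatasetTemplates` API. The lookup key `"SetFit/SentEval-CR"` and the example record are assumptions for illustration; adjust the key to match how this folder is registered and the record to the dataset's actual fields.

```python
from promptsource.templates import DatasetTemplates

# Load the prompts added in this PR.
# Assumption: the SetFit/SentEval-CR folder is addressed by this key.
templates = DatasetTemplates("SetFit/SentEval-CR")

# Pick one template by its `name` field from templates.yaml.
template = templates["Is_this_review"]

# Hypothetical record with the `text` and `label` fields the templates expect.
example = {"text": "The battery life of this camera is great.", "label": 1}

# apply() renders the Jinja template and splits on "|||" into (input, target).
input_text, target = template.apply(example)
print(input_text)  # Review: The battery life ... Is the review positive or negative?
print(target)      # Positive
```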