 #!/usr/bin/python3
 
-import os
-import re
-from os import path
-import json
-import argparse
-from collections import OrderedDict
-import uuid
+print('''For the latest version of this script, please go to:
 
-import canvas
-
-def file_name_only(name):
-    return re.sub('[\W_]+', '', name)
-
-parser = argparse.ArgumentParser()
-canvas.Canvas.add_arguments(parser, quiz=True)
-parser.add_argument("pl_repo",
-                    help="Directory where PrairieLearn repo is stored")
-parser.add_argument("pl_course_instance",
-                    help="Course instance where assessment will be created")
-parser.add_argument("-s", "--assessment-set", default="Quiz",
-                    help="Assessment set to assign this assessment to")
-parser.add_argument("-n", "--assessment-number", default="",
-                    help="Assessment number to assign to this assessment")
-parser.add_argument("--topic", default="None",
-                    help="Topic to assign to the imported questions")
-args = parser.parse_args()
-canvas = canvas.Canvas(args=args)
-
-if not os.path.exists(os.path.join(args.pl_repo, 'infoCourse.json')):
-    raise Exception("Provided directory is not a PrairieLearn repository")
-
-print('Reading data from Canvas...')
-course = canvas.course(args.course, prompt_if_needed=True)
-print('Using course: %s / %s' % (course['term']['name'],
-                                 course['course_code']))
-
-quiz = course.quiz(args.quiz, prompt_if_needed=True)
-print('Using quiz: %s' % (quiz['title']))
-
-# Reading questions
-print('Retrieving quiz questions from Canvas...')
-(questions, groups) = quiz.questions()
-
-questions_dir = os.path.join(args.pl_repo, 'questions', file_name_only(quiz['title']))
-if not os.path.isdir(questions_dir):
-    os.makedirs(questions_dir)
-assessments_dir = os.path.join(args.pl_repo, 'courseInstances', args.pl_course_instance, 'assessments')
-if not os.path.isdir(assessments_dir):
-    os.makedirs(assessments_dir)
-
-quiz_name = os.path.join(assessments_dir, file_name_only(quiz['title']))
-if os.path.exists(quiz_name):
-    suffix = 1
-    while os.path.exists(f'{quiz_name}_{suffix}'):
-        suffix += 1
-    quiz_name = f'{quiz_name}_{suffix}'
-os.makedirs(quiz_name)
-
-pl_quiz = {
-    'uuid': str(uuid.uuid4()),
-    'type': 'Exam' if quiz['time_limit'] else 'Homework',
-    'title': quiz['title'],
-    'text': quiz['description'],
-    'set': args.assessment_set,
-    'number': args.assessment_number,
-    'allowAccess': [{
-        'startDate': quiz['unlock_at'],
-        'endDate': quiz['lock_at'],
-        'credit': 100,
-        'timeLimitMin': quiz['time_limit'],
-        'showClosedAssessment': True,
-        'showClosedAssessmentScore': True
-    }],
-    'zones': [{
-        'questions': []
-    }],
-    'comment': f'Imported from Canvas, quiz {quiz["id"]}'
-}
-
-if quiz['access_code']:
-    pl_quiz['allowAccess'][0]['password'] = quiz['access_code']
-
-for question in questions.values():
-    print(f'Handling question {question["id"]}...')
-    print(question['question_text'])
-    question_title = input('Question title: ')
-    question_name = file_name_only(question_title)
-    suffix = 0
-    while os.path.exists(os.path.join(questions_dir, question_name)):
-        suffix += 1
-        question_name = f'{file_name_only(question_title)}_{suffix}'
-    question_dir = os.path.join(questions_dir, question_name)
-    os.makedirs(question_dir)
-
-    pl_quiz['zones'][0]['questions'].append({
-        'id': file_name_only(quiz['title']) + '/' + question_name,
-        'points': question['points_possible']
-    })
-
-    with open(os.path.join(question_dir, 'info.json'), 'w') as info:
-        obj = {
-            'uuid': str(uuid.uuid4()),
-            'type': 'v3',
-            'title': question_title,
-            'topic': args.topic,
-            'tags': ['fromcanvas']
-        }
-        if question['question_type'] == 'text_only_question' or \
-           question['question_type'] == 'essay_question':
-            obj['gradingMethod'] = 'Manual'
-        json.dump(obj, info, indent=4)
-
-    with open(os.path.join(question_dir, 'question.html'), 'w') as template:
-        if question['question_type'] == 'text_only_question':
-            template.write('<pl-question-panel>\n<p>\n')
-            template.write(question['question_text'] + '\n')
-            template.write('</p>\n</pl-question-panel>\n')
-        elif question['question_type'] == 'essay_question':
-            template.write('<pl-question-panel>\n<p>\n')
-            template.write(question['question_text'] + '\n')
-            template.write('</p>\n</pl-question-panel>\n')
-            template.write('<pl-file-editor file-name="answer.txt"></pl-file-editor>\n')
-            template.write('<pl-submission-panel>\n')
-            template.write('  <pl-feedback></pl-feedback>\n')
-            template.write('  <pl-file-preview></pl-file-preview>\n')
-            template.write('</pl-submission-panel>\n')
-        elif question['question_type'] == 'multiple_answers_question':
-            template.write('<pl-question-panel>\n<p>\n')
-            template.write(question['question_text'] + '\n')
-            template.write('</p>\n</pl-question-panel>\n')
-            template.write('<pl-checkbox answers-name="checkbox">\n')
-            for answer in question['answers']:
-                if answer['weight']:
-                    template.write('  <pl-answer correct="true">')
-                else:
-                    template.write('  <pl-answer>')
-                template.write(answer['text'] + '</pl-answer>\n')
-            template.write('</pl-checkbox>\n')
-        elif question['question_type'] == 'true_false_question' or question['question_type'] == 'multiple_choice_question':
-            template.write('<pl-question-panel>\n<p>\n')
-            template.write(question['question_text'] + '\n')
-            template.write('</p>\n</pl-question-panel>\n')
-            template.write('<pl-multiple-choice answers-name="mc">\n')
-            for answer in question['answers']:
-                if answer['weight']:
-                    template.write('  <pl-answer correct="true">')
-                else:
-                    template.write('  <pl-answer>')
-                template.write(answer['text'] + '</pl-answer>\n')
-            template.write('</pl-multiple-choice>\n')
-        elif question['question_type'] == 'numerical_question':
-            template.write('<pl-question-panel>\n<p>\n')
-            template.write(question['question_text'] + '\n')
-            template.write('</p>\n</pl-question-panel>\n')
-            answer = question['answers'][0]
-            if answer['numerical_answer_type'] == 'exact_answer' and abs(answer['exact'] - int(answer['exact'])) < 0.001 and answer['margin'] == 0:
-                template.write(f'<pl-integer-input answers-name="value" correct-answer="{int(answer["exact"])}"></pl-integer-input>\n')
-            elif answer['numerical_answer_type'] == 'exact_answer':
-                template.write(f'<pl-number-input answers-name="value" correct-answer="{answer["exact"]}" atol="{answer["margin"]}"></pl-number-input>\n')
-            elif answer['numerical_answer_type'] == 'range_answer':
-                average = (answer["end"] + answer["start"]) / 2
-                margin = abs(answer["end"] - average)
-                template.write(f'<pl-number-input answers-name="value" correct-answer="{average}" atol="{margin}"></pl-number-input>\n')
-            elif answer['numerical_answer_type'] == 'precision_answer':
-                template.write(f'<pl-number-input answers-name="value" correct-answer="{answer["approximate"]}" comparison="sigfig" digits="{answer["precision"]}"></pl-number-input>\n')
-            else:
-                print(f'Invalid numerical answer type: {answer["numerical_answer_type"]}')
-                template.write('<pl-number-input answers-name="value"></pl-number-input>\n')
-        elif question['question_type'] == 'calculated_question':
-            for variable in question['variables']:
-                question['question_text'] = question['question_text'].replace(f'[{variable["name"]}]', '{{params.' + variable["name"] + '}}')
-            template.write('<pl-question-panel>\n<p>\n')
-            template.write(question['question_text'] + '\n')
-            template.write('</p>\n</pl-question-panel>\n')
-            answers_name = question['formulas'][-1]['formula'].split('=')[0].strip()
-            template.write(f'<pl-number-input answers-name="{answers_name}" comparison="decdig" digits="{question["formula_decimal_places"]}"></pl-number-input>\n')
-        elif question['question_type'] == 'short_answer_question':
-            template.write('<pl-question-panel>\n<p>\n')
-            template.write(question['question_text'] + '\n')
-            template.write('</p>\n</pl-question-panel>\n')
-            answer = question['answers'][0]
-            template.write(f'<pl-string-input answers-name="input" correct-answer="{answer["text"]}"></pl-string-input>\n')
-
-        elif question['question_type'] == 'fill_in_multiple_blanks_question':
-            question_text = question['question_text']
-            options = {}
-            for answer in question['answers']:
-                if answer['blank_id'] not in options:
-                    options[answer['blank_id']] = []
-                options[answer['blank_id']].append(answer)
-            for answer_id, answers in options.items():
-                question_text = question_text.replace(f'[{answer_id}]', f'<pl-string-input answers-name="{answer_id}" correct-answer="{answers[0]["text"]}" remove-spaces="true" ignore-case="true" display="inline"></pl-string-input>')
-
-            template.write(question_text + '\n')
-
-        elif question['question_type'] == 'matching_question':
-            template.write('<pl-question-panel>\n<p>\n')
-            template.write(question['question_text'] + '\n')
-            template.write('</p>\n</pl-question-panel>\n')
-            template.write('<pl-matching answers-name="match">\n')
-            for answer in question['answers']:
-                template.write(f'  <pl-statement match="m{answer["match_id"]}">{answer["text"]}</pl-statement>\n')
-            for match in question['matches']:
-                template.write(f'  <pl-option name="m{match["match_id"]}">{match["text"]}</pl-option>\n')
-            template.write('</pl-matching>\n')
-
-        elif question['question_type'] == 'multiple_dropdowns_question':
-            blanks = {}
-            for answer in question['answers']:
-                if answer['blank_id'] not in blanks:
-                    blanks[answer['blank_id']] = []
-                blanks[answer['blank_id']].append(answer)
-            question_text = question['question_text']
-            for blank, answers in blanks.items():
-                dropdown = f'<pl-dropdown answers-name="{blank}">\n'
-                for answer in answers:
-                    dropdown += '  <pl-answer'
-                    if answer['weight'] > 0: dropdown += ' correct="true"'
-                    dropdown += f'>{answer["text"]}</pl-answer>\n'
-                dropdown += '</pl-dropdown>'
-                question_text = question_text.replace(f'[{blank}]', dropdown)
-            template.write('<pl-question-panel>\n<p>\n')
-            template.write(question_text + '\n')
-            template.write('</p>\n</pl-question-panel>\n')
-
-        else:
-            print('Unsupported question type: ' + question['question_type'])
-            template.write('<pl-question-panel>\n<p>\n')
-            template.write(question['question_text'] + '\n')
-            template.write('</p>\n</pl-question-panel>\n')
-            template.write(json.dumps(question, indent=4))
-
-        if question['correct_comments'] or question['neutral_comments']:
-            template.write('<pl-answer-panel>\n<p>\n')
-            if question.get('correct_comments_html', False):
-                template.write(question['correct_comments_html'] + '\n')
-            elif question['correct_comments']:
-                template.write(question['correct_comments'] + '\n')
-            if question.get('neutral_comments_html', False):
-                template.write(question['neutral_comments_html'] + '\n')
-            elif question['neutral_comments']:
-                template.write(question['neutral_comments'] + '\n')
-            template.write('</p>\n</pl-answer-panel>\n')
-
-    if question['question_type'] == 'calculated_question':
-        with open(os.path.join(question_dir, 'server.py'), 'w') as script:
-            script.write('import random\n\n')
-            script.write('def generate(data):\n')
-            for variable in question['variables']:
-                if not variable.get('scale', False):
-                    script.write(f'    {variable["name"]} = random.randint({int(variable["min"])}, {int(variable["max"])})\n')
-                else:
-                    multip = 10 ** variable["scale"]
-                    script.write(f'    {variable["name"]} = random.randint({int(variable["min"] * multip)}, {int(variable["max"] * multip)}) / {multip}\n')
-            for formula in question['formulas']:
-                script.write(f'    {formula["formula"]}\n')
-            for variable in question['variables']:
-                script.write(f'    data["params"]["{variable["name"]}"] = {variable["name"]}\n')
-            answer = question["formulas"][-1]['formula'].split('=')[0].strip()
-            script.write(f'    data["correct_answers"]["{answer}"] = {answer}\n')
-
-with open(os.path.join(quiz_name, 'infoAssessment.json'), 'w') as assessment:
-    json.dump(pl_quiz, assessment, indent=4)
-
-print('\nDONE.')
+\thttps://github.com/PrairieLearn/PrairieLearn/tree/master/tools/question_converters/canvas
+''')