diff --git a/README.md b/README.md
index 7866f36..1414ea0 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,7 @@
 # quiz
+Install the required packages:
+`cargo install mdbook; cargo install mdbook-quiz`
+
 
 To run locally (in dl-quiz folder):
-`mdbook serve --open`
\ No newline at end of file
+`mdbook serve --open`
diff --git a/dl-quiz/src/quiz_21.toml b/dl-quiz/src/quiz_21.toml
index 1080984..f14edda 100644
--- a/dl-quiz/src/quiz_21.toml
+++ b/dl-quiz/src/quiz_21.toml
@@ -1,6 +1,6 @@
 [[questions]]
 type = "MultipleChoice"
-prompt.prompt = "We run the following code:<pre> $ a = np.ones(5) \n$ b = trorch.from_numpy(a) \n$ print(b) \n tensor([1., 1., 1., 1., 1.], dtype=torch.float64)  </pre>\n What is the output of `a.dtype`?"
+prompt.prompt = "We run the following code:<pre> $ a = np.ones(5) \n$ b = torch.from_numpy(a) \n$ print(b) \n tensor([1., 1., 1., 1., 1.], dtype=torch.float64)  </pre>\n What is the output of `a.dtype`?"
 prompt.choices = [
   "`float8`",
   "`float32`",
@@ -13,7 +13,7 @@ context = """
 
 [[questions]]
 type = "MultipleChoice"
-prompt.prompt = "We continue with:<pre> $ b = b.to(torch.uint8) \n$ a[2] = 0 \n$ b[3] = 5 </pre>\n What is the output of `print(b)`?"
+prompt.prompt = "We run the following code:<pre> $ a = np.ones(5) \n$ b = torch.from_numpy(a) \n$ print(b) \n tensor([1., 1., 1., 1., 1.], dtype=torch.float64)  </pre>\n We continue with:<pre> $ b = b.to(torch.uint8) \n$ a[2] = 0 \n$ b[3] = 5 </pre>\n What is the output of `print(b)`?"
 prompt.choices = [
   "`[1. 1. 0. 1. 1.]`",
   "`[1. 0. 5. 1. 1.]`",
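For reference, a minimal sketch of the behaviour quiz_21 probes, assuming `numpy` and `torch` are installed (illustrative only, not part of either TOML file):

```python
import numpy as np
import torch

a = np.ones(5)             # NumPy defaults to float64
b = torch.from_numpy(a)    # shares a's buffer and inherits float64
print(a.dtype)             # float64
print(b.dtype)             # torch.float64

c = b.to(torch.uint8)      # .to() with a different dtype returns a copy
a[2] = 0                   # visible through b (shared memory) ...
c[3] = 5                   # ... while a and b are unaffected by writes to c
print(b)                   # tensor([1., 1., 0., 1., 1.], dtype=torch.float64)
print(c)                   # tensor([1, 1, 1, 5, 1], dtype=torch.uint8)
```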
diff --git a/dl-quiz/src/quiz_22.toml b/dl-quiz/src/quiz_22.toml
index b0780a1..4144f15 100644
--- a/dl-quiz/src/quiz_22.toml
+++ b/dl-quiz/src/quiz_22.toml
@@ -12,7 +12,7 @@ context = """
 
 [[questions]]
 type = "MultipleChoice"
-prompt.prompt = "We continue with<pre> $ b = b.to(device) \n$ b[0] = 2 \n$ print(b) \n tensor([2., 1., 1., 1., 1.], device='cuda:0', dtype=torch.float64) </pre>\n What is the output of `print(a)`?"
+prompt.prompt = "We run the following code:<pre> $ a = np.ones(5) \n$ b = torch.from_numpy(a) \n$ torch.cuda.is_available() \nTrue \n$ device = torch.device('cuda') \n$ b.to(device) </pre>\n We continue with<pre> $ b = b.to(device) \n$ b[0] = 2 \n$ print(b) \n tensor([2., 1., 1., 1., 1.], device='cuda:0', dtype=torch.float64) </pre>\n What is the output of `print(a)`?"
 prompt.choices = [
   "`[2., 1., 1., 1., 1.]`",
   "`[1., 1., 1., 1., 1.]`"
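Similarly, a sketch of the device-transfer behaviour behind quiz_22, assuming a CUDA device is available as in the prompt:

```python
import numpy as np
import torch

a = np.ones(5)
b = torch.from_numpy(a)        # CPU tensor sharing a's buffer

if torch.cuda.is_available():  # guard so the sketch is also safe on CPU-only machines
    device = torch.device('cuda')
    b = b.to(device)           # a new tensor on the GPU; the link to a is broken
    b[0] = 2
    print(b)                   # tensor([2., 1., 1., 1., 1.], device='cuda:0', dtype=torch.float64)
    print(a)                   # [1. 1. 1. 1. 1.] -- the NumPy array is untouched
```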
diff --git a/dl-quiz/src/quiz_23.toml b/dl-quiz/src/quiz_23.toml
index dcab2dc..7947af7 100644
--- a/dl-quiz/src/quiz_23.toml
+++ b/dl-quiz/src/quiz_23.toml
@@ -1,6 +1,6 @@
 [[questions]]
 type = "MultipleChoice"
-prompt.prompt = "We run the following code:<pre> $ b = torch.tensor([1, 1, 1, 5, 1], dtype=torch.float)</pre>\n What is the output of `print(b.requires_grad)`?"
+prompt.prompt = "We run the following code:<pre>$ b = torch.tensor([1, 1, 1, 5, 1], dtype=torch.float)</pre>\n What is the output of `print(b.requires_grad)`?"
 prompt.choices = [
   "`True`",
   "`False`",
@@ -14,7 +14,7 @@ Default value is `False`.
 
 [[questions]]
 type = "MultipleChoice"
-prompt.prompt = "We continue with:<pre> $ b.requires_grad_(True) \ntensor([1., 1., 1., 5., 1.], requires_grad=True)</pre>\n What is the output of `print(b.grad)`?"
+prompt.prompt = "We run the following code:<pre>$ b = torch.tensor([1, 1, 1, 5, 1], dtype=torch.float)</pre>\n We continue with:<pre>$ b.requires_grad_(True) \ntensor([1., 1., 1., 5., 1.], requires_grad=True)</pre>\n What is the output of `print(b.grad)`?"
 prompt.choices = [
   "`0`",
   "`1`",
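A sketch of the two defaults the first questions of quiz_23 ask about:

```python
import torch

b = torch.tensor([1, 1, 1, 5, 1], dtype=torch.float)
print(b.requires_grad)    # False: tensors do not track gradients by default
print(b.grad)             # None: nothing has been accumulated yet

b.requires_grad_(True)    # in-place toggle; b is a leaf that now tracks gradients
print(b.grad)             # still None until some backward() call populates it
```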
@@ -28,7 +28,7 @@ Default value is `None`.
 
 [[questions]]
 type = "MultipleChoice"
-prompt.prompt = "We continue with:<pre> $ l = sum((b-1)**2) \n$ l.backward() \n$ print(l) \ntensor(16., grad_fn=<AddBackward0>)</pre>\n What is the output of `print(b.grad)`?"
+prompt.prompt = "We run the following code:<pre>$ b = torch.tensor([1, 1, 1, 5, 1], dtype=torch.float)\n$ b.requires_grad_(True) \ntensor([1., 1., 1., 5., 1.], requires_grad=True)</pre>\n We continue with:<pre>$ l = sum((b-1)**2) \n$ l.backward() \n$ print(l) \ntensor(16., grad_fn=<AddBackward0>)</pre>\n What is the output of `print(b.grad)`?"
 prompt.choices = [
   "`tensor([2.])`",
   "`tensor([2., 2., 2., 10., 2.])`",
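A sketch of the gradient computation behind this hunk; the derivative of `sum((b-1)**2)` with respect to each `b_i` is `2*(b_i - 1)`:

```python
import torch

b = torch.tensor([1, 1, 1, 5, 1], dtype=torch.float, requires_grad=True)
l = sum((b - 1) ** 2)    # Python's built-in sum over the tensor's elements
l.backward()
print(l)                 # tensor(16., grad_fn=<AddBackward0>)
print(b.grad)            # tensor([0., 0., 0., 8., 0.]), i.e. 2*(b_i - 1)
```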
@@ -41,7 +41,7 @@ The derivative of each component is `2(b_i-1)`.
 
 [[questions]]
 type = "MultipleChoice"
-prompt.prompt = "We continue with:<pre> $ m = sum((b.data -1)**2) </pre>\n What is the output of `m.backward()`?"
+prompt.prompt = "We run the following code:<pre>$ b = torch.tensor([1, 1, 1, 5, 1], dtype=torch.float)\n$ b.requires_grad_(True) \ntensor([1., 1., 1., 5., 1.], requires_grad=True)\n$ l = sum((b-1)**2) \n$ l.backward() \n$ print(l) \ntensor(16., grad_fn=<AddBackward0>)</pre>\n We continue with:<pre> $ m = sum((b.data -1)**2) \n </pre>\n What is the output of `m.backward()`?"
 prompt.choices = [
   "same as for `l`",
   "`Error`"
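A sketch of why backpropagating through `b.data` fails: `.data` detaches from the autograd graph, so the result has no `grad_fn`:

```python
import torch

b = torch.tensor([1, 1, 1, 5, 1], dtype=torch.float, requires_grad=True)
m = sum((b.data - 1) ** 2)   # b.data is detached, so m does not require grad
print(m.requires_grad)       # False
m.backward()                 # raises a RuntimeError because m has no grad_fn
```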
@@ -53,7 +53,7 @@ You cannot compute gradient if you detach the `data` from the `tensor`.
 
 [[questions]]
 type = "MultipleChoice"
-prompt.prompt = "We continue with:<pre> $ n = sum((b-1)**2) \n$ n.backward() </pre>\n What is the output of `print(b.grad)`?"
+prompt.prompt = "We run the following code:<pre>$ b = torch.tensor([1, 1, 1, 5, 1], dtype=torch.float)\n$ b.requires_grad_(True) \ntensor([1., 1., 1., 5., 1.], requires_grad=True)\n$ l = sum((b-1)**2) \n$ l.backward() \n$ print(l) \ntensor(16., grad_fn=<AddBackward0>) \n$ m = sum((b.data -1)**2)</pre>\n We continue with:<pre> $ n = sum((b-1)**2) \n$ n.backward() </pre>\n What is the output of `print(b.grad)`?"
 prompt.choices = [
   "`tensor([0., 0., 0., 8., 0.])`",
   "`tensor([0., 0., 0., 16., 0.])`"
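A sketch of the accumulation behaviour ("Gradients are added by default"): each `backward()` adds into `.grad` rather than overwriting it:

```python
import torch

b = torch.tensor([1, 1, 1, 5, 1], dtype=torch.float, requires_grad=True)

l = sum((b - 1) ** 2)
l.backward()
print(b.grad)            # tensor([0., 0., 0., 8., 0.])

n = sum((b - 1) ** 2)    # a fresh graph over the same leaf b
n.backward()
print(b.grad)            # tensor([0., 0., 0., 16., 0.]) -- added to the previous gradient

b.grad.zero_()           # reset in place when accumulation is not wanted
```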
@@ -65,7 +65,7 @@ Gradients are added by default.
 
 [[questions]]
 type = "MultipleChoice"
-prompt.prompt = "We continue with:<pre> $ n.requires_grad_(True) \n$ n.backward()</pre>\n What is the output of `print(b.grad)`?"
+prompt.prompt = "We run the following code:<pre>$ b = torch.tensor([1, 1, 1, 5, 1], dtype=torch.float)\n$ b.requires_grad_(True) \ntensor([1., 1., 1., 5., 1.], requires_grad=True)\n$ l = sum((b-1)**2) \n$ l.backward() \n$ print(l) \ntensor(16., grad_fn=<AddBackward0>)\n$ m = sum((b.data -1)**2) \n$ n = sum((b-1)**2) \n$ n.backward() \n </pre>\n We continue with:<pre> $ n.requires_grad_(True) \n$ n.backward()</pre>\n What is the output of `print(b.grad)`?"
 prompt.choices = [
   "`tensor([0., 0., 0., 16., 0.])`",
   "`tensor([0., 0., 0., 32., 0.])`"
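A sketch of the "backward through the graph a second time" failure this hunk's context refers to, and the `retain_graph` escape hatch:

```python
import torch

b = torch.tensor([1, 1, 1, 5, 1], dtype=torch.float, requires_grad=True)

n = sum((b - 1) ** 2)
n.backward()             # frees the graph's saved tensors after the pass
# n.backward()           # calling it again would raise
                         # RuntimeError: Trying to backward through the graph a second time ...

m = sum((b - 1) ** 2)
m.backward(retain_graph=True)   # keep the graph alive
m.backward()                    # a second pass is now allowed and accumulates into b.grad
print(b.grad)                   # tensor([0., 0., 0., 24., 0.])
```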
@@ -77,7 +77,7 @@ There is an error when at `n.backward()`: `RuntimeError: Trying to backward thro
 
 [[questions]]
 type = "MultipleChoice"
-prompt.prompt = "We continue with:<pre> $ m.requires_grad_(True) \n$ m.backward()</pre>\n What is the output of `print(b.grad)`?"
+prompt.prompt = "We run the following code:<pre>$ b = torch.tensor([1, 1, 1, 5, 1], dtype=torch.float)\n$ b.requires_grad_(True) \ntensor([1., 1., 1., 5., 1.], requires_grad=True)\n$ l = sum((b-1)**2) \n$ l.backward() \n$ print(l) \ntensor(16., grad_fn=<AddBackward0>) \n$ m = sum((b.data -1)**2) \n$ n = sum((b-1)**2) \n$ n.backward() \n$ n.requires_grad_(True) \n$ n.backward()</pre>\n We continue with:<pre> $ m.requires_grad_(True) \n$ m.backward()</pre>\n What is the output of `print(b.grad)`?"
 prompt.choices = [
   "there is an error at `m.backward()`",
   "`tensor([0., 0., 0., 8., 0.])`",