-
Notifications
You must be signed in to change notification settings - Fork 48
/
Copy pathmemory_safety_inner.cc
150 lines (131 loc) · 4.77 KB
/
memory_safety_inner.cc
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
// Copyright Microsoft and CHERIoT Contributors.
// SPDX-License-Identifier: MIT
#define TEST_NAME "Memory safety (inner compartment)"
#include "memory_safety.h"
#include <cheri.hh>
#include <debug.hh>
#include <errno.h>
/// Expose debugging features unconditionally for this compartment.
using Debug = ConditionalDebug<true, "Memory safety compartment">;
using namespace CHERI;
// Most recent heap allocation made by the test cases below.  The error
// handler frees it after a fault is detected so each test run starts clean.
char *allocation;
// Global used by the StoreStackPointerToGlobal case: storing a stack-derived
// capability here strips its tag (asserted below via `!tmp.is_valid()`).
// Volatile so the store and the subsequent re-load are not optimized away.
static char *volatile volatilePointer;
/// Compartment error handler: invoked by the switcher when a CHERI fault is
/// taken in this compartment.  Logs the faulting instruction and register,
/// releases the test's heap allocation (if any), and force-unwinds so the
/// caller observes the fault as an error return.
///
/// @param frame   Saved register state at the point of the fault.
/// @param mcause  Machine trap cause (unused here beyond logging context).
/// @param mtval   Encoded CHERI fault information (exception code + register).
extern "C" ErrorRecoveryBehaviour
compartment_error_handler(ErrorState *frame, size_t mcause, size_t mtval)
{
	auto [exceptionCode, registerNumber] = extract_cheri_mtval(mtval);
	void **faultingRegister = frame->get_register_value(registerNumber);
	Debug::Invariant(faultingRegister != nullptr,
	                 "get_register_value returned NULL unexpectedly");
	Debug::log("Detected error in instruction {}", frame->pcc);
	Debug::log("Detected {}: Register {} contained "
	           "invalid value: {}",
	           exceptionCode,
	           registerNumber,
	           *faultingRegister);
	/*
	 * `free` already checks for non-NULL and capability validity.  Clear the
	 * global afterwards so that a later fault (in a test case that does not
	 * allocate) cannot hand a stale, possibly-reallocated pointer to `free`
	 * again.
	 */
	free(allocation);
	allocation = nullptr;
	return ErrorRecoveryBehaviour::ForceUnwind;
}
/// Entry point that deliberately triggers one class of memory-safety bug.
/// Each case is expected to trap (the CPU enforces capability bounds and
/// validity), so control should reach the compartment error handler rather
/// than the `Debug::Assert(false, ...)` that follows each faulting access.
///
/// @param operation  Which memory-safety bug class to trigger.
/// @return 0 only if the selected operation unexpectedly did not fault (the
///         final case returns the value read back through an untagged
///         pointer, which itself should trap).
int memory_safety_inner_entry(MemorySafetyBugClass operation)
{
	size_t length = 0x100;
	switch (operation)
	{
		case MemorySafetyBugClass::StackLinearOverflow:
		{
			/*
			 * Trigger a stack linear overflow, by storing one byte
			 * beyond an allocation bounds. The bounds checks are performed in
			 * the architectural level, by the CPU. Each capability carries the
			 * allocation bounds.
			 */
			int arr[0x10];
			int *ptr = std::launder(arr);
			auto capFault = [=]() {
				// Fences keep the compiler from reordering or eliding the
				// out-of-bounds store that we rely on to trap.
				__c11_atomic_signal_fence(__ATOMIC_SEQ_CST);
				ptr[sizeof(arr) / sizeof(arr[0])] = 0;
				__c11_atomic_signal_fence(__ATOMIC_SEQ_CST);
				Debug::log("use {}", ptr[sizeof(arr) / sizeof(arr[0])]);
			};
			Debug::log("Trigger stack linear overflow");
			capFault();
			Debug::Assert(false, "Code after overflow should be unreachable");
		}
		case MemorySafetyBugClass::HeapLinearOverflow:
		{
			/*
			 * Trigger a linear overflow on the heap, by storing one byte beyond
			 * an allocation bounds. The bounds checks are performed in the
			 * architectural level, by the CPU. Each capability carries the
			 * allocation bounds.
			 */
			allocation = static_cast<char *>(malloc(length));
			Debug::Assert(allocation != nullptr,
			              "Allocation failed in HeapLinearOverflow");
			Debug::log("Trigger heap linear overflow");
			allocation[length] = '\x41';
			Debug::Assert(false, "Code after overflow should be unreachable");
		}
		case MemorySafetyBugClass::HeapNonlinearOverflow:
		{
			/*
			 * Trigger a non-linear overflow on the heap, by triggering a store
			 * beyond the allocation bounds. The bounds checks are performed in
			 * the architectural level, by the CPU. Each capability carries the
			 * allocation bounds.
			 */
			allocation = static_cast<char *>(malloc(length));
			Debug::Assert(allocation != nullptr,
			              "Allocation failed in HeapNonlinearOverflow");
			Debug::log("Trigger heap nonlinear overflow");
			allocation[length * 2] = '\x41';
			Debug::Assert(false, "Code after overflow should be unreachable");
		}
		case MemorySafetyBugClass::HeapUseAfterFree:
		{
			/*
			 * Trigger a use after free, by storing a byte to an allocation
			 * beyond the end of its lifetime.
			 */
			allocation = static_cast<char *>(malloc(length));
			Debug::Assert(allocation != nullptr,
			              "Allocation failed in HeapUseAfterFree");
			free(allocation);
			/*
			 * From this point forward, any dereference of any dangling pointer
			 * to the freed memory will trap. This is guaranteed by the hardware
			 * load barrier that, on loads of capabilities to the memory region
			 * that can be used as a heap, checks the revocation bit
			 * corresponding to the base of the capability and clears the tag if
			 * it is set. For more details, see docs/architecture.md.
			 */
			Debug::log("Trigger heap use after free");
			allocation[0] = '\x41';
			Debug::Assert(false,
			              "Code after use after free should be unreachable");
		}
		case MemorySafetyBugClass::StoreStackPointerToGlobal:
		{
			/*
			 * Storing a stack pointer to a global variable makes it invalid.
			 * This is enforced by the Global (G) permission bit in the
			 * capability.
			 * This provides strong thread-isolation guarantees: data stored
			 * on the stack is never vulnerable to concurrent mutation.
			 */
			char buf[0x10];
			Debug::log("Trigger storing a stack pointer {} into global",
			           Capability{buf});
			volatilePointer = buf;
			Capability tmp = volatilePointer;
			Debug::log("tmp: {}", tmp);
			Debug::Assert(!tmp.is_valid(),
			              "Stack pointer stored into global should be invalid");
			// The load through the now-untagged capability should itself trap.
			return tmp[0];
		}
	}
	return 0;
}