@@ -51,13 +51,13 @@ class QemuConsole():
51
51
def __init__ (self , qemu_binary = None , pnor = None , skiboot = None ,
52
52
prompt = None , kernel = None , initramfs = None ,
53
53
block_setup_term = None , delaybeforesend = None ,
54
- logfile = sys .stdout , hda = None , cdrom = None ):
54
+ logfile = sys .stdout , disks = None , cdrom = None ):
55
55
self .qemu_binary = qemu_binary
56
56
self .pnor = pnor
57
57
self .skiboot = skiboot
58
58
self .kernel = kernel
59
59
self .initramfs = initramfs
60
- self .hda = hda
60
+ self .disks = disks
61
61
self .state = ConsoleState .DISCONNECTED
62
62
self .logfile = logfile
63
63
self .delaybeforesend = delaybeforesend
@@ -71,6 +71,7 @@ def __init__(self, qemu_binary=None, pnor=None, skiboot=None,
71
71
self .block_setup_term = block_setup_term # allows caller specific control of when to block setup_term
72
72
self .setup_term_quiet = 0 # tells setup_term to not throw exceptions, like when system off
73
73
self .setup_term_disable = 0 # flags the object to abandon setup_term operations, like when system off
74
+ self .mac_str = '52:54:00:22:34:56'
74
75
75
76
# state tracking, reset on boot and state changes
76
77
# console tracking done on System object for the system console
@@ -101,6 +102,10 @@ def disable_setup_term_quiet(self):
101
102
self .setup_term_quiet = 0
102
103
self .setup_term_disable = 0
103
104
105
# Console-level hook: the console owns the QEMU command line, so it is
# the natural place to track which disks to attach on next connect().
def update_disks (self , disks ):
    """Replace the list of disk backing files used when building the QEMU command."""
    self .disks = disks
104
109
def close (self ):
105
110
self .util .clear_state (self )
106
111
try :
@@ -141,22 +146,81 @@ def connect(self):
141
146
if self .initramfs is not None :
142
147
cmd = cmd + " -initrd %s" % (self .initramfs )
143
148
144
- if self .hda is not None :
145
- # Put the disk on the first PHB
146
- cmd = (cmd
147
- + " -drive file={},id=disk01,if=none" .format (self .hda )
148
- + " -device virtio-blk-pci,drive=disk01,id=virtio01,bus=pcie.0,addr=0"
149
+ # So in the powernv QEMU model we have 3 PHBs with one slot free each.
150
+ # We can add a pcie bridge to each of these, and each bridge has 31
151
+ # slots.. if you see where I'm going..
152
+ cmd = (cmd
153
+ + " -device pcie-pci-bridge,id=pcie.3,bus=pcie.0,addr=0x0"
154
+ + " -device pcie-pci-bridge,id=pcie.4,bus=pcie.1,addr=0x0"
155
+ + " -device pcie-pci-bridge,id=pcie.5,bus=pcie.2,addr=0x0"
156
+ )
157
+
158
+ # Put the NIC in slot 2 of the second PHB (1st is reserved for later)
159
+ cmd = (cmd
160
+ + " -netdev user,id=u1 -device e1000e,netdev=u1,mac={},bus=pcie.4,addr=2"
161
+ .format (self .mac_str )
149
162
)
163
+ prefilled_slots = 1
164
+
150
165
if self .cdrom is not None :
151
- # Put the CDROM on the second PHB
166
+ # Put the CDROM in slot 3 of the second PHB
152
167
cmd = (cmd
153
168
+ " -drive file={},id=cdrom01,if=none,media=cdrom" .format (self .cdrom )
154
- + " -device virtio-blk-pci,drive=cdrom01,id=virtio02,bus=pcie.1 ,addr=0 "
169
+ + " -device virtio-blk-pci,drive=cdrom01,id=virtio02,bus=pcie.4 ,addr=3 "
155
170
)
171
+ prefilled_slots += 1
172
+
173
+ bridges = []
174
+ bridges .append ({'bus' : 3 , 'n_devices' : 0 , 'bridged' : False })
175
+ bridges .append ({'bus' : 4 , 'n_devices' : prefilled_slots , 'bridged' : False })
176
+ bridges .append ({'bus' : 5 , 'n_devices' : 0 , 'bridged' : False })
177
+
178
+ # For any amount of disks we have, start finding spots for them in the PHBs
179
+ if self .disks :
180
+ diskid = 0
181
+ bid = 0
182
+ for disk in self .disks :
183
+ bridge = bridges [bid ]
184
+ if bridge ['n_devices' ] >= 30 :
185
+ # This bridge is full
186
+ if bid == len (bridges ) - 1 :
187
+ # All bridges full, find one to extend
188
+ if [x for x in bridges if x ['bridged' ] == False ] == []:
189
+ # We messed up and filled up all our slots
190
+ raise OpTestError ("Oops! We ran out of slots!" )
191
+ for i in range (0 , bid ):
192
+ if not bridges [i ]['bridged' ]:
193
+ # We can add a bridge here
194
+ parent = bridges [i ]['bus' ]
195
+ new = bridges [- 1 ]['bus' ] + 1
196
+ print ("Adding new bridge {} on bridge {}" .format (new , parent ))
197
+ bridges .append ({'bus' : new , 'n_devices' : 0 , 'bridged' : False })
198
+ cmd = cmd + " -device pcie-pci-bridge,id=pcie.{},bus=pcie.{},addr=0x1" .format (new , parent )
199
+ bid = bid + 1
200
+ bridges [i ]['bridged' ] = True
201
+ bridge = bridges [bid ]
202
+ break
203
+ else :
204
+ # Just move to the next one, subsequent bridge should
205
+ # always have slots
206
+ bid = bid + 1
207
+ bridge = bridges [bid ]
208
+ if bridge ['n_devices' ] >= 30 :
209
+ raise OpTestError ("Lost track of our PCI bridges!" )
210
+
211
+ # Got a bridge, let's go!
212
+ # Valid bridge slots are 1..31, but keep 1 free for more bridges
213
+ addr = 2 + bridge ['n_devices' ]
214
+ print ("Adding disk {} on bus {} at address {}" .format (diskid , bridge ['bus' ], addr ))
215
+ cmd = cmd + " -drive file={},id=disk{},if=none" .format (disk .name , diskid )
216
+ cmd = cmd + " -device virtio-blk-pci,drive=disk{},id=virtio{},bus=pcie.{},addr={}" .format (diskid , diskid , bridge ['bus' ], hex (addr ))
217
+ diskid += 1
218
+ bridge ['n_devices' ] += 1
219
+
156
220
# typical host ip=10.0.2.2 and typical skiroot 10.0.2.15
157
221
# use skiroot as the source, no sshd in skiroot
222
+
158
223
fru_path = os .path .join (OpTestConfiguration .conf .basedir , "test_binaries" , "qemu_fru" )
159
- cmd = cmd + " -nic user,model=virtio-net-pci"
160
224
cmd = cmd + " -device ipmi-bmc-sim,id=bmc0,frudatafile=" + fru_path + " -device isa-ipmi-bt,bmc=bmc0,irq=10"
161
225
cmd = cmd + " -serial none -device isa-serial,chardev=s1 -chardev stdio,id=s1,signal=off"
162
226
print (cmd )
@@ -236,6 +300,7 @@ class OpTestQemu():
236
300
def __init__ (self , conf = None , qemu_binary = None , pnor = None , skiboot = None ,
237
301
kernel = None , initramfs = None , cdrom = None ,
238
302
logfile = sys .stdout ):
303
+ self .disks = []
239
304
# need the conf object to properly bind opened object
240
305
# we need to be able to cleanup/close the temp file in signal handler
241
306
self .conf = conf
@@ -267,31 +332,28 @@ def __init__(self, conf=None, qemu_binary=None, pnor=None, skiboot=None,
267
332
" and then retry." )
268
333
raise e
269
334
335
+ self .disks .append (self .conf .args .qemu_scratch_disk )
270
336
atexit .register (self .__del__ )
271
337
self .console = QemuConsole (qemu_binary = qemu_binary ,
272
338
pnor = pnor ,
273
339
skiboot = skiboot ,
274
340
kernel = kernel ,
275
341
initramfs = initramfs ,
276
342
logfile = logfile ,
277
- hda = self .conf .args .qemu_scratch_disk .name ,
278
- cdrom = cdrom )
343
+ disks = self .disks , cdrom = cdrom )
279
344
self .ipmi = QemuIPMI (self .console )
280
345
self .system = None
281
346
282
347
def __del__ (self ):
283
- log . debug ( "OpTestQemu cleaning up qemu_scratch_disk={}"
284
- . format ( self . conf . args . qemu_scratch_disk ))
285
- if self .conf .args .qemu_scratch_disk :
348
+ for fd in self . disks :
349
+ log . debug ( "OpTestQemu cleaning up qemu_scratch_disk={}"
350
+ . format ( self .conf .args .qemu_scratch_disk ))
286
351
try :
287
- self .conf .args .qemu_scratch_disk .close ()
288
- self .conf .args .qemu_scratch_disk = None
289
- # if this was a temp file it will be deleted upon close
290
- # optest_handler closes if signal encountered
291
- log .debug ("OpTestQemu closed qemu_scratch_disk" )
352
+ fd .close ()
292
353
except Exception as e :
293
354
log .error ("OpTestQemu cleanup, ignoring Exception={}"
294
355
.format (e ))
356
+ self .disks = []
295
357
296
358
def set_system (self , system ):
    """Point the console at its owning system object."""
    self .console .system = system
@@ -332,3 +394,12 @@ def supports_ipmi_dcmi(self):
332
394
333
395
def has_ipmi_sel (self ):
    """The simulated QEMU BMC offers no IPMI SEL, so always report False."""
    return False
397
+
398
+ def add_temporary_disk (self , size ):
399
+ self .console .close ()
400
+
401
+ fd = tempfile .NamedTemporaryFile (delete = True )
402
+ self .disks .append (fd )
403
+ create_hda = subprocess .check_call (["qemu-img" , "create" ,
404
+ "-fqcow2" , fd .name , size ])
405
+ self .console .update_disks (self .disks )
0 commit comments