@@ -92,19 +92,10 @@ class uvma_obi_memory_drv_c extends uvm_driver#(
 
    /**
     * Drives the virtual interface's (cntxt.vif) signals using req's contents.
+    * This task handles both READ and WRITE transactions.
     */
    extern task drv_mstr_req(ref uvma_obi_memory_mstr_seq_item_c req);
 
-   /**
-    * Drives the virtual interface's (cntxt.vif) signals using req's contents.
-    */
-   extern task drv_mstr_read_req(ref uvma_obi_memory_mstr_seq_item_c req);
-
-   /**
-    * Drives the virtual interface's (cntxt.vif) signals using req's contents.
-    */
-   extern task drv_mstr_write_req(ref uvma_obi_memory_mstr_seq_item_c req);
-
    /**
    * Drives the virtual interface's (cntxt.vif) signals using req's contents.
    */
@@ -357,30 +348,13 @@ task uvma_obi_memory_drv_c::prep_req(ref uvma_obi_memory_base_seq_item_c req);
 
 endtask : prep_req
 
-
+// Both Master READ and WRITE transactions are handled here because the signalling is almost identical.
 task uvma_obi_memory_drv_c::drv_mstr_req(ref uvma_obi_memory_mstr_seq_item_c req);
 
-   case (req.access_type)
-      UVMA_OBI_MEMORY_ACCESS_READ: begin
-         drv_mstr_read_req(req);
-      end
-
-      UVMA_OBI_MEMORY_ACCESS_WRITE: begin
-         drv_mstr_write_req(req);
-      end
-
-      default: `uvm_fatal("OBI_MEMORY_DRV", $sformatf("Invalid access_type: %0d", req.access_type))
-   endcase
-
-endtask : drv_mstr_req
-
-
-// This task has redundant code with drv_mstr_write_req for the request and
-// address phases. Rather than create a new method for the common code, the
-// waiver pragmas (@DVT) are placed to warn future maintainers of the situation.
-task uvma_obi_memory_drv_c::drv_mstr_read_req(ref uvma_obi_memory_mstr_seq_item_c req);
+   if ((req.access_type != UVMA_OBI_MEMORY_ACCESS_READ) && (req.access_type != UVMA_OBI_MEMORY_ACCESS_WRITE)) begin
+      `uvm_fatal("OBI_MEMORY_DRV", $sformatf("Invalid access_type: %0d", req.access_type))
+   end
 
-   // @DVT_LINTER_WAIVER_START "MT20211004_1" disable SVTB.33.1.0, SVTB.33.2.0
    // Req Latency cycles
    repeat (req.req_latency) begin
       @(mstr_mp.drv_mstr_cb);
@@ -392,7 +366,6 @@ task uvma_obi_memory_drv_c::drv_mstr_read_req(ref uvma_obi_memory_mstr_seq_item_
    for (int unsigned ii=0; ii<cfg.addr_width; ii++) begin
       mstr_mp.drv_mstr_cb.addr[ii] <= req.address[ii];
    end
-   // @DVT_LINTER_WAIVER_END "MT20211004_1"
    for (int unsigned ii=0; ii<(cfg.data_width/8); ii++) begin
       mstr_mp.drv_mstr_cb.be[ii] <= req.be[ii];
    end
@@ -403,6 +376,16 @@ task uvma_obi_memory_drv_c::drv_mstr_read_req(ref uvma_obi_memory_mstr_seq_item_
       mstr_mp.drv_mstr_cb.aid[ii] <= req.id[ii];
    end
 
+   // Handle WRITE
+   if (req.access_type == UVMA_OBI_MEMORY_ACCESS_WRITE) begin
+      for (int unsigned ii=0; ii<cfg.data_width; ii++) begin
+         mstr_mp.drv_mstr_cb.wdata[ii] <= req.wdata[ii];
+      end
+      for (int unsigned ii=0; ii<cfg.wuser_width; ii++) begin
+         mstr_mp.drv_mstr_cb.wuser[ii] <= req.wuser[ii];
+      end
+   end
+
    // Wait for grant
    while (mstr_mp.drv_mstr_cb.gnt !== 1'b1) begin
       @(mstr_mp.drv_mstr_cb);
@@ -438,77 +421,7 @@ task uvma_obi_memory_drv_c::drv_mstr_read_req(ref uvma_obi_memory_mstr_seq_item_
       @(mstr_mp.drv_mstr_cb);
    end
 
-endtask : drv_mstr_read_req
-
-
-// This task has redundant code with drv_mstr_read_req for the request and
-// address phases. Rather than create a new method for the common code, the
-// waiver pragmas (@DVT) are placed to warn future maintainers of the situation.
-task uvma_obi_memory_drv_c::drv_mstr_write_req(ref uvma_obi_memory_mstr_seq_item_c req);
-
-   // @DVT_LINTER_WAIVER_START "MT20210901_3" disable SVTB.33.1.0, SVTB.33.2.0
-   // Req Latency cycles
-   repeat (req.req_latency) begin
-      @(mstr_mp.drv_mstr_cb);
-   end
-
-   // Address phase
-   mstr_mp.drv_mstr_cb.req <= 1'b1;
-   mstr_mp.drv_mstr_cb.we  <= req.access_type;
-   for (int unsigned ii=0; ii<cfg.addr_width; ii++) begin
-      mstr_mp.drv_mstr_cb.addr[ii] <= req.address[ii];
-   end
-   // @DVT_LINTER_WAIVER_END "MT20210901_3"
-   for (int unsigned ii=0; ii<cfg.data_width; ii++) begin
-      mstr_mp.drv_mstr_cb.wdata[ii] <= req.wdata[ii];
-   end
-   for (int unsigned ii=0; ii<(cfg.data_width/8); ii++) begin
-      mstr_mp.drv_mstr_cb.be[ii] <= req.be[ii];
-   end
-   for (int unsigned ii=0; ii<cfg.auser_width; ii++) begin
-      mstr_mp.drv_mstr_cb.auser[ii] <= req.auser[ii];
-   end
-   for (int unsigned ii=0; ii<cfg.wuser_width; ii++) begin
-      mstr_mp.drv_mstr_cb.wuser[ii] <= req.wuser[ii];
-   end
-   for (int unsigned ii=0; ii<cfg.id_width; ii++) begin
-      mstr_mp.drv_mstr_cb.aid[ii] <= req.id[ii];
-   end
-
-   // Wait for grant
-   while (mstr_mp.drv_mstr_cb.gnt !== 1'b1) begin
-      @(mstr_mp.drv_mstr_cb);
-   end
-
-   // Wait for rvalid
-   while (mstr_mp.drv_mstr_cb.rvalid !== 1'b1) begin
-      @(mstr_mp.drv_mstr_cb);
-   end
-   repeat (req.rready_latency) begin
-      @(mstr_mp.drv_mstr_cb);
-   end
-
-   // Response phase
-   mstr_mp.drv_mstr_cb.rready <= 1'b1;
-   mstr_mp.drv_mstr_cb.req    <= 1'b0;
-   repeat (req.rready_hold) begin
-      if (mstr_mp.drv_mstr_cb.rvalid !== 1'b1) begin
-         break;
-      end
-      @(mstr_mp.drv_mstr_cb);
-   end
-   while (mstr_mp.drv_mstr_cb.rvalid === 1'b1) begin
-      @(mstr_mp.drv_mstr_cb);
-   end
-
-   // Tail
-   mstr_mp.drv_mstr_cb.rready <= 1'b0;
-   drv_mstr_idle();
-   repeat (req.tail_length) begin
-      @(mstr_mp.drv_mstr_cb);
-   end
-
-endtask : drv_mstr_write_req
+endtask : drv_mstr_req
 
 
 task uvma_obi_memory_drv_c::drv_slv_req(ref uvma_obi_memory_slv_seq_item_c req);
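
This consolidation is transparent to master sequences: both access types still reach the driver through the single drv_mstr_req() call, and the driver itself decides whether to drive the write-only signals. A minimal usage sketch follows, assuming the seq item's access_type, address, and wdata fields are randomizable; the class name uvma_obi_memory_mstr_rd_wr_seq_c, the base class uvma_obi_memory_mstr_base_seq_c, and the literal address/data values are hypothetical, while the field names and UVMA_OBI_MEMORY_ACCESS_* enums come from the diff above.

// Hypothetical sequence sketch, not part of this change. Only drv_mstr_req's
// inputs (access_type, address, wdata) and the access enums are taken from
// the diff; all other names and values are assumptions for illustration.
class uvma_obi_memory_mstr_rd_wr_seq_c extends uvma_obi_memory_mstr_base_seq_c;

   `uvm_object_utils(uvma_obi_memory_mstr_rd_wr_seq_c)

   function new(string name="uvma_obi_memory_mstr_rd_wr_seq");
      super.new(name);
   endfunction : new

   virtual task body();
      uvma_obi_memory_mstr_seq_item_c req;

      // WRITE: the unified drv_mstr_req() drives wdata/wuser only because
      // access_type is WRITE.
      `uvm_do_with(req, {
         access_type == UVMA_OBI_MEMORY_ACCESS_WRITE;
         address     == 32'h0000_1000;
         wdata       == 32'hDEAD_BEEF;
      })

      // READ: same driver task; the write-only signal loops are skipped.
      `uvm_do_with(req, {
         access_type == UVMA_OBI_MEMORY_ACCESS_READ;
         address     == 32'h0000_1000;
      })
   endtask : body

endclass : uvma_obi_memory_mstr_rd_wr_seq_c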