
Commit a039b89

fix: update code with changes in opi api
Signed-off-by: Artsiom Koltun <[email protected]>
artek-koltun authored and sandersms committed Jan 23, 2024
1 parent f17bd1b commit a039b89
Showing 4 changed files with 63 additions and 63 deletions. The changes track an upstream opi-api rename: the NvmeTransportType values gain a TYPE infix (NVME_TRANSPORT_PCIE → NVME_TRANSPORT_TYPE_PCIE, NVME_TRANSPORT_TCP → NVME_TRANSPORT_TYPE_TCP) and the address-family values are spelled out (NVME_ADRFAM_IPV4 → NVME_ADDRESS_FAMILY_IPV4); the README examples and the bridge code are updated accordingly.
12 changes: 6 additions & 6 deletions README.md
@@ -48,7 +48,7 @@ It is assumed that Intel IPU is already properly set up to be used with Intel OP
The following variables are used throughout this document:

| Variable | Description |
- | -----------------| -------------------------------------------------------------------------------------------------------------------------------------------------- |
+ | ---------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- |
| BRIDGE_IP | opi-intel-bridge gRPC listening IP address e.g. 10.10.10.10 or localhost |
| BRIDGE_PORT | opi-intel-bridge gRPC listening port e.g. 50051 |
| BRIDGE_ADDR | BRIDGE_IP:BRIDGE_PORT |
@@ -127,30 +127,30 @@ or specify commands manually
grpc_cli call --json_input --json_output $BRIDGE_ADDR CreateNvmeSubsystem "{nvme_subsystem : {spec : {nqn: 'nqn.2022-09.io.spdk:opitest2', serial_number: 'myserial2', model_number: 'mymodel2', max_namespaces: 11} }, nvme_subsystem_id : 'subsystem2' }"
grpc_cli call --json_input --json_output $BRIDGE_ADDR ListNvmeSubsystems "{}"
grpc_cli call --json_input --json_output $BRIDGE_ADDR GetNvmeSubsystem "{name : '//storage.opiproject.org/subsystems/subsystem2'}"
- grpc_cli call --json_input --json_output $BRIDGE_ADDR CreateNvmeController "{parent: '//storage.opiproject.org/subsystems/subsystem2', nvme_controller : {spec : {nvme_controller_id: 2, pcie_id : {physical_function : 0, virtual_function : 0, port_id: 0}, max_nsq:5, max_ncq:5, 'trtype': 'NVME_TRANSPORT_PCIE' } }, nvme_controller_id : 'controller1'}"
+ grpc_cli call --json_input --json_output $BRIDGE_ADDR CreateNvmeController "{parent: '//storage.opiproject.org/subsystems/subsystem2', nvme_controller : {spec : {nvme_controller_id: 2, pcie_id : {physical_function : 0, virtual_function : 0, port_id: 0}, max_nsq:5, max_ncq:5, 'trtype': 'NVME_TRANSPORT_TYPE_PCIE' } }, nvme_controller_id : 'controller1'}"
grpc_cli call --json_input --json_output $BRIDGE_ADDR ListNvmeControllers "{parent : '//storage.opiproject.org/subsystems/subsystem2'}"
grpc_cli call --json_input --json_output $BRIDGE_ADDR GetNvmeController "{name : '//storage.opiproject.org/subsystems/subsystem2/controllers/controller1'}"
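For programmatic callers the rename is the generated Go constant, not just the JSON string. A minimal client sketch of the PCIe controller call above, assuming the opi-api generated package layout (the import path and the request/spec field names are inferred and may differ; only the transport field is filled in for brevity):

```go
package main

import (
	"context"
	"log"

	pb "github.com/opiproject/opi-api/storage/v1alpha1/gen/go" // assumed import path
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("cannot connect: %v", err)
	}
	defer conn.Close()

	client := pb.NewFrontendNvmeServiceClient(conn)
	// Only the transport type is spelled out here; the remaining spec
	// fields (pcie_id, max_nsq, max_ncq, ...) mirror the JSON call above.
	ctrl, err := client.CreateNvmeController(context.Background(), &pb.CreateNvmeControllerRequest{
		Parent:           "//storage.opiproject.org/subsystems/subsystem2",
		NvmeControllerId: "controller1",
		NvmeController: &pb.NvmeController{
			Spec: &pb.NvmeControllerSpec{
				Trtype: pb.NvmeTransportType_NVME_TRANSPORT_TYPE_PCIE, // renamed value
			},
		},
	})
	if err != nil {
		log.Fatalf("create failed: %v", err)
	}
	log.Println("created controller:", ctrl.GetName())
}
```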

# Nvme VF creation on PF0
grpc_cli call --json_input --json_output $BRIDGE_ADDR CreateNvmeSubsystem "{nvme_subsystem : {spec : {nqn: 'nqn.2022-09.io.spdk:opitest3', serial_number: 'mev-opi-serial', model_number: 'mev-opi-model', max_namespaces: 11} }, nvme_subsystem_id : 'subsystem03' }"
- grpc_cli call --json_input --json_output $BRIDGE_ADDR CreateNvmeController "{parent: '//storage.opiproject.org/subsystems/subsystem03', nvme_controller : {spec : {nvme_controller_id: 2, pcie_id : {physical_function : 0, virtual_function : 3, port_id: 0 }, max_nsq:5, max_ncq:5, 'trtype': 'NVME_TRANSPORT_PCIE' } }, nvme_controller_id : 'controller3'}"
+ grpc_cli call --json_input --json_output $BRIDGE_ADDR CreateNvmeController "{parent: '//storage.opiproject.org/subsystems/subsystem03', nvme_controller : {spec : {nvme_controller_id: 2, pcie_id : {physical_function : 0, virtual_function : 3, port_id: 0 }, max_nsq:5, max_ncq:5, 'trtype': 'NVME_TRANSPORT_TYPE_PCIE' } }, nvme_controller_id : 'controller3'}"

# Create Nvme/TCP controller
grpc_cli call --json_input --json_output $BRIDGE_ADDR CreateNvmeSubsystem "{nvme_subsystem : {spec : {nqn: 'nqn.2022-09.io.spdk:opitest4', serial_number: 'myserial2', model_number: 'mymodel2', max_namespaces: 11} }, nvme_subsystem_id : 'subsystem4' }"
- grpc_cli call --json_input --json_output $BRIDGE_ADDR CreateNvmeController "{'parent':'//storage.opiproject.org/subsystems/subsystem4','nvme_controller':{'spec':{'nvme_controller_id':2,'fabrics_id':{'traddr': '127.0.0.1', trsvcid: '4421', adrfam: 'NVME_ADRFAM_IPV4'}, 'max_nsq':5,'max_ncq':5, 'trtype': 'NVME_TRANSPORT_TCP'}},'nvme_controller_id':'controller4'}"
+ grpc_cli call --json_input --json_output $BRIDGE_ADDR CreateNvmeController "{'parent':'//storage.opiproject.org/subsystems/subsystem4','nvme_controller':{'spec':{'nvme_controller_id':2,'fabrics_id':{'traddr': '127.0.0.1', trsvcid: '4421', adrfam: 'NVME_ADDRESS_FAMILY_IPV4'}, 'max_nsq':5,'max_ncq':5, 'trtype': 'NVME_TRANSPORT_TYPE_TCP'}},'nvme_controller_id':'controller4'}"
grpc_cli call --json_input --json_output $BRIDGE_ADDR GetNvmeController "{name : '//storage.opiproject.org/subsystems/subsystem4/controllers/controller4'}"

# Connect to remote storage-target
grpc_cli call --json_input --json_output $BRIDGE_ADDR CreateNvmeRemoteController "{nvme_remote_controller : {multipath: 'NVME_MULTIPATH_MULTIPATH'}, nvme_remote_controller_id: 'nvmetcp12'}"
grpc_cli call --json_input --json_output $BRIDGE_ADDR ListNvmeRemoteControllers "{parent: 'todo'}"
grpc_cli call --json_input --json_output $BRIDGE_ADDR GetNvmeRemoteController "{name: '//storage.opiproject.org/volumes/nvmetcp12'}"
- grpc_cli call --json_input --json_output $BRIDGE_ADDR CreateNvmePath "{nvme_path : {controller_name_ref: '//storage.opiproject.org/volumes/nvmetcp12', traddr:'11.11.11.2', trtype:'NVME_TRANSPORT_TCP', fabrics:{subnqn:'nqn.2016-06.com.opi.spdk.target0', trsvcid:'4444', adrfam:'NVME_ADRFAM_IPV4', hostnqn:'nqn.2014-08.org.nvmexpress:uuid:feb98abe-d51f-40c8-b348-2753f3571d3c'}}, nvme_path_id: 'nvmetcp12path0'}"
+ grpc_cli call --json_input --json_output $BRIDGE_ADDR CreateNvmePath "{nvme_path : {controller_name_ref: '//storage.opiproject.org/volumes/nvmetcp12', traddr:'11.11.11.2', trtype:'NVME_TRANSPORT_TYPE_TCP', fabrics:{subnqn:'nqn.2016-06.com.opi.spdk.target0', trsvcid:'4444', adrfam:'NVME_ADDRESS_FAMILY_IPV4', hostnqn:'nqn.2014-08.org.nvmexpress:uuid:feb98abe-d51f-40c8-b348-2753f3571d3c'}}, nvme_path_id: 'nvmetcp12path0'}"
grpc_cli call --json_input --json_output $BRIDGE_ADDR ListNvmePaths "{parent : 'todo'}"
grpc_cli call --json_input --json_output $BRIDGE_ADDR GetNvmePath "{name: '//storage.opiproject.org/volumes/nvmetcp12path0'}"
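The path creation maps to the same renamed values in Go. A sketch of the request shape, with message and field names (NvmePath, FabricsPath, NvmeAddressFamily) inferred from the JSON above, so treat them as assumptions:

```go
package main

import (
	"log"

	pb "github.com/opiproject/opi-api/storage/v1alpha1/gen/go" // assumed import path
)

func main() {
	// Request shape for CreateNvmePath with the renamed enum values.
	req := &pb.CreateNvmePathRequest{
		NvmePathId: "nvmetcp12path0",
		NvmePath: &pb.NvmePath{
			ControllerNameRef: "//storage.opiproject.org/volumes/nvmetcp12",
			Traddr:            "11.11.11.2",
			Trtype:            pb.NvmeTransportType_NVME_TRANSPORT_TYPE_TCP, // renamed
			Fabrics: &pb.FabricsPath{
				Subnqn:  "nqn.2016-06.com.opi.spdk.target0",
				Trsvcid: "4444",
				Adrfam:  pb.NvmeAddressFamily_NVME_ADDRESS_FAMILY_IPV4, // renamed
				Hostnqn: "nqn.2014-08.org.nvmexpress:uuid:feb98abe-d51f-40c8-b348-2753f3571d3c",
			},
		},
	}
	log.Println(req)
}
```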

# Connect to local PCIe storage-target
grpc_cli call --json_input --json_output $BRIDGE_ADDR CreateNvmeRemoteController "{nvme_remote_controller : {multipath: 'NVME_MULTIPATH_DISABLE'}, nvme_remote_controller_id: 'nvmepcie13'}"
- grpc_cli call --json_input --json_output $BRIDGE_ADDR CreateNvmePath "{nvme_path : {controller_name_ref: '//storage.opiproject.org/volumes/nvmepcie13', traddr:'0000:01:00.0', trtype:'NVME_TRANSPORT_PCIE'}, nvme_path_id: 'nvmepcie13path0'}"
+ grpc_cli call --json_input --json_output $BRIDGE_ADDR CreateNvmePath "{nvme_path : {controller_name_ref: '//storage.opiproject.org/volumes/nvmepcie13', traddr:'0000:01:00.0', trtype:'NVME_TRANSPORT_TYPE_PCIE'}, nvme_path_id: 'nvmepcie13path0'}"

# Virtio-blk PF creation (virtio-blk requires a volume, which is why it is created after connecting to the storage-target)
grpc_cli --json_input --json_output call $BRIDGE_ADDR CreateVirtioBlk "{virtio_blk_id: 'virtioblk0', virtio_blk : { volume_name_ref: 'nvmetcp12n0', pcie_id: { physical_function: '0', virtual_function: '0', port_id: '0'}}}"
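For completeness, the virtio-blk call has the same programmatic shape; a sketch with names inferred from the JSON above (the pcie_id sub-message is omitted because its generated field types are not visible in this diff):

```go
package main

import (
	"log"

	pb "github.com/opiproject/opi-api/storage/v1alpha1/gen/go" // assumed import path
)

func main() {
	// Request shape for CreateVirtioBlk, exposing the volume created by
	// the earlier CreateNvmePath step as a virtio-blk device.
	req := &pb.CreateVirtioBlkRequest{
		VirtioBlkId: "virtioblk0",
		VirtioBlk: &pb.VirtioBlk{
			VolumeNameRef: "nvmetcp12n0",
		},
	}
	log.Println(req)
}
```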
4 changes: 2 additions & 2 deletions pkg/frontend/frontend.go
@@ -36,8 +36,8 @@ func NewServer(jsonRPC spdk.JSONRPC, store gokv.Store) *Server {
opiSpdkServer := frontend.NewCustomizedServer(
jsonRPC, store,
map[pb.NvmeTransportType]frontend.NvmeTransport{
- pb.NvmeTransportType_NVME_TRANSPORT_PCIE: NewNvmeNpiTransport(jsonRPC),
- pb.NvmeTransportType_NVME_TRANSPORT_TCP: frontend.NewNvmeTCPTransport(jsonRPC),
+ pb.NvmeTransportType_NVME_TRANSPORT_TYPE_PCIE: NewNvmeNpiTransport(jsonRPC),
+ pb.NvmeTransportType_NVME_TRANSPORT_TYPE_TCP: frontend.NewNvmeTCPTransport(jsonRPC),
},
NewMevBlkTransport())
return &Server{
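The map above is the bridge's transport dispatch: NewCustomizedServer picks a transport implementation by its NvmeTransportType key, so only the enum keys had to change here. A standalone sketch of the pattern with simplified stand-in types (not the real opi-api or opi-spdk-bridge types):

```go
package main

import "fmt"

// NvmeTransportType stands in for the opi-api enum; only the two values
// used by the bridge are modeled.
type NvmeTransportType int

const (
	NVME_TRANSPORT_TYPE_PCIE NvmeTransportType = iota + 1
	NVME_TRANSPORT_TYPE_TCP
)

// NvmeTransport stands in for the bridge's per-transport strategy interface.
type NvmeTransport interface {
	Connect(traddr string) error
}

type npiTransport struct{} // Intel NPI-based PCIe transport stand-in

func (npiTransport) Connect(traddr string) error {
	fmt.Println("NPI/PCIe connect:", traddr)
	return nil
}

type tcpTransport struct{} // generic Nvme/TCP transport stand-in

func (tcpTransport) Connect(traddr string) error {
	fmt.Println("TCP connect:", traddr)
	return nil
}

func main() {
	// As in NewServer above: the server selects a transport by enum key,
	// so supporting a new transport is a one-line map entry.
	transports := map[NvmeTransportType]NvmeTransport{
		NVME_TRANSPORT_TYPE_PCIE: npiTransport{},
		NVME_TRANSPORT_TYPE_TCP:  tcpTransport{},
	}
	if err := transports[NVME_TRANSPORT_TYPE_TCP].Connect("127.0.0.1"); err != nil {
		fmt.Println("connect failed:", err)
	}
}
```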
4 changes: 2 additions & 2 deletions pkg/frontend/nvme.go
@@ -127,7 +127,7 @@ func (s *Server) CreateNvmeController(ctx context.Context, in *pb.CreateNvmeCont

log.Printf("Passing request to opi-spdk-bridge")
response, err := s.FrontendNvmeServiceServer.CreateNvmeController(ctx, in)
- if err == nil && in.GetNvmeController().GetSpec().GetTrtype() == pb.NvmeTransportType_NVME_TRANSPORT_PCIE {
+ if err == nil && in.GetNvmeController().GetSpec().GetTrtype() == pb.NvmeTransportType_NVME_TRANSPORT_TYPE_PCIE {
// response contains different QoS limits. It is an indication that
// opi-spdk-bridge returned an already existing controller providing idempotence
if !proto.Equal(response.Spec.MaxLimit, in.NvmeController.Spec.MaxLimit) ||
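The proto.Equal comparison above implements idempotent creation: an error-free response whose QoS limits differ from the request means opi-spdk-bridge returned a pre-existing controller with conflicting settings. A standalone sketch of the check, using wrapperspb values as stand-ins for the real QoS limit message:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// QoS limit requested by the client vs. the limit carried by the
	// controller the bridge returned (stand-ins for the real message).
	requested := wrapperspb.Int64(10000)
	returned := wrapperspb.Int64(5000)

	// Mirrors the check in CreateNvmeController: equal limits mean the
	// returned controller matches the request (idempotent re-create);
	// different limits mean a conflicting controller already exists.
	if !proto.Equal(requested, returned) {
		fmt.Println("controller already exists with different QoS limits: reject")
	}
}
```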
@@ -156,7 +156,7 @@ func (s *Server) UpdateNvmeController(ctx context.Context, in *pb.UpdateNvmeCont
originalNvmeController := s.nvme.Controllers[in.NvmeController.Name]
log.Printf("Passing request to opi-spdk-bridge")
response, err := s.FrontendNvmeServiceServer.UpdateNvmeController(ctx, in)
- if err == nil && in.GetNvmeController().GetSpec().GetTrtype() == pb.NvmeTransportType_NVME_TRANSPORT_PCIE {
+ if err == nil && in.GetNvmeController().GetSpec().GetTrtype() == pb.NvmeTransportType_NVME_TRANSPORT_TYPE_PCIE {
if qosErr := s.setNvmeQosLimit(ctx, in.NvmeController); qosErr != nil {
log.Println("Failed to set qos settings:", qosErr)
log.Println("Restore original controller")
