@@ -477,6 +477,10 @@ static void cma_release_dev(struct rdma_id_private *id_priv)
 	list_del(&id_priv->list);
 	cma_dev_put(id_priv->cma_dev);
 	id_priv->cma_dev = NULL;
+	if (id_priv->id.route.addr.dev_addr.sgid_attr) {
+		rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
+		id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
+	}
 	mutex_unlock(&lock);
 }
@@ -1861,9 +1865,6 @@ static void _destroy_id(struct rdma_id_private *id_priv,
 
 	kfree(id_priv->id.route.path_rec);
 
-	if (id_priv->id.route.addr.dev_addr.sgid_attr)
-		rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
-
 	put_net(id_priv->id.route.addr.dev_addr.net);
 	rdma_restrack_del(&id_priv->res);
 	kfree(id_priv);
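
These two hunks move the sgid_attr reference drop from _destroy_id() into cma_release_dev(), so the GID-table reference is released under the same lock, paired with cma_dev_put(), as soon as the ID detaches from its device, and the pointer is cleared so the later destroy path has nothing left to put. A minimal userspace sketch of that put-and-NULL idiom, with hypothetical names rather than cma.c's API:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for rdma_put_gid_attr() and the sgid_attr slot. */
struct gid_attr {
        int refcount;
};

static void put_attr(struct gid_attr *attr)
{
        if (--attr->refcount == 0)
                free(attr);
}

/* Drop the reference exactly once and clear the slot, so any later
 * cleanup path sees NULL and skips the put. */
static void release_slot(struct gid_attr **slot)
{
        if (*slot) {
                put_attr(*slot);
                *slot = NULL;
        }
}

int main(void)
{
        struct gid_attr *attr = malloc(sizeof(*attr));

        attr->refcount = 1;
        release_slot(&attr);    /* releases and NULLs the slot */
        release_slot(&attr);    /* second pass is a harmless no-op */
        printf("slot is %s\n", attr ? "set" : "NULL");
        return 0;
}
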
@@ -2495,30 +2496,31 @@ static int cma_listen_handler(struct rdma_cm_id *id,
 	return id_priv->id.event_handler(id, event);
 }
 
-static void cma_listen_on_dev(struct rdma_id_private *id_priv,
-			      struct cma_device *cma_dev)
+static int cma_listen_on_dev(struct rdma_id_private *id_priv,
+			     struct cma_device *cma_dev,
+			     struct rdma_id_private **to_destroy)
 {
 	struct rdma_id_private *dev_id_priv;
 	struct net *net = id_priv->id.route.addr.dev_addr.net;
 	int ret;
 
 	lockdep_assert_held(&lock);
 
+	*to_destroy = NULL;
 	if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
-		return;
+		return 0;
 
 	dev_id_priv =
 		__rdma_create_id(net, cma_listen_handler, id_priv,
 				 id_priv->id.ps, id_priv->id.qp_type, id_priv);
 	if (IS_ERR(dev_id_priv))
-		return;
+		return PTR_ERR(dev_id_priv);
 
 	dev_id_priv->state = RDMA_CM_ADDR_BOUND;
 	memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
 	       rdma_addr_size(cma_src_addr(id_priv)));
 
 	_cma_attach_to_dev(dev_id_priv, cma_dev);
-	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
 	cma_id_get(id_priv);
 	dev_id_priv->internal_id = 1;
 	dev_id_priv->afonly = id_priv->afonly;
@@ -2527,19 +2529,42 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
 
 	ret = rdma_listen(&dev_id_priv->id, id_priv->backlog);
 	if (ret)
-		dev_warn(&cma_dev->device->dev,
-			 "RDMA CMA: cma_listen_on_dev, error %d\n", ret);
+		goto err_listen;
+	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
+	return 0;
+err_listen:
+	/* Caller must destroy this after releasing lock */
+	*to_destroy = dev_id_priv;
+	dev_warn(&cma_dev->device->dev, "RDMA CMA: %s, error %d\n", __func__, ret);
+	return ret;
 }
 
-static void cma_listen_on_all(struct rdma_id_private *id_priv)
+static int cma_listen_on_all(struct rdma_id_private *id_priv)
 {
+	struct rdma_id_private *to_destroy;
 	struct cma_device *cma_dev;
+	int ret;
 
 	mutex_lock(&lock);
 	list_add_tail(&id_priv->list, &listen_any_list);
-	list_for_each_entry(cma_dev, &dev_list, list)
-		cma_listen_on_dev(id_priv, cma_dev);
+	list_for_each_entry(cma_dev, &dev_list, list) {
+		ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
+		if (ret) {
+			/* Prevent racing with cma_process_remove() */
+			if (to_destroy)
+				list_del_init(&to_destroy->list);
+			goto err_listen;
+		}
+	}
 	mutex_unlock(&lock);
+	return 0;
+
+err_listen:
+	list_del(&id_priv->list);
+	mutex_unlock(&lock);
+	if (to_destroy)
+		rdma_destroy_id(&to_destroy->id);
+	return ret;
 }
 
 void rdma_set_service_type(struct rdma_cm_id *id, int tos)
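
The reworked pair above follows a deferred-destroy pattern: cma_listen_on_dev() runs with the global lock held and must not call rdma_destroy_id() itself, since destruction eventually reaches cma_release_dev(), which takes that same lock. On failure, the child ID is therefore handed back through *to_destroy and torn down by cma_listen_on_all() only after mutex_unlock(). A minimal userspace sketch of the pattern, using pthreads as a stand-in for the kernel mutex (all names hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

struct object {
        int id;
};

/* Like rdma_destroy_id(): teardown needs registry_lock itself, so it
 * must never run while the caller already holds that lock. */
static void obj_destroy(struct object *obj)
{
        pthread_mutex_lock(&registry_lock);
        /* ...unlink from shared structures... */
        pthread_mutex_unlock(&registry_lock);
        free(obj);
}

/* Like cma_listen_on_dev(): called with the lock held; on failure it
 * only reports what to tear down via *to_destroy. */
static int setup_one(int id, struct object **to_destroy)
{
        struct object *obj;

        *to_destroy = NULL;
        obj = malloc(sizeof(*obj));
        if (!obj)
                return -1;
        obj->id = id;
        if (id == 3) {          /* simulate a setup failure */
                *to_destroy = obj;
                return -1;
        }
        free(obj);              /* sketch only: a real version would keep it on a list */
        return 0;
}

int main(void)
{
        struct object *to_destroy = NULL;
        int i, ret = 0;

        pthread_mutex_lock(&registry_lock);
        for (i = 0; i < 5; i++) {
                ret = setup_one(i, &to_destroy);
                if (ret)
                        break;
        }
        pthread_mutex_unlock(&registry_lock);

        /* Destroy only after the lock is dropped, as cma_listen_on_all() does. */
        if (ret && to_destroy) {
                printf("tearing down object %d after unlock\n", to_destroy->id);
                obj_destroy(to_destroy);
        }
        return 0;
}
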
@@ -3692,8 +3717,11 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
 			ret = -ENOSYS;
 			goto err;
 		}
-	} else
-		cma_listen_on_all(id_priv);
+	} else {
+		ret = cma_listen_on_all(id_priv);
+		if (ret)
+			goto err;
+	}
 
 	return 0;
 err:
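
With cma_listen_on_all() now returning an error, a wildcard listen no longer limps along after a partial failure; the error propagates out of rdma_listen(). Through the ucma layer that failure should in turn surface to userspace, so a librdmacm caller's existing return-value check catches it. A minimal usage sketch (this is librdmacm's rdma_listen(), not the kernel function above; error handling abbreviated):

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <rdma/rdma_cma.h>

int main(void)
{
        struct rdma_event_channel *ch;
        struct rdma_cm_id *id;
        struct sockaddr_in addr;

        ch = rdma_create_event_channel();
        if (!ch)
                return 1;
        if (rdma_create_id(ch, &id, NULL, RDMA_PS_TCP))
                return 1;

        /* Wildcard bind: no specific device, so the kernel listens on all. */
        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        if (rdma_bind_addr(id, (struct sockaddr *)&addr))
                return 1;

        if (rdma_listen(id, 16)) {      /* failure is no longer silent */
                perror("rdma_listen");
                return 1;
        }

        rdma_destroy_id(id);
        rdma_destroy_event_channel(ch);
        return 0;
}
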
@@ -4773,69 +4801,6 @@ static struct notifier_block cma_nb = {
 	.notifier_call = cma_netdev_callback
 };
 
-static int cma_add_one(struct ib_device *device)
-{
-	struct cma_device *cma_dev;
-	struct rdma_id_private *id_priv;
-	unsigned int i;
-	unsigned long supported_gids = 0;
-	int ret;
-
-	cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
-	if (!cma_dev)
-		return -ENOMEM;
-
-	cma_dev->device = device;
-	cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
-					    sizeof(*cma_dev->default_gid_type),
-					    GFP_KERNEL);
-	if (!cma_dev->default_gid_type) {
-		ret = -ENOMEM;
-		goto free_cma_dev;
-	}
-
-	cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt,
-					    sizeof(*cma_dev->default_roce_tos),
-					    GFP_KERNEL);
-	if (!cma_dev->default_roce_tos) {
-		ret = -ENOMEM;
-		goto free_gid_type;
-	}
-
-	rdma_for_each_port (device, i) {
-		supported_gids = roce_gid_type_mask_support(device, i);
-		WARN_ON(!supported_gids);
-		if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE))
-			cma_dev->default_gid_type[i - rdma_start_port(device)] =
-				CMA_PREFERRED_ROCE_GID_TYPE;
-		else
-			cma_dev->default_gid_type[i - rdma_start_port(device)] =
-				find_first_bit(&supported_gids, BITS_PER_LONG);
-		cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0;
-	}
-
-	init_completion(&cma_dev->comp);
-	refcount_set(&cma_dev->refcount, 1);
-	INIT_LIST_HEAD(&cma_dev->id_list);
-	ib_set_client_data(device, &cma_client, cma_dev);
-
-	mutex_lock(&lock);
-	list_add_tail(&cma_dev->list, &dev_list);
-	list_for_each_entry(id_priv, &listen_any_list, list)
-		cma_listen_on_dev(id_priv, cma_dev);
-	mutex_unlock(&lock);
-
-	trace_cm_add_one(device);
-	return 0;
-
-free_gid_type:
-	kfree(cma_dev->default_gid_type);
-
-free_cma_dev:
-	kfree(cma_dev);
-	return ret;
-}
-
 static void cma_send_device_removal_put(struct rdma_id_private *id_priv)
 {
 	struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL };
@@ -4898,6 +4863,80 @@ static void cma_process_remove(struct cma_device *cma_dev)
 	wait_for_completion(&cma_dev->comp);
 }
 
+static int cma_add_one(struct ib_device *device)
+{
+	struct rdma_id_private *to_destroy;
+	struct cma_device *cma_dev;
+	struct rdma_id_private *id_priv;
+	unsigned int i;
+	unsigned long supported_gids = 0;
+	int ret;
+
+	cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL);
+	if (!cma_dev)
+		return -ENOMEM;
+
+	cma_dev->device = device;
+	cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
+					    sizeof(*cma_dev->default_gid_type),
+					    GFP_KERNEL);
+	if (!cma_dev->default_gid_type) {
+		ret = -ENOMEM;
+		goto free_cma_dev;
+	}
+
+	cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt,
+					    sizeof(*cma_dev->default_roce_tos),
+					    GFP_KERNEL);
+	if (!cma_dev->default_roce_tos) {
+		ret = -ENOMEM;
+		goto free_gid_type;
+	}
+
+	rdma_for_each_port (device, i) {
+		supported_gids = roce_gid_type_mask_support(device, i);
+		WARN_ON(!supported_gids);
+		if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE))
+			cma_dev->default_gid_type[i - rdma_start_port(device)] =
+				CMA_PREFERRED_ROCE_GID_TYPE;
+		else
+			cma_dev->default_gid_type[i - rdma_start_port(device)] =
+				find_first_bit(&supported_gids, BITS_PER_LONG);
+		cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0;
+	}
+
+	init_completion(&cma_dev->comp);
+	refcount_set(&cma_dev->refcount, 1);
+	INIT_LIST_HEAD(&cma_dev->id_list);
+	ib_set_client_data(device, &cma_client, cma_dev);
+
+	mutex_lock(&lock);
+	list_add_tail(&cma_dev->list, &dev_list);
+	list_for_each_entry(id_priv, &listen_any_list, list) {
+		ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
+		if (ret)
+			goto free_listen;
+	}
+	mutex_unlock(&lock);
+
+	trace_cm_add_one(device);
+	return 0;
+
+free_listen:
+	list_del(&cma_dev->list);
+	mutex_unlock(&lock);
+
+	/* cma_process_remove() will delete to_destroy */
+	cma_process_remove(cma_dev);
+	kfree(cma_dev->default_roce_tos);
+free_gid_type:
+	kfree(cma_dev->default_gid_type);
+
+free_cma_dev:
+	kfree(cma_dev);
+	return ret;
+}
+
 static void cma_remove_one(struct ib_device *device, void *client_data)
 {
 	struct cma_device *cma_dev = client_data;
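
Finally, cma_add_one() is re-added below cma_process_remove() so its new free_listen unwind can call cma_process_remove() without a forward declaration. The body keeps the kernel's usual goto-ladder unwind, with the listen fan-out failure as one more rung; a compact sketch of the idiom (hypothetical names):

#include <stdlib.h>

struct dev {
        int *gid_type;
        int *roce_tos;
};

/* Each label undoes everything allocated before the failing step, in
 * reverse order, so every exit releases exactly what was acquired. */
static int add_one(struct dev **out, unsigned int nports)
{
        struct dev *d;
        int ret;

        d = malloc(sizeof(*d));
        if (!d)
                return -1;

        d->gid_type = calloc(nports, sizeof(*d->gid_type));
        if (!d->gid_type) {
                ret = -1;
                goto free_dev;
        }

        d->roce_tos = calloc(nports, sizeof(*d->roce_tos));
        if (!d->roce_tos) {
                ret = -1;
                goto free_gid_type;
        }

        *out = d;
        return 0;

free_gid_type:
        free(d->gid_type);
free_dev:
        free(d);
        return ret;
}

int main(void)
{
        struct dev *d;

        if (add_one(&d, 2) == 0) {
                free(d->roce_tos);
                free(d->gid_type);
                free(d);
        }
        return 0;
}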