@@ -9,66 +9,78 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                       struct ib_udata *udata)
 {
         struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
+        struct mana_ib_create_cq_resp resp = {};
+        struct mana_ib_ucontext *mana_ucontext;
         struct ib_device *ibdev = ibcq->device;
         struct mana_ib_create_cq ucmd = {};
         struct mana_ib_dev *mdev;
-        struct gdma_context *gc;
+        bool is_rnic_cq;
+        u32 doorbell;
         int err;
 
         mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
-        gc = mdev->gdma_dev->gdma_context;
 
-        if (udata->inlen < sizeof(ucmd))
-                return -EINVAL;
+        cq->comp_vector = attr->comp_vector % ibdev->num_comp_vectors;
+        cq->cq_handle = INVALID_MANA_HANDLE;
 
-        if (attr->comp_vector > gc->max_num_queues)
+        if (udata->inlen < offsetof(struct mana_ib_create_cq, flags))
                 return -EINVAL;
 
-        cq->comp_vector = attr->comp_vector;
-
         err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
         if (err) {
                 ibdev_dbg(ibdev,
                           "Failed to copy from udata for create cq, %d\n", err);
                 return err;
         }
 
-        if (attr->cqe > mdev->adapter_caps.max_qp_wr) {
+        is_rnic_cq = !!(ucmd.flags & MANA_IB_CREATE_RNIC_CQ);
+
+        if (!is_rnic_cq && attr->cqe > mdev->adapter_caps.max_qp_wr) {
                 ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
                 return -EINVAL;
         }
 
         cq->cqe = attr->cqe;
-        cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
-                               IB_ACCESS_LOCAL_WRITE);
-        if (IS_ERR(cq->umem)) {
-                err = PTR_ERR(cq->umem);
-                ibdev_dbg(ibdev, "Failed to get umem for create cq, err %d\n",
-                          err);
+        err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, &cq->queue);
+        if (err) {
+                ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
                 return err;
         }
 
-        err = mana_ib_gd_create_dma_region(mdev, cq->umem, &cq->gdma_region);
-        if (err) {
-                ibdev_dbg(ibdev,
-                          "Failed to create dma region for create cq, %d\n",
-                          err);
-                goto err_release_umem;
+        mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
+                                                  ibucontext);
+        doorbell = mana_ucontext->doorbell;
+
+        if (is_rnic_cq) {
+                err = mana_ib_gd_create_cq(mdev, cq, doorbell);
+                if (err) {
+                        ibdev_dbg(ibdev, "Failed to create RNIC cq, %d\n", err);
+                        goto err_destroy_queue;
+                }
+
+                err = mana_ib_install_cq_cb(mdev, cq);
+                if (err) {
+                        ibdev_dbg(ibdev, "Failed to install cq callback, %d\n", err);
+                        goto err_destroy_rnic_cq;
+                }
         }
 
-        ibdev_dbg(ibdev,
-                  "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
-                  err, cq->gdma_region);
-
-        /*
-         * The CQ ID is not known at this time. The ID is generated at create_qp
-         */
-        cq->id = INVALID_QUEUE_ID;
+        resp.cqid = cq->queue.id;
+        err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
+        if (err) {
+                ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata, %d\n", err);
+                goto err_remove_cq_cb;
+        }
 
         return 0;
 
-err_release_umem:
-        ib_umem_release(cq->umem);
+err_remove_cq_cb:
+        mana_ib_remove_cq_cb(mdev, cq);
+err_destroy_rnic_cq:
+        mana_ib_gd_destroy_cq(mdev, cq);
+err_destroy_queue:
+        mana_ib_destroy_queue(mdev, &cq->queue);
+
         return err;
 }
 
@@ -77,33 +89,57 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
         struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
         struct ib_device *ibdev = ibcq->device;
         struct mana_ib_dev *mdev;
-        struct gdma_context *gc;
-        int err;
 
         mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
-        gc = mdev->gdma_dev->gdma_context;
 
-        err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
-        if (err) {
-                ibdev_dbg(ibdev,
-                          "Failed to destroy dma region, %d\n", err);
-                return err;
-        }
+        mana_ib_remove_cq_cb(mdev, cq);
 
-        if (cq->id != INVALID_QUEUE_ID) {
-                kfree(gc->cq_table[cq->id]);
-                gc->cq_table[cq->id] = NULL;
-        }
+        /* Ignore return code as there is not much we can do about it.
+         * The error message is printed inside.
+         */
+        mana_ib_gd_destroy_cq(mdev, cq);
 
-        ib_umem_release(cq->umem);
+        mana_ib_destroy_queue(mdev, &cq->queue);
 
         return 0;
 }
 
-void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
+static void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
 {
         struct mana_ib_cq *cq = ctx;
 
         if (cq->ibcq.comp_handler)
                 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 }
+
+int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
+{
+        struct gdma_context *gc = mdev_to_gc(mdev);
+        struct gdma_queue *gdma_cq;
+
+        if (cq->queue.id >= gc->max_num_cqs)
+                return -EINVAL;
+        /* Create CQ table entry */
+        WARN_ON(gc->cq_table[cq->queue.id]);
+        gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
+        if (!gdma_cq)
+                return -ENOMEM;
+
+        gdma_cq->cq.context = cq;
+        gdma_cq->type = GDMA_CQ;
+        gdma_cq->cq.callback = mana_ib_cq_handler;
+        gdma_cq->id = cq->queue.id;
+        gc->cq_table[cq->queue.id] = gdma_cq;
+        return 0;
+}
+
+void mana_ib_remove_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
+{
+        struct gdma_context *gc = mdev_to_gc(mdev);
+
+        if (cq->queue.id >= gc->max_num_cqs || cq->queue.id == INVALID_QUEUE_ID)
+                return;
+
+        kfree(gc->cq_table[cq->queue.id]);
+        gc->cq_table[cq->queue.id] = NULL;
+}