@@ -406,7 +406,7 @@ static void smbd_post_send_credits(struct work_struct *work)
406406 else
407407 response = get_empty_queue_buffer (info );
408408 if (!response ) {
409- /* now switch to emtpy packet queue */
409+ /* now switch to empty packet queue */
410410 if (use_receive_queue ) {
411411 use_receive_queue = 0 ;
412412 continue ;
@@ -618,7 +618,7 @@ static struct rdma_cm_id *smbd_create_id(
618618
619619/*
620620 * Test if FRWR (Fast Registration Work Requests) is supported on the device
621- * This implementation requries FRWR on RDMA read/write
621+ * This implementation requires FRWR on RDMA read/write
622622 * return value: true if it is supported
623623 */
624624static bool frwr_is_supported (struct ib_device_attr * attrs )
@@ -2177,7 +2177,7 @@ static int allocate_mr_list(struct smbd_connection *info)
21772177 * MR available in the list. It may access the list while the
21782178 * smbd_mr_recovery_work is recovering the MR list. This doesn't need a lock
21792179 * as they never modify the same places. However, there may be several CPUs
2180- * issueing I/O trying to get MR at the same time, mr_list_lock is used to
2180+ * issuing I/O trying to get MR at the same time, mr_list_lock is used to
21812181 * protect this situation.
21822182 */
21832183static struct smbd_mr * get_mr (struct smbd_connection * info )
@@ -2311,7 +2311,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
23112311 /*
23122312	 * There is no need for waiting for completion on ib_post_send
23132313 * on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
2314- * on the next ib_post_send when we actaully send I/O to remote peer
2314+ * on the next ib_post_send when we actually send I/O to remote peer
23152315 */
23162316 rc = ib_post_send (info -> id -> qp , & reg_wr -> wr , NULL );
23172317 if (!rc )
0 commit comments