@@ -525,7 +525,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
  * for a resource and in that case, allocate
  * one, reserve and validate it.
  *
- * @ticket: The ww aqcquire context to use, or NULL if trylocking.
+ * @ticket: The ww acquire context to use, or NULL if trylocking.
  * @res: The resource for which to allocate a backup buffer.
  * @interruptible: Whether any sleeps during allocation should be
  * performed while interruptible.
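(Annotation, not part of the patch: the @ticket being documented here is the kernel's standard wait/wound acquire context. As a hedged illustration of the pattern it ties together — demo_ww_class, struct demo_obj and lock_pair() are made-up names, and the objects are assumed to have been set up with ww_mutex_init() — a two-lock acquisition looks roughly like this:)

```c
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);		/* illustrative lock class */

struct demo_obj {
	struct ww_mutex lock;
};

/* Take two objects' locks deadlock-free under one acquire context. */
static int lock_pair(struct demo_obj *a, struct demo_obj *b)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &demo_ww_class);
retry:
	/* The first lock taken in a context cannot return -EDEADLK. */
	ww_mutex_lock(&a->lock, &ctx);

	ret = ww_mutex_lock(&b->lock, &ctx);
	if (ret == -EDEADLK) {
		/* Wounded by an older context: back off, contend on b first. */
		ww_mutex_unlock(&a->lock);
		swap(a, b);
		goto retry;
	}

	ww_acquire_done(&ctx);		/* acquisition phase complete */
	/* ... operate on both objects ... */
	ww_mutex_unlock(&a->lock);
	ww_mutex_unlock(&b->lock);
	ww_acquire_fini(&ctx);
	return 0;
}
```

(A NULL @ticket, as the corrected comment says, means the caller is not inside such an acquisition phase, so the buffer is reserved by trylock instead.)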
@@ -686,7 +686,7 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
  * @intr: Perform waits interruptible if possible.
  * @dirtying: Pending GPU operation will dirty the resource
  *
- * On succesful return, any backup DMA buffer pointed to by @res->backup will
+ * On successful return, any backup DMA buffer pointed to by @res->backup will
  * be reserved and validated.
  * On hardware resource shortage, this function will repeatedly evict
  * resources of the same type until the validation succeeds.
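(Annotation, not part of the patch: the evict-until-it-fits behaviour described above has a simple retry shape. A rough sketch only — do_validate() and evict_one_resource() are hypothetical stand-ins for the driver's internals, not its real helpers:)

```c
/* Sketch: retry validation, evicting one same-type resource per miss. */
static int validate_with_eviction(struct vmw_resource *res, bool intr)
{
	int ret;

	for (;;) {
		ret = do_validate(res, intr);		/* hypothetical */
		if (ret != -EBUSY)
			return ret;		/* success, or a hard error */

		/*
		 * Hardware resource shortage: evict a resource of the
		 * same type to make room, then try again.
		 */
		ret = evict_one_resource(res, intr);	/* hypothetical */
		if (ret)
			return ret;		/* nothing evictable left */
	}
}
```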
@@ -804,7 +804,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
  * @dx_query_mob: Buffer containing the DX query MOB
  *
  * Read back cached states from the device if they exist. This function
- * assumings binding_mutex is held.
+ * assumes binding_mutex is held.
  */
 int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
 {
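(Annotation, not part of the patch: a "caller must hold binding_mutex" contract like the one being corrected is commonly made checkable with a lockdep annotation, which splats on CONFIG_PROVE_LOCKING kernels if the lock is not held. A minimal sketch with an illustrative mutex, not the driver's code:)

```c
#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(binding_mutex);	/* stand-in for the driver's mutex */

static void needs_binding_mutex(void)
{
	/* Documented assumption, enforced at runtime under lockdep. */
	lockdep_assert_held(&binding_mutex);

	/* ... work that relies on binding_mutex being held ... */
}
```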
@@ -1125,7 +1125,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
 	}
 
 	/*
-	 * In order of increasing backup_offset, clean dirty resorces
+	 * In order of increasing backup_offset, clean dirty resources
 	 * intersecting the range.
 	 */
 	while (found) {
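(Annotation, not part of the patch: the corrected comment introduces a walk over dirty resources ordered by backup_offset. Schematically — the lookup and clean helpers below are hypothetical stand-ins for the driver's own range queries:)

```c
/* Sketch: clean every dirty resource whose backup range intersects
 * [start, end), visiting them in increasing backup_offset order. */
static int clean_range(struct vmw_buffer_object *vbo,
		       pgoff_t start, pgoff_t end)
{
	struct vmw_resource *res;
	int ret;

	for (res = find_first_intersecting(vbo, start, end);	/* hypothetical */
	     res;
	     res = find_next_intersecting(res, end)) {		/* hypothetical */
		ret = clean_resource(res);			/* hypothetical */
		if (ret)
			return ret;
	}
	return 0;
}
```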