<div dir="ltr">Sorry, I found that the latest code function has become amdgpu_cs_pass1, and radeon_cs_parser_init has the same problem.And i will send the patch.</div><br><div class="gmail_quote"><div dir="ltr" class="gmail_attr">whitehat002 whitehat002 <<a href="mailto:hackyzh002@gmail.com">hackyzh002@gmail.com</a>> 于2023年4月18日周二 11:39写道:<br></div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex"><div dir="ltr"><div>Hello, </div><div><br></div><div>I am going to file a security bug.<br></div><div><br></div>VULNERABILITY DETAILS<br><br>ioctl$AMDGPU_CS will call amdgpu_cs_ioctl which will call amdgpu_cs_parser_init. The type of size is unsigned(4 bytes)[1]. And size is assigned from p->chunks[i].length_dw[2] which is assigned from user_chunk.length_dw[3], which type is __u32[4](4 bytes, under user control). If size is 0x40000000, there will be an integer overflow, size will be zero after size = sizeof(uint32_t)[5]. Although there is an overflow check in kvmalloc_array[6], but it will just check size_t overflow(8 bytes), so it will not notice this one. copy_from_user will not copy anything, if size is zero. So p->chunks[i].kdata will be filled with the last time used data, because kvmalloc_array[6] is called without __GFP_ZERO flag. Finally it will access the uninitialized data[7].<br><br>```<br>struct drm_amdgpu_cs_chunk {<br>      __u32           chunk_id;<br>     __u32           length_dw;                                                              // [4]<br>        __u64           chunk_data;<br>};<br><br><br>static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)<br>{<br> struct amdgpu_fpriv *fpriv = p->filp->driver_priv;<br>      struct amdgpu_vm *vm = &fpriv->vm;<br>     uint64_t *chunk_array_user;<br>   uint64_t *chunk_array;<br>        unsigned size, num_ibs = 0;                                                     // [1]<br>        uint32_t uf_offset = 0;<br>       int i;<br>        int ret;<br><br>    if (cs->in.num_chunks == 0)<br>                return -EINVAL;<br><br>     chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);<br>     if (!chunk_array)<br>             return -ENOMEM;<br><br>     p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);<br>  if (!p->ctx) {<br>             ret = -EINVAL;<br>                goto free_chunk;<br>      }<br><br>   /* skip guilty context job */<br> if (atomic_read(&p->ctx->guilty) == 1) {<br>            ret = -ECANCELED;<br>             goto free_chunk;<br>      }<br><br>   /* get chunks */<br>      chunk_array_user = u64_to_user_ptr(cs->in.chunks);<br> if (copy_from_user(chunk_array, chunk_array_user,<br>                        sizeof(uint64_t)*cs->in.num_chunks)) {<br>         ret = -EFAULT;<br>                goto free_chunk;<br>      }<br><br>   p->nchunks = cs->in.num_chunks;<br> p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),<br>                      GFP_KERNEL);<br>    if (!p->chunks) {<br>          ret = -ENOMEM;<br>                goto free_chunk;<br>      }<br><br>   for (i = 0; i < p->nchunks; i++) {<br>              struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;<br>         struct drm_amdgpu_cs_chunk user_chunk;<br>                uint32_t __user *cdata;<br><br>             chunk_ptr = u64_to_user_ptr(chunk_array[i]);<br>          if (copy_from_user(&user_chunk, chunk_ptr,<br>                                       sizeof(struct drm_amdgpu_cs_chunk))) {<br>                      ret = 
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;		// [3]

		size = p->chunks[i].length_dw;				// [2]
		cdata = u64_to_user_ptr(user_chunk.chunk_data);

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);	// [6]
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);				// [5]
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,		// [7]
							 &uf_offset);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_BO_HANDLES:
			size = sizeof(struct drm_amdgpu_bo_list_in);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
	if (ret)
		goto free_all_kdata;

	if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
		ret = -ECANCELED;
		goto free_all_kdata;
	}

	if (p->uf_entry.tv.bo)
		p->job->uf_addr = uf_offset;
	kvfree(chunk_array);

	/* Use this opportunity to fill in task info for the vm */
	amdgpu_vm_set_task_info(vm);

	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kvfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
free_chunk:
	kvfree(chunk_array);

	return ret;
}
```
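For illustration, here is a minimal stand-alone user-space sketch of the arithmetic above, assuming the usual 64-bit configuration where unsigned is 4 bytes and size_t is 8 bytes. It only reproduces the computation from [5] and the width used by kvmalloc_array's check; it is not the kernel code itself:

```
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* user-controlled length_dw, as in drm_amdgpu_cs_chunk */
	unsigned size = 0x40000000;

	/* same statement as [5]: 0x40000000 * 4 == 0x100000000, which does
	 * not fit in a 32-bit unsigned and wraps to 0 */
	size *= sizeof(uint32_t);
	printf("unsigned result: %u\n", size);		/* prints 0 */

	/* kvmalloc_array's overflow check multiplies in size_t (8 bytes),
	 * where 0x40000000 * 4 fits, so no overflow is detected there */
	size_t wide = (size_t)0x40000000 * sizeof(uint32_t);
	printf("size_t result: %#zx\n", wide);		/* prints 0x100000000 */

	return 0;
}
```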