Commit d96bb54

Alex Williamson authored and gregkh committed
vfio/type1: Remove locked page accounting workqueue
commit 0cfef2b7410b64d7a430947e0b533314c4f97153 upstream.

If the mmap_sem is contended then the vfio type1 IOMMU backend will defer locked page accounting updates to a workqueue task. This has a few problems and, depending on which side the user tries to play, they might be over-penalized for unmaps that haven't yet been accounted, or race the workqueue to enter more mappings than they're allowed. The original intent of this workqueue mechanism seems to be focused on reducing latency through the ioctl, but we cannot do so at the cost of correctness. Remove this workqueue mechanism and update the callers to allow for failure. We can also now recheck the limit under write lock to make sure we don't exceed it.

vfio_pin_pages_remote() also now necessarily includes an unwind path which we can jump to directly if the consecutive page pinning finds that we're exceeding the user's memory limits. This avoids the current lazy approach which does accounting and mapping up to the fault, only to return an error on the next iteration to unwind the entire vfio_dma.

Cc: stable@vger.kernel.org
Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Kirti Wankhede <kwankhede@nvidia.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 341adf5 commit d96bb54
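
The race described above is a time-of-check/time-of-use hole: the limit was checked against current->mm->locked_vm while updates to it could still be sitting in the workqueue. A toy userspace illustration of the hazard and of the recheck-under-one-lock fix (plain C with pthreads; not kernel code, every identifier here is invented for the demo; build with `cc -pthread`):

#include <pthread.h>
#include <stdio.h>

static long locked_vm;                  /* stands in for mm->locked_vm */
static const long limit = 100;          /* stands in for RLIMIT_MEMLOCK pages */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Racy shape of the old code: check now, account later. */
static int map_racy(long npage)
{
        if (locked_vm + npage > limit)  /* check outside the lock */
                return -1;
        pthread_mutex_lock(&lock);      /* "deferred" accounting */
        locked_vm += npage;
        pthread_mutex_unlock(&lock);
        return 0;
}

/* Shape of the fix: recheck and account under one lock hold. */
static int map_fixed(long npage)
{
        int ret = 0;

        pthread_mutex_lock(&lock);
        if (locked_vm + npage > limit)
                ret = -1;
        else
                locked_vm += npage;
        pthread_mutex_unlock(&lock);
        return ret;
}

static void *worker(void *arg)
{
        /* Each of two workers asks for 60 pages of a 100-page budget. */
        return (void *)(long)map_racy(60);  /* swap in map_fixed() to compare */
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, worker, NULL);
        pthread_create(&b, NULL, worker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("locked_vm = %ld (limit %ld)\n", locked_vm, limit);
        return 0;
}

With two workers each asking for 60 of a 100-page budget, the racy variant can finish with locked_vm = 120; swapping in map_fixed() guarantees the second request fails instead, which is the behavior the patch gives vfio_lock_acct() below.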

1 file changed

drivers/vfio/vfio_iommu_type1.c (43 additions, 59 deletions)
@@ -130,57 +130,34 @@ static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
 	rb_erase(&old->node, &iommu->dma_list);
 }
 
-struct vwork {
-	struct mm_struct	*mm;
-	long			npage;
-	struct work_struct	work;
-};
-
-/* delayed decrement/increment for locked_vm */
-static void vfio_lock_acct_bg(struct work_struct *work)
+static int vfio_lock_acct(long npage, bool *lock_cap)
 {
-	struct vwork *vwork = container_of(work, struct vwork, work);
-	struct mm_struct *mm;
-
-	mm = vwork->mm;
-	down_write(&mm->mmap_sem);
-	mm->locked_vm += vwork->npage;
-	up_write(&mm->mmap_sem);
-	mmput(mm);
-	kfree(vwork);
-}
+	int ret = 0;
 
-static void vfio_lock_acct(long npage)
-{
-	struct vwork *vwork;
-	struct mm_struct *mm;
+	if (!npage)
+		return 0;
 
-	if (!current->mm || !npage)
-		return; /* process exited or nothing to do */
+	if (!current->mm)
+		return -ESRCH; /* process exited */
 
-	if (down_write_trylock(&current->mm->mmap_sem)) {
-		current->mm->locked_vm += npage;
-		up_write(&current->mm->mmap_sem);
-		return;
-	}
+	down_write(&current->mm->mmap_sem);
+	if (npage > 0) {
+		if (lock_cap ? !*lock_cap : !capable(CAP_IPC_LOCK)) {
+			unsigned long limit;
 
-	/*
-	 * Couldn't get mmap_sem lock, so must setup to update
-	 * mm->locked_vm later. If locked_vm were atomic, we
-	 * wouldn't need this silliness
-	 */
-	vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
-	if (!vwork)
-		return;
-	mm = get_task_mm(current);
-	if (!mm) {
-		kfree(vwork);
-		return;
+			limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+			if (current->mm->locked_vm + npage > limit)
+				ret = -ENOMEM;
+		}
 	}
-	INIT_WORK(&vwork->work, vfio_lock_acct_bg);
-	vwork->mm = mm;
-	vwork->npage = npage;
-	schedule_work(&vwork->work);
+
+	if (!ret)
+		current->mm->locked_vm += npage;
+
+	up_write(&current->mm->mmap_sem);
+
+	return ret;
 }
 
 /*
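
Two things changed in the contract here: vfio_lock_acct() can now fail, so increment paths must check its return, and it takes an optional pointer to a caller-cached capable(CAP_IPC_LOCK) result so hot paths don't re-evaluate the capability under mmap_sem. The two calling shapes, mirroring the call sites later in this diff (excerpts, not standalone code):

        /* pin path: capability sampled once per operation, result reused */
        bool lock_cap = capable(CAP_IPC_LOCK);
        long ret = vfio_lock_acct(npage, &lock_cap);    /* may return -ENOMEM */

        /* unpin/teardown paths: npage is negative, so the limit check
         * is skipped entirely and NULL is fine for lock_cap */
        vfio_lock_acct(-unlocked, NULL);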
@@ -262,9 +239,9 @@ static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
 static long vfio_pin_pages(unsigned long vaddr, long npage,
 			   int prot, unsigned long *pfn_base)
 {
-	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	unsigned long pfn = 0, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 	bool lock_cap = capable(CAP_IPC_LOCK);
-	long ret, i;
+	long ret, i = 1;
 	bool rsvd;
 
 	if (!current->mm)
@@ -283,16 +260,11 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
 		return -ENOMEM;
 	}
 
-	if (unlikely(disable_hugepages)) {
-		if (!rsvd)
-			vfio_lock_acct(1);
-		return 1;
-	}
+	if (unlikely(disable_hugepages))
+		goto out;
 
 	/* Lock all the consecutive pages from pfn_base */
-	for (i = 1, vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) {
-		unsigned long pfn = 0;
-
+	for (vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) {
 		ret = vaddr_get_pfn(vaddr, prot, &pfn);
 		if (ret)
 			break;
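
Hoisting pfn and i out of the loop, with i = 1 because the base page was pinned before the loop started, is what lets the exit labels added in the next hunk see how far pinning got: at out: or unpin_out:, i is exactly the number of pages pinned so far, starting at *pfn_base. For example, if the limit trips at i = 200 of npage = 512, the next hunk sets ret = -ENOMEM and jumps to unpin_out, which puts back pfns *pfn_base through *pfn_base + 199 and returns the error instead of leaving a partial pin behind. The disable_hugepages case now rides the same path, so its single page is accounted by the one vfio_lock_acct() call and unwound by the same loop if accounting fails.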
@@ -308,12 +280,24 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
 			put_pfn(pfn, prot);
 			pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
 				__func__, limit << PAGE_SHIFT);
-			break;
+			ret = -ENOMEM;
+			goto unpin_out;
 		}
 	}
 
+out:
 	if (!rsvd)
-		vfio_lock_acct(i);
+		ret = vfio_lock_acct(i, &lock_cap);
+
+unpin_out:
+	if (ret) {
+		if (!rsvd) {
+			for (pfn = *pfn_base ; i ; pfn++, i--)
+				put_pfn(pfn, prot);
+		}
+
+		return ret;
+	}
 
 	return i;
 }
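
The unwind relies on the pinned pages being consecutive pfns starting at *pfn_base, so releasing them needs no bookkeeping beyond the count in i. A self-contained sketch of the acquire-then-unwind pattern in plain C (invented names; acquire()/release() stand in for vaddr_get_pfn()/put_pfn()):

#include <stdio.h>

/* Pretend resource: acquisition fails once a fake limit is hit. */
static int acquire(long idx)
{
        return idx < 5 ? 0 : -1;        /* the 6th acquire trips the limit */
}

static void release(long idx)
{
        printf("released %ld\n", idx);
}

/* Acquire npage items; on failure, release exactly what we got. */
static long pin_all(long npage)
{
        long i;

        for (i = 0; i < npage; i++) {
                if (acquire(i))
                        goto unwind;
        }
        return npage;                   /* success: caller owns all npage */

unwind:
        while (i--)                     /* i == number acquired so far */
                release(i);
        return -1;                      /* caller sees no partial state */
}

int main(void)
{
        printf("pin_all(8) = %ld\n", pin_all(8));
        return 0;
}

The kernel version walks forward (pfn++, i--) instead of backward, since the pinned pfns are consecutive, but the invariant is the same: i is always exactly the number of pages that must be given back.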
@@ -328,7 +312,7 @@ static long vfio_unpin_pages(unsigned long pfn, long npage,
 		unlocked += put_pfn(pfn++, prot);
 
 	if (do_accounting)
-		vfio_lock_acct(-unlocked);
+		vfio_lock_acct(-unlocked, NULL);
 
 	return unlocked;
 }
@@ -390,7 +374,7 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
 		cond_resched();
 	}
 
-	vfio_lock_acct(-unlocked);
+	vfio_lock_acct(-unlocked, NULL);
 }
 
 static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
