Skip to content

Commit d2a0cac

Browse files
ankitvvsoni authored and joergroedel committed
iommu/amd: move wait_on_sem() out of spinlock
With iommu.strict=1, the existing completion wait path can cause soft lockups under a stressed environment, as wait_on_sem() busy-waits under the spinlock with interrupts disabled. Move the completion wait in iommu_completion_wait() out of the spinlock. wait_on_sem() only polls the hardware-updated cmd_sem and does not require iommu->lock, so holding the lock during the busy wait unnecessarily increases contention and extends the time with interrupts disabled. Signed-off-by: Ankit Soni <Ankit.Soni@amd.com> Reviewed-by: Vasant Hegde <vasant.hegde@amd.com> Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
1 parent 9ace475 commit d2a0cac

1 file changed

Lines changed: 17 additions & 8 deletions

File tree

drivers/iommu/amd/iommu.c

Lines changed: 17 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -1185,7 +1185,12 @@ static int wait_on_sem(struct amd_iommu *iommu, u64 data)
11851185
{
11861186
int i = 0;
11871187

1188-
while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) {
1188+
/*
1189+
* cmd_sem holds a monotonically non-decreasing completion sequence
1190+
* number.
1191+
*/
1192+
while ((__s64)(READ_ONCE(*iommu->cmd_sem) - data) < 0 &&
1193+
i < LOOP_TIMEOUT) {
11891194
udelay(1);
11901195
i += 1;
11911196
}
@@ -1437,14 +1442,13 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
14371442
raw_spin_lock_irqsave(&iommu->lock, flags);
14381443

14391444
ret = __iommu_queue_command_sync(iommu, &cmd, false);
1445+
raw_spin_unlock_irqrestore(&iommu->lock, flags);
1446+
14401447
if (ret)
1441-
goto out_unlock;
1448+
return ret;
14421449

14431450
ret = wait_on_sem(iommu, data);
14441451

1445-
out_unlock:
1446-
raw_spin_unlock_irqrestore(&iommu->lock, flags);
1447-
14481452
return ret;
14491453
}
14501454

@@ -3121,13 +3125,18 @@ static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
31213125
raw_spin_lock_irqsave(&iommu->lock, flags);
31223126
ret = __iommu_queue_command_sync(iommu, &cmd, true);
31233127
if (ret)
3124-
goto out;
3128+
goto out_err;
31253129
ret = __iommu_queue_command_sync(iommu, &cmd2, false);
31263130
if (ret)
3127-
goto out;
3131+
goto out_err;
3132+
raw_spin_unlock_irqrestore(&iommu->lock, flags);
3133+
31283134
wait_on_sem(iommu, data);
3129-
out:
3135+
return;
3136+
3137+
out_err:
31303138
raw_spin_unlock_irqrestore(&iommu->lock, flags);
3139+
return;
31313140
}
31323141

31333142
static inline u8 iommu_get_int_tablen(struct iommu_dev_data *dev_data)

0 commit comments

Comments (0)