
Commit 600d9ea

johnpgarry authored and martinkpetersen committed
scsi: scsi_debug: Use blk_mq_tagset_busy_iter() in sdebug_blk_mq_poll()
Instead of iterating all deferred commands in the submission queue structures, use blk_mq_tagset_busy_iter(), which is a standard API for this.

Signed-off-by: John Garry <john.g.garry@oracle.com>
Link: https://lore.kernel.org/r/20230327074310.1862889-8-john.g.garry@oracle.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
1 parent: 1107c7b · commit: 600d9ea

1 file changed: drivers/scsi/scsi_debug.c
Lines changed: 98 additions & 97 deletions
@@ -7511,123 +7511,124 @@ static void sdebug_map_queues(struct Scsi_Host *shost)
         }
 }
 
-static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+struct sdebug_blk_mq_poll_data {
+        unsigned int queue_num;
+        int *num_entries;
+};
+
+/*
+ * We don't handle aborted commands here, but it does not seem possible to have
+ * aborted polled commands from schedule_resp()
+ */
+static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
 {
-        bool first;
-        bool retiring = false;
-        int num_entries = 0;
-        unsigned int qc_idx = 0;
-        unsigned long iflags;
-        ktime_t kt_from_boot = ktime_get_boottime();
-        struct sdebug_queue *sqp;
-        struct sdebug_queued_cmd *sqcp;
-        struct scsi_cmnd *scp;
+        struct sdebug_blk_mq_poll_data *data = opaque;
+        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+        struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
         struct sdebug_defer *sd_dp;
+        u32 unique_tag = blk_mq_unique_tag(rq);
+        u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
+        struct sdebug_queued_cmd *sqcp;
+        struct sdebug_queue *sqp;
+        unsigned long flags;
+        int queue_num = data->queue_num;
+        bool retiring = false;
+        int qc_idx;
+        ktime_t time;
 
-        sqp = sdebug_q_arr + queue_num;
+        /* We're only interested in one queue for this iteration */
+        if (hwq != queue_num)
+                return true;
 
-        spin_lock_irqsave(&sqp->qc_lock, iflags);
+        /* Subsequent checks would fail if this failed, but check anyway */
+        if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
+                return true;
 
-        qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
-        if (qc_idx >= sdebug_max_queue)
-                goto unlock;
+        time = ktime_get_boottime();
 
-        for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
-                unsigned long flags;
-                struct sdebug_scsi_cmd *sdsc;
-                if (first) {
-                        first = false;
-                        if (!test_bit(qc_idx, sqp->in_use_bm))
-                                continue;
-                } else {
-                        qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
-                }
-                if (qc_idx >= sdebug_max_queue)
-                        break;
+        spin_lock_irqsave(&sdsc->lock, flags);
+        sqcp = TO_QUEUED_CMD(cmd);
+        if (!sqcp) {
+                spin_unlock_irqrestore(&sdsc->lock, flags);
+                return true;
+        }
 
-                sqcp = sqp->qc_arr[qc_idx];
-                if (!sqcp) {
-                        pr_err("sqcp is NULL, queue_num=%d, qc_idx=%u from %s\n",
-                               queue_num, qc_idx, __func__);
-                        break;
-                }
-                sd_dp = &sqcp->sd_dp;
+        sqp = sdebug_q_arr + queue_num;
+        sd_dp = &sqcp->sd_dp;
 
-                scp = sqcp->scmd;
-                if (unlikely(scp == NULL)) {
-                        pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
-                               queue_num, qc_idx, __func__);
-                        break;
-                }
-                sdsc = scsi_cmd_priv(scp);
-                spin_lock_irqsave(&sdsc->lock, flags);
-                if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
-                        struct sdebug_queued_cmd *_sqcp = TO_QUEUED_CMD(scp);
-
-                        if (_sqcp != sqcp) {
-                                pr_err("inconsistent queued cmd tag=%#x\n",
-                                       blk_mq_unique_tag(scsi_cmd_to_rq(scp)));
-                                spin_unlock_irqrestore(&sdsc->lock, flags);
-                                continue;
-                        }
+        if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
+                spin_unlock_irqrestore(&sdsc->lock, flags);
+                return true;
+        }
 
-                        if (kt_from_boot < sd_dp->cmpl_ts) {
-                                spin_unlock_irqrestore(&sdsc->lock, flags);
-                                continue;
-                        }
+        if (time < sd_dp->cmpl_ts) {
+                spin_unlock_irqrestore(&sdsc->lock, flags);
+                return true;
+        }
 
-                } else /* ignoring non REQ_POLLED requests */ {
-                        spin_unlock_irqrestore(&sdsc->lock, flags);
-                        continue;
-                }
-                if (unlikely(atomic_read(&retired_max_queue) > 0))
-                        retiring = true;
+        if (unlikely(atomic_read(&retired_max_queue) > 0))
+                retiring = true;
 
-                if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
+        qc_idx = sd_dp->sqa_idx;
+        sqp->qc_arr[qc_idx] = NULL;
+        if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
+                spin_unlock_irqrestore(&sdsc->lock, flags);
+                pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u\n",
+                       sqp, queue_num, qc_idx);
+                sdebug_free_queued_cmd(sqcp);
+                return true;
+        }
+
+        if (unlikely(retiring)) { /* user has reduced max_queue */
+                int k, retval = atomic_read(&retired_max_queue);
+
+                if (qc_idx >= retval) {
+                        pr_err("index %d too large\n", retval);
                         spin_unlock_irqrestore(&sdsc->lock, flags);
-                        pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
-                                sqp, queue_num, qc_idx, __func__);
                         sdebug_free_queued_cmd(sqcp);
-                        break;
-                }
-                sqp->qc_arr[qc_idx] = NULL;
-                if (unlikely(retiring)) { /* user has reduced max_queue */
-                        int k, retval;
-
-                        retval = atomic_read(&retired_max_queue);
-                        if (qc_idx >= retval) {
-                                pr_err("index %d too large\n", retval);
-                                spin_unlock_irqrestore(&sdsc->lock, flags);
-                                sdebug_free_queued_cmd(sqcp);
-                                break;
-                        }
-                        k = find_last_bit(sqp->in_use_bm, retval);
-                        if ((k < sdebug_max_queue) || (k == retval))
-                                atomic_set(&retired_max_queue, 0);
-                        else
-                                atomic_set(&retired_max_queue, k + 1);
+                        return true;
                 }
-                spin_unlock_irqrestore(&sdsc->lock, flags);
-                spin_unlock_irqrestore(&sqp->qc_lock, iflags);
 
-                if (sdebug_statistics) {
-                        atomic_inc(&sdebug_completions);
-                        if (raw_smp_processor_id() != sd_dp->issuing_cpu)
-                                atomic_inc(&sdebug_miss_cpus);
-                }
+                k = find_last_bit(sqp->in_use_bm, retval);
+                if ((k < sdebug_max_queue) || (k == retval))
+                        atomic_set(&retired_max_queue, 0);
+                else
+                        atomic_set(&retired_max_queue, k + 1);
+        }
 
-                sdebug_free_queued_cmd(sqcp);
+        ASSIGN_QUEUED_CMD(cmd, NULL);
+        spin_unlock_irqrestore(&sdsc->lock, flags);
 
-                scsi_done(scp); /* callback to mid level */
-                num_entries++;
-                spin_lock_irqsave(&sqp->qc_lock, iflags);
-                if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
-                        break;
+        if (sdebug_statistics) {
+                atomic_inc(&sdebug_completions);
+                if (raw_smp_processor_id() != sd_dp->issuing_cpu)
+                        atomic_inc(&sdebug_miss_cpus);
         }
 
-unlock:
-        spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+        sdebug_free_queued_cmd(sqcp);
 
+        scsi_done(cmd); /* callback to mid level */
+        (*data->num_entries)++;
+        return true;
+}
+
+static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+        int num_entries = 0;
+        unsigned long iflags;
+        struct sdebug_queue *sqp;
+        struct sdebug_blk_mq_poll_data data = {
+                .queue_num = queue_num,
+                .num_entries = &num_entries,
+        };
+        sqp = sdebug_q_arr + queue_num;
+
+        spin_lock_irqsave(&sqp->qc_lock, iflags);
+
+        blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
+                                &data);
+
+        spin_unlock_irqrestore(&sqp->qc_lock, iflags);
         if (num_entries > 0)
                 atomic_add(num_entries, &sdeb_mq_poll_count);
         return num_entries;
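The conversion visible in the diff above follows the standard blk_mq_tagset_busy_iter() idiom: a small context struct is passed through the iterator's opaque pointer, a per-request callback filters on hardware queue and returns true to keep iterating, and a single iterator call replaces the hand-rolled scan of the driver's own in_use_bm bitmap. Below is a minimal sketch of that idiom stripped of the scsi_debug specifics; the mydrv_* names are hypothetical, and the two-argument callback signature matches the kernel series this commit targets.

#include <linux/blk-mq.h>

/* Hypothetical per-call state, handed to the iterator as the opaque pointer. */
struct mydrv_poll_data {
        unsigned int queue_num;         /* hw queue being polled */
        int *num_entries;               /* completions found so far */
};

/* Invoked once for each busy (started) request in the tag set. */
static bool mydrv_poll_iter(struct request *rq, void *opaque)
{
        struct mydrv_poll_data *data = opaque;
        u32 unique_tag = blk_mq_unique_tag(rq);

        /* The tag set spans all hw queues; skip requests from other queues. */
        if (blk_mq_unique_tag_to_hwq(unique_tag) != data->queue_num)
                return true;            /* true keeps the iteration going */

        /* ... driver-specific check that the request has really completed ... */
        (*data->num_entries)++;
        return true;
}

static int mydrv_poll(struct blk_mq_tag_set *set, unsigned int queue_num)
{
        int num_entries = 0;
        struct mydrv_poll_data data = {
                .queue_num = queue_num,
                .num_entries = &num_entries,
        };

        /* Walk every in-flight request once; filtering happens in the callback. */
        blk_mq_tagset_busy_iter(set, mydrv_poll_iter, &data);
        return num_entries;
}

The design point, per the commit message, is that blk-mq already tracks which tags are busy, so the driver no longer needs to iterate its own submission queue structures to find candidate commands.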
