block: Change the return type of blk_mq_map_queues() into void
Since blk_mq_map_queues() and the .map_queues() callbacks always return 0,
change their return type into void. Most callers ignore the returned value
anyway.

Cc: Christoph Hellwig <hch@lst.de>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Keith Busch <kbusch@kernel.org>
Cc: Martin K. Petersen <martin.petersen@oracle.com>
Cc: Doug Gilbert <dgilbert@interlog.com>
Cc: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: John Garry <john.garry@huawei.com>
Acked-by: Md Haris Iqbal <haris.iqbal@ionos.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Link: https://lore.kernel.org/r/20220815170043.19489-3-bvanassche@acm.org
[axboe: fold in fix from Bart]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 10b41ea15e
commit a4e1d0b76e
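The change is mechanical and repeats across every driver below: each .map_queues() implementation drops its unconditional "return 0;" (or stops forwarding the helper's return value), and the prototype switches from int to void. A minimal before/after sketch of the pattern for a hypothetical PCI driver callback (foo_map_queues and struct foo_dev are illustrative names, not part of this commit):

/* Before: the callback returned an int that was always 0. */
static int foo_map_queues(struct blk_mq_tag_set *set)
{
	struct foo_dev *foo = set->driver_data;	/* hypothetical driver data */

	return blk_mq_pci_map_queues(&set->map[HCTX_TYPE_DEFAULT],
				     foo->pdev, 0);
}

/* After: the callback and the helper it calls both return void. */
static void foo_map_queues(struct blk_mq_tag_set *set)
{
	struct foo_dev *foo = set->driver_data;	/* hypothetical driver data */

	blk_mq_pci_map_queues(&set->map[HCTX_TYPE_DEFAULT], foo->pdev, 0);
}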
@@ -32,7 +32,7 @@ static int get_first_sibling(unsigned int cpu)
 	return cpu;
 }
 
-int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
+void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
 {
 	unsigned int *map = qmap->mq_map;
 	unsigned int nr_queues = qmap->nr_queues;
@@ -70,8 +70,6 @@ int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
 				map[cpu] = map[first_sibling];
 		}
 	}
-
-	return 0;
 }
 EXPORT_SYMBOL_GPL(blk_mq_map_queues);
 
@@ -23,8 +23,8 @@
  * that maps a queue to the CPUs that have irq affinity for the corresponding
  * vector.
  */
-int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
-			  int offset)
+void blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
+			   int offset)
 {
 	const struct cpumask *mask;
 	unsigned int queue, cpu;
@@ -38,11 +38,10 @@ int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
 			qmap->mq_map[cpu] = qmap->queue_offset + queue;
 	}
 
-	return 0;
+	return;
 
 fallback:
 	WARN_ON_ONCE(qmap->nr_queues > 1);
 	blk_mq_clear_mq_map(qmap);
-	return 0;
 }
 EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
@@ -21,7 +21,7 @@
  * @set->nr_hw_queues, or @dev does not provide an affinity mask for a
  * vector, we fallback to the naive mapping.
  */
-int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
+void blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
 		struct ib_device *dev, int first_vec)
 {
 	const struct cpumask *mask;
@@ -36,9 +36,9 @@ int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
 			map->mq_map[cpu] = map->queue_offset + queue;
 	}
 
-	return 0;
+	return;
 
 fallback:
-	return blk_mq_map_queues(map);
+	blk_mq_map_queues(map);
 }
 EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
@@ -21,7 +21,7 @@
  * that maps a queue to the CPUs that have irq affinity for the corresponding
  * vector.
  */
-int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
+void blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
 		struct virtio_device *vdev, int first_vec)
 {
 	const struct cpumask *mask;
@@ -39,8 +39,9 @@ int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
 			qmap->mq_map[cpu] = qmap->queue_offset + queue;
 	}
 
-	return 0;
+	return;
+
 fallback:
-	return blk_mq_map_queues(qmap);
+	blk_mq_map_queues(qmap);
 }
 EXPORT_SYMBOL_GPL(blk_mq_virtio_map_queues);
@@ -4190,7 +4190,7 @@ static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
 	return 0;
 }
 
-static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
+static void blk_mq_update_queue_map(struct blk_mq_tag_set *set)
 {
 	/*
 	 * blk_mq_map_queues() and multiple .map_queues() implementations
@@ -4220,10 +4220,10 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
 		for (i = 0; i < set->nr_maps; i++)
 			blk_mq_clear_mq_map(&set->map[i]);
 
-		return set->ops->map_queues(set);
+		set->ops->map_queues(set);
 	} else {
 		BUG_ON(set->nr_maps > 1);
-		return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
+		blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 	}
 }
 
@@ -4322,9 +4322,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 		set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
 	}
 
-	ret = blk_mq_update_queue_map(set);
-	if (ret)
-		goto out_free_mq_map;
+	blk_mq_update_queue_map(set);
 
 	ret = blk_mq_alloc_set_map_and_rqs(set);
 	if (ret)
@@ -1528,7 +1528,7 @@ static bool should_requeue_request(struct request *rq)
 	return false;
 }
 
-static int null_map_queues(struct blk_mq_tag_set *set)
+static void null_map_queues(struct blk_mq_tag_set *set)
 {
 	struct nullb *nullb = set->driver_data;
 	int i, qoff;
@@ -1579,8 +1579,6 @@ static int null_map_queues(struct blk_mq_tag_set *set)
 		qoff += map->nr_queues;
 		blk_mq_map_queues(map);
 	}
-
-	return 0;
 }
 
 static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
@@ -1165,7 +1165,7 @@ static int rnbd_rdma_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
 	return cnt;
 }
 
-static int rnbd_rdma_map_queues(struct blk_mq_tag_set *set)
+static void rnbd_rdma_map_queues(struct blk_mq_tag_set *set)
 {
 	struct rnbd_clt_session *sess = set->driver_data;
 
@@ -1194,8 +1194,6 @@ static int rnbd_rdma_map_queues(struct blk_mq_tag_set *set)
 			set->map[HCTX_TYPE_DEFAULT].nr_queues,
 			set->map[HCTX_TYPE_READ].nr_queues);
 	}
-
-	return 0;
 }
 
 static struct blk_mq_ops rnbd_mq_ops = {
@@ -802,7 +802,7 @@ static const struct attribute_group *virtblk_attr_groups[] = {
 	NULL,
 };
 
-static int virtblk_map_queues(struct blk_mq_tag_set *set)
+static void virtblk_map_queues(struct blk_mq_tag_set *set)
 {
 	struct virtio_blk *vblk = set->driver_data;
 	int i, qoff;
@@ -827,8 +827,6 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
 		else
 			blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
 	}
-
-	return 0;
 }
 
 static void virtblk_complete_batch(struct io_comp_batch *iob)
@@ -2860,7 +2860,7 @@ nvme_fc_complete_rq(struct request *rq)
 	nvme_fc_ctrl_put(ctrl);
 }
 
-static int nvme_fc_map_queues(struct blk_mq_tag_set *set)
+static void nvme_fc_map_queues(struct blk_mq_tag_set *set)
 {
 	struct nvme_fc_ctrl *ctrl = set->driver_data;
 	int i;
@@ -2880,7 +2880,6 @@ static int nvme_fc_map_queues(struct blk_mq_tag_set *set)
 		else
 			blk_mq_map_queues(map);
 	}
-	return 0;
 }
 
 static const struct blk_mq_ops nvme_fc_mq_ops = {
@@ -450,7 +450,7 @@ static int queue_irq_offset(struct nvme_dev *dev)
 	return 0;
 }
 
-static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
+static void nvme_pci_map_queues(struct blk_mq_tag_set *set)
 {
 	struct nvme_dev *dev = set->driver_data;
 	int i, qoff, offset;
@@ -477,8 +477,6 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
 		qoff += map->nr_queues;
 		offset += map->nr_queues;
 	}
-
-	return 0;
 }
 
 /*
@@ -2188,7 +2188,7 @@ static void nvme_rdma_complete_rq(struct request *rq)
 	nvme_complete_rq(rq);
 }
 
-static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
+static void nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 {
 	struct nvme_rdma_ctrl *ctrl = set->driver_data;
 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
@@ -2231,8 +2231,6 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 		ctrl->io_queues[HCTX_TYPE_DEFAULT],
 		ctrl->io_queues[HCTX_TYPE_READ],
 		ctrl->io_queues[HCTX_TYPE_POLL]);
-
-	return 0;
 }
 
 static const struct blk_mq_ops nvme_rdma_mq_ops = {
@@ -2471,7 +2471,7 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_STS_OK;
 }
 
-static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
+static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
 {
 	struct nvme_tcp_ctrl *ctrl = set->driver_data;
 	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
@@ -2512,8 +2512,6 @@ static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
 		ctrl->io_queues[HCTX_TYPE_DEFAULT],
 		ctrl->io_queues[HCTX_TYPE_READ],
 		ctrl->io_queues[HCTX_TYPE_POLL]);
-
-	return 0;
 }
 
 static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
@@ -3537,7 +3537,7 @@ static struct attribute *host_v2_hw_attrs[] = {
 
 ATTRIBUTE_GROUPS(host_v2_hw);
 
-static int map_queues_v2_hw(struct Scsi_Host *shost)
+static void map_queues_v2_hw(struct Scsi_Host *shost)
 {
 	struct hisi_hba *hisi_hba = shost_priv(shost);
 	struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
@@ -3552,9 +3552,6 @@ static int map_queues_v2_hw(struct Scsi_Host *shost)
 		for_each_cpu(cpu, mask)
 			qmap->mq_map[cpu] = qmap->queue_offset + queue;
 	}
-
-	return 0;
-
 }
 
 static struct scsi_host_template sht_v2_hw = {
@@ -3171,13 +3171,12 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
 	return 0;
 }
 
-static int hisi_sas_map_queues(struct Scsi_Host *shost)
+static void hisi_sas_map_queues(struct Scsi_Host *shost)
 {
 	struct hisi_hba *hisi_hba = shost_priv(shost);
 	struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
 
-	return blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev,
-				     BASE_VECTORS_V3_HW);
+	blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev, BASE_VECTORS_V3_HW);
 }
 
 static struct scsi_host_template sht_v3_hw = {
@@ -3174,7 +3174,7 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
 	return 0;
 }
 
-static int megasas_map_queues(struct Scsi_Host *shost)
+static void megasas_map_queues(struct Scsi_Host *shost)
 {
 	struct megasas_instance *instance;
 	int qoff = 0, offset;
@@ -3183,7 +3183,7 @@ static int megasas_map_queues(struct Scsi_Host *shost)
 	instance = (struct megasas_instance *)shost->hostdata;
 
 	if (shost->nr_hw_queues == 1)
-		return 0;
+		return;
 
 	offset = instance->low_latency_index_start;
 
@@ -3209,8 +3209,6 @@ static int megasas_map_queues(struct Scsi_Host *shost)
 		map->queue_offset = qoff;
 		blk_mq_map_queues(map);
 	}
-
-	return 0;
 }
 
 static void megasas_aen_polling(struct work_struct *work);
@@ -3464,7 +3464,7 @@ static int mpi3mr_bios_param(struct scsi_device *sdev,
  *
  * Return: return zero.
  */
-static int mpi3mr_map_queues(struct Scsi_Host *shost)
+static void mpi3mr_map_queues(struct Scsi_Host *shost)
 {
 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
 	int i, qoff, offset;
@@ -3500,9 +3500,6 @@ static int mpi3mr_map_queues(struct Scsi_Host *shost)
 		qoff += map->nr_queues;
 		offset += map->nr_queues;
 	}
-
-	return 0;
-
 }
 
 /**
@@ -11872,7 +11872,7 @@ out:
  * scsih_map_queues - map reply queues with request queues
  * @shost: SCSI host pointer
  */
-static int scsih_map_queues(struct Scsi_Host *shost)
+static void scsih_map_queues(struct Scsi_Host *shost)
 {
 	struct MPT3SAS_ADAPTER *ioc =
 	    (struct MPT3SAS_ADAPTER *)shost->hostdata;
@@ -11882,7 +11882,7 @@ static int scsih_map_queues(struct Scsi_Host *shost)
 	int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors;
 
 	if (shost->nr_hw_queues == 1)
-		return 0;
+		return;
 
 	for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
 		map = &shost->tag_set.map[i];
@@ -11910,7 +11910,6 @@ static int scsih_map_queues(struct Scsi_Host *shost)
 
 		qoff += map->nr_queues;
 	}
-	return 0;
 }
 
 /* shost template for SAS 2.0 HBA devices */
@@ -81,7 +81,7 @@ LIST_HEAD(hba_list);
 
 struct workqueue_struct *pm8001_wq;
 
-static int pm8001_map_queues(struct Scsi_Host *shost)
+static void pm8001_map_queues(struct Scsi_Host *shost)
 {
 	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
 	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
@@ -684,12 +684,8 @@ static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
 	struct blk_mq_queue_map *map)
 {
 	struct scsi_qla_host *vha = lport->private;
-	int rc;
 
-	rc = blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
-	if (rc)
-		ql_log(ql_log_warn, vha, 0x21de,
-		       "pci map queue failed 0x%x", rc);
+	blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
 }
 
 static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
@@ -350,7 +350,7 @@ MODULE_PARM_DESC(ql2xrspq_follow_inptr_legacy,
 
 static void qla2x00_clear_drv_active(struct qla_hw_data *);
 static void qla2x00_free_device(scsi_qla_host_t *);
-static int qla2xxx_map_queues(struct Scsi_Host *shost);
+static void qla2xxx_map_queues(struct Scsi_Host *shost);
 static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
 
 u32 ql2xnvme_queues = DEF_NVME_HW_QUEUES;
@@ -7994,17 +7994,15 @@ qla_pci_reset_done(struct pci_dev *pdev)
 	clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
 }
 
-static int qla2xxx_map_queues(struct Scsi_Host *shost)
+static void qla2xxx_map_queues(struct Scsi_Host *shost)
 {
-	int rc;
 	scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
 	struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
 
 	if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
-		rc = blk_mq_map_queues(qmap);
+		blk_mq_map_queues(qmap);
 	else
-		rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
-	return rc;
+		blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
 }
 
 struct scsi_host_template qla2xxx_driver_template = {
@@ -7474,12 +7474,12 @@ static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 	return check_condition_result;
 }
 
-static int sdebug_map_queues(struct Scsi_Host *shost)
+static void sdebug_map_queues(struct Scsi_Host *shost)
 {
 	int i, qoff;
 
 	if (shost->nr_hw_queues == 1)
-		return 0;
+		return;
 
 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
@@ -7501,9 +7501,6 @@ static int sdebug_map_queues(struct Scsi_Host *shost)
 
 		qoff += map->nr_queues;
 	}
-
-	return 0;
-
 }
 
 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
@@ -1849,13 +1849,13 @@ static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 	return 0;
 }
 
-static int scsi_map_queues(struct blk_mq_tag_set *set)
+static void scsi_map_queues(struct blk_mq_tag_set *set)
 {
 	struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
 
 	if (shost->hostt->map_queues)
 		return shost->hostt->map_queues(shost);
-	return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
+	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
 }
 
 void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
@@ -6436,12 +6436,12 @@ static int pqi_slave_alloc(struct scsi_device *sdev)
 	return 0;
 }
 
-static int pqi_map_queues(struct Scsi_Host *shost)
+static void pqi_map_queues(struct Scsi_Host *shost)
 {
 	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
 
-	return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
-				     ctrl_info->pci_dev, 0);
+	blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+			      ctrl_info->pci_dev, 0);
 }
 
 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
@@ -711,12 +711,12 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
 	return virtscsi_tmf(vscsi, cmd);
 }
 
-static int virtscsi_map_queues(struct Scsi_Host *shost)
+static void virtscsi_map_queues(struct Scsi_Host *shost)
 {
 	struct virtio_scsi *vscsi = shost_priv(shost);
 	struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
 
-	return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
+	blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
 }
 
 static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
@@ -2701,9 +2701,9 @@ static inline bool is_device_wlun(struct scsi_device *sdev)
  * Associate the UFS controller queue with the default and poll HCTX types.
  * Initialize the mq_map[] arrays.
  */
-static int ufshcd_map_queues(struct Scsi_Host *shost)
+static void ufshcd_map_queues(struct Scsi_Host *shost)
 {
-	int i, ret;
+	int i;
 
 	for (i = 0; i < shost->nr_maps; i++) {
 		struct blk_mq_queue_map *map = &shost->tag_set.map[i];
@@ -2720,11 +2720,8 @@ static int ufshcd_map_queues(struct Scsi_Host *shost)
 			WARN_ON_ONCE(true);
 		}
 		map->queue_offset = 0;
-		ret = blk_mq_map_queues(map);
-		WARN_ON_ONCE(ret);
+		blk_mq_map_queues(map);
 	}
-
-	return 0;
 }
 
 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
@@ -5,7 +5,7 @@
 struct blk_mq_queue_map;
 struct pci_dev;
 
-int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
-			  int offset);
+void blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
+			   int offset);
 
 #endif /* _LINUX_BLK_MQ_PCI_H */
@@ -5,7 +5,7 @@
 struct blk_mq_tag_set;
 struct ib_device;
 
-int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
+void blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
 		struct ib_device *dev, int first_vec);
 
 #endif /* _LINUX_BLK_MQ_RDMA_H */
@@ -5,7 +5,7 @@
 struct blk_mq_queue_map;
 struct virtio_device;
 
-int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
+void blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
 		struct virtio_device *vdev, int first_vec);
 
 #endif /* _LINUX_BLK_MQ_VIRTIO_H */
@@ -630,7 +630,7 @@ struct blk_mq_ops {
 	 * @map_queues: This allows drivers specify their own queue mapping by
 	 * overriding the setup-time function that builds the mq_map.
 	 */
-	int (*map_queues)(struct blk_mq_tag_set *set);
+	void (*map_queues)(struct blk_mq_tag_set *set);
 
 #ifdef CONFIG_BLK_DEBUG_FS
 	/**
@@ -880,7 +880,7 @@ void blk_mq_freeze_queue_wait(struct request_queue *q);
 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
 				     unsigned long timeout);
 
-int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
+void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
 
 void blk_mq_quiesce_queue_nowait(struct request_queue *q);
@@ -276,7 +276,7 @@ struct scsi_host_template {
 	 *
 	 * Status: OPTIONAL
 	 */
-	int (* map_queues)(struct Scsi_Host *shost);
+	void (* map_queues)(struct Scsi_Host *shost);
 
 	/*
 	 * SCSI interface of blk_poll - poll for IO completions.