Skip to content

Commit 2ae3aab

Browse files
committed
Merge tag 'block-6.13-20250103' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:
 "Collection of fixes for block. Particularly the target name overflow
  has been a bit annoying, as it results in overwriting random memory
  and hence shows up as triggering various other bugs.

   - NVMe pull request via Keith:
       - Fix device specific quirk for PRP list alignment (Robert)
       - Fix target name overflow (Leo)
       - Fix target write granularity (Luis)
       - Fix target sleeping in atomic context (Nilay)
       - Remove unnecessary tcp queue teardown (Chunguang)

   - Simple cdrom typo fix"

* tag 'block-6.13-20250103' of git://git.kernel.dk/linux:
  cdrom: Fix typo, 'devicen' to 'device'
  nvme-tcp: remove nvme_tcp_destroy_io_queues()
  nvmet-loop: avoid using mutex in IO hotpath
  nvmet: propagate npwg topology
  nvmet: Don't overflow subsysnqn
  nvme-pci: 512 byte aligned dma pool segment quirk
2 parents a984e23 + cc0331e commit 2ae3aab

File tree

10 files changed

+109
-82
lines changed

10 files changed

+109
-82
lines changed

drivers/cdrom/cdrom.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -1106,7 +1106,7 @@ int open_for_data(struct cdrom_device_info *cdi)
11061106
}
11071107
}
11081108

1109-
cd_dbg(CD_OPEN, "all seems well, opening the devicen");
1109+
cd_dbg(CD_OPEN, "all seems well, opening the device\n");
11101110

11111111
/* all seems well, we can open the device */
11121112
ret = cdo->open(cdi, 0); /* open for data */

drivers/nvme/host/nvme.h

+5
Original file line numberDiff line numberDiff line change
@@ -173,6 +173,11 @@ enum nvme_quirks {
173173
* MSI (but not MSI-X) interrupts are broken and never fire.
174174
*/
175175
NVME_QUIRK_BROKEN_MSI = (1 << 21),
176+
177+
/*
178+
* Align dma pool segment size to 512 bytes
179+
*/
180+
NVME_QUIRK_DMAPOOL_ALIGN_512 = (1 << 22),
176181
};
177182

178183
/*

drivers/nvme/host/pci.c

+7-2
Original file line numberDiff line numberDiff line change
@@ -2834,15 +2834,20 @@ static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
28342834

28352835
static int nvme_setup_prp_pools(struct nvme_dev *dev)
28362836
{
2837+
size_t small_align = 256;
2838+
28372839
dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
28382840
NVME_CTRL_PAGE_SIZE,
28392841
NVME_CTRL_PAGE_SIZE, 0);
28402842
if (!dev->prp_page_pool)
28412843
return -ENOMEM;
28422844

2845+
if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512)
2846+
small_align = 512;
2847+
28432848
/* Optimisation for I/Os between 4k and 128k */
28442849
dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
2845-
256, 256, 0);
2850+
256, small_align, 0);
28462851
if (!dev->prp_small_pool) {
28472852
dma_pool_destroy(dev->prp_page_pool);
28482853
return -ENOMEM;
@@ -3607,7 +3612,7 @@ static const struct pci_device_id nvme_id_table[] = {
36073612
{ PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
36083613
.driver_data = NVME_QUIRK_BOGUS_NID, },
36093614
{ PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */
3610-
.driver_data = NVME_QUIRK_QDEPTH_ONE },
3615+
.driver_data = NVME_QUIRK_DMAPOOL_ALIGN_512, },
36113616
{ PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */
36123617
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
36133618
NVME_QUIRK_BOGUS_NID, },

drivers/nvme/host/tcp.c

+7-11
Original file line numberDiff line numberDiff line change
@@ -2024,14 +2024,6 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
20242024
return __nvme_tcp_alloc_io_queues(ctrl);
20252025
}
20262026

2027-
static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
2028-
{
2029-
nvme_tcp_stop_io_queues(ctrl);
2030-
if (remove)
2031-
nvme_remove_io_tag_set(ctrl);
2032-
nvme_tcp_free_io_queues(ctrl);
2033-
}
2034-
20352027
static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
20362028
{
20372029
int ret, nr_queues;
@@ -2176,9 +2168,11 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
21762168
nvme_sync_io_queues(ctrl);
21772169
nvme_tcp_stop_io_queues(ctrl);
21782170
nvme_cancel_tagset(ctrl);
2179-
if (remove)
2171+
if (remove) {
21802172
nvme_unquiesce_io_queues(ctrl);
2181-
nvme_tcp_destroy_io_queues(ctrl, remove);
2173+
nvme_remove_io_tag_set(ctrl);
2174+
}
2175+
nvme_tcp_free_io_queues(ctrl);
21822176
}
21832177

21842178
static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl,
@@ -2267,7 +2261,9 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
22672261
nvme_sync_io_queues(ctrl);
22682262
nvme_tcp_stop_io_queues(ctrl);
22692263
nvme_cancel_tagset(ctrl);
2270-
nvme_tcp_destroy_io_queues(ctrl, new);
2264+
if (new)
2265+
nvme_remove_io_tag_set(ctrl);
2266+
nvme_tcp_free_io_queues(ctrl);
22712267
}
22722268
destroy_admin:
22732269
nvme_stop_keep_alive(ctrl);

drivers/nvme/target/admin-cmd.c

+5-4
Original file line numberDiff line numberDiff line change
@@ -139,7 +139,7 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
139139
unsigned long idx;
140140

141141
ctrl = req->sq->ctrl;
142-
xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
142+
nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
143143
/* we don't have the right data for file backed ns */
144144
if (!ns->bdev)
145145
continue;
@@ -331,9 +331,10 @@ static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
331331
u32 count = 0;
332332

333333
if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
334-
xa_for_each(&ctrl->subsys->namespaces, idx, ns)
334+
nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
335335
if (ns->anagrpid == grpid)
336336
desc->nsids[count++] = cpu_to_le32(ns->nsid);
337+
}
337338
}
338339

339340
desc->grpid = cpu_to_le32(grpid);
@@ -772,7 +773,7 @@ static void nvmet_execute_identify_endgrp_list(struct nvmet_req *req)
772773
goto out;
773774
}
774775

775-
xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
776+
nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
776777
if (ns->nsid <= min_endgid)
777778
continue;
778779

@@ -815,7 +816,7 @@ static void nvmet_execute_identify_nslist(struct nvmet_req *req, bool match_css)
815816
goto out;
816817
}
817818

818-
xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
819+
nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
819820
if (ns->nsid <= min_nsid)
820821
continue;
821822
if (match_css && req->ns->csi != req->cmd->identify.csi)

drivers/nvme/target/configfs.c

+9-14
Original file line numberDiff line numberDiff line change
@@ -810,18 +810,6 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
810810
NULL,
811811
};
812812

813-
bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid)
814-
{
815-
struct config_item *ns_item;
816-
char name[12];
817-
818-
snprintf(name, sizeof(name), "%u", nsid);
819-
mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex);
820-
ns_item = config_group_find_item(&subsys->namespaces_group, name);
821-
mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex);
822-
return ns_item != NULL;
823-
}
824-
825813
static void nvmet_ns_release(struct config_item *item)
826814
{
827815
struct nvmet_ns *ns = to_nvmet_ns(item);
@@ -2254,12 +2242,17 @@ static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
22542242
const char *page, size_t count)
22552243
{
22562244
struct list_head *entry;
2245+
char *old_nqn, *new_nqn;
22572246
size_t len;
22582247

22592248
len = strcspn(page, "\n");
22602249
if (!len || len > NVMF_NQN_FIELD_LEN - 1)
22612250
return -EINVAL;
22622251

2252+
new_nqn = kstrndup(page, len, GFP_KERNEL);
2253+
if (!new_nqn)
2254+
return -ENOMEM;
2255+
22632256
down_write(&nvmet_config_sem);
22642257
list_for_each(entry, &nvmet_subsystems_group.cg_children) {
22652258
struct config_item *item =
@@ -2268,13 +2261,15 @@ static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
22682261
if (!strncmp(config_item_name(item), page, len)) {
22692262
pr_err("duplicate NQN %s\n", config_item_name(item));
22702263
up_write(&nvmet_config_sem);
2264+
kfree(new_nqn);
22712265
return -EINVAL;
22722266
}
22732267
}
2274-
memset(nvmet_disc_subsys->subsysnqn, 0, NVMF_NQN_FIELD_LEN);
2275-
memcpy(nvmet_disc_subsys->subsysnqn, page, len);
2268+
old_nqn = nvmet_disc_subsys->subsysnqn;
2269+
nvmet_disc_subsys->subsysnqn = new_nqn;
22762270
up_write(&nvmet_config_sem);
22772271

2272+
kfree(old_nqn);
22782273
return len;
22792274
}
22802275

drivers/nvme/target/core.c

+63-45
Original file line numberDiff line numberDiff line change
@@ -127,7 +127,7 @@ static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
127127
unsigned long idx;
128128
u32 nsid = 0;
129129

130-
xa_for_each(&subsys->namespaces, idx, cur)
130+
nvmet_for_each_enabled_ns(&subsys->namespaces, idx, cur)
131131
nsid = cur->nsid;
132132

133133
return nsid;
@@ -441,11 +441,14 @@ u16 nvmet_req_find_ns(struct nvmet_req *req)
441441
struct nvmet_subsys *subsys = nvmet_req_subsys(req);
442442

443443
req->ns = xa_load(&subsys->namespaces, nsid);
444-
if (unlikely(!req->ns)) {
444+
if (unlikely(!req->ns || !req->ns->enabled)) {
445445
req->error_loc = offsetof(struct nvme_common_command, nsid);
446-
if (nvmet_subsys_nsid_exists(subsys, nsid))
447-
return NVME_SC_INTERNAL_PATH_ERROR;
448-
return NVME_SC_INVALID_NS | NVME_STATUS_DNR;
446+
if (!req->ns) /* ns doesn't exist! */
447+
return NVME_SC_INVALID_NS | NVME_STATUS_DNR;
448+
449+
/* ns exists but it's disabled */
450+
req->ns = NULL;
451+
return NVME_SC_INTERNAL_PATH_ERROR;
449452
}
450453

451454
percpu_ref_get(&req->ns->ref);
@@ -583,8 +586,6 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
583586
goto out_unlock;
584587

585588
ret = -EMFILE;
586-
if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
587-
goto out_unlock;
588589

589590
ret = nvmet_bdev_ns_enable(ns);
590591
if (ret == -ENOTBLK)
@@ -599,38 +600,19 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
599600
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
600601
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
601602

602-
ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
603-
0, GFP_KERNEL);
604-
if (ret)
605-
goto out_dev_put;
606-
607-
if (ns->nsid > subsys->max_nsid)
608-
subsys->max_nsid = ns->nsid;
609-
610-
ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
611-
if (ret)
612-
goto out_restore_subsys_maxnsid;
613-
614603
if (ns->pr.enable) {
615604
ret = nvmet_pr_init_ns(ns);
616605
if (ret)
617-
goto out_remove_from_subsys;
606+
goto out_dev_put;
618607
}
619608

620-
subsys->nr_namespaces++;
621-
622609
nvmet_ns_changed(subsys, ns->nsid);
623610
ns->enabled = true;
611+
xa_set_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED);
624612
ret = 0;
625613
out_unlock:
626614
mutex_unlock(&subsys->lock);
627615
return ret;
628-
629-
out_remove_from_subsys:
630-
xa_erase(&subsys->namespaces, ns->nsid);
631-
out_restore_subsys_maxnsid:
632-
subsys->max_nsid = nvmet_max_nsid(subsys);
633-
percpu_ref_exit(&ns->ref);
634616
out_dev_put:
635617
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
636618
pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
@@ -649,15 +631,37 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
649631
goto out_unlock;
650632

651633
ns->enabled = false;
652-
xa_erase(&ns->subsys->namespaces, ns->nsid);
653-
if (ns->nsid == subsys->max_nsid)
654-
subsys->max_nsid = nvmet_max_nsid(subsys);
634+
xa_clear_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED);
655635

656636
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
657637
pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
658638

659639
mutex_unlock(&subsys->lock);
660640

641+
if (ns->pr.enable)
642+
nvmet_pr_exit_ns(ns);
643+
644+
mutex_lock(&subsys->lock);
645+
nvmet_ns_changed(subsys, ns->nsid);
646+
nvmet_ns_dev_disable(ns);
647+
out_unlock:
648+
mutex_unlock(&subsys->lock);
649+
}
650+
651+
void nvmet_ns_free(struct nvmet_ns *ns)
652+
{
653+
struct nvmet_subsys *subsys = ns->subsys;
654+
655+
nvmet_ns_disable(ns);
656+
657+
mutex_lock(&subsys->lock);
658+
659+
xa_erase(&subsys->namespaces, ns->nsid);
660+
if (ns->nsid == subsys->max_nsid)
661+
subsys->max_nsid = nvmet_max_nsid(subsys);
662+
663+
mutex_unlock(&subsys->lock);
664+
661665
/*
662666
* Now that we removed the namespaces from the lookup list, we
663667
* can kill the per_cpu ref and wait for any remaining references
@@ -671,21 +675,9 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
671675
wait_for_completion(&ns->disable_done);
672676
percpu_ref_exit(&ns->ref);
673677

674-
if (ns->pr.enable)
675-
nvmet_pr_exit_ns(ns);
676-
677678
mutex_lock(&subsys->lock);
678-
679679
subsys->nr_namespaces--;
680-
nvmet_ns_changed(subsys, ns->nsid);
681-
nvmet_ns_dev_disable(ns);
682-
out_unlock:
683680
mutex_unlock(&subsys->lock);
684-
}
685-
686-
void nvmet_ns_free(struct nvmet_ns *ns)
687-
{
688-
nvmet_ns_disable(ns);
689681

690682
down_write(&nvmet_ana_sem);
691683
nvmet_ana_group_enabled[ns->anagrpid]--;
@@ -699,15 +691,33 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
699691
{
700692
struct nvmet_ns *ns;
701693

694+
mutex_lock(&subsys->lock);
695+
696+
if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
697+
goto out_unlock;
698+
702699
ns = kzalloc(sizeof(*ns), GFP_KERNEL);
703700
if (!ns)
704-
return NULL;
701+
goto out_unlock;
705702

706703
init_completion(&ns->disable_done);
707704

708705
ns->nsid = nsid;
709706
ns->subsys = subsys;
710707

708+
if (percpu_ref_init(&ns->ref, nvmet_destroy_namespace, 0, GFP_KERNEL))
709+
goto out_free;
710+
711+
if (ns->nsid > subsys->max_nsid)
712+
subsys->max_nsid = nsid;
713+
714+
if (xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL))
715+
goto out_exit;
716+
717+
subsys->nr_namespaces++;
718+
719+
mutex_unlock(&subsys->lock);
720+
711721
down_write(&nvmet_ana_sem);
712722
ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
713723
nvmet_ana_group_enabled[ns->anagrpid]++;
@@ -718,6 +728,14 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
718728
ns->csi = NVME_CSI_NVM;
719729

720730
return ns;
731+
out_exit:
732+
subsys->max_nsid = nvmet_max_nsid(subsys);
733+
percpu_ref_exit(&ns->ref);
734+
out_free:
735+
kfree(ns);
736+
out_unlock:
737+
mutex_unlock(&subsys->lock);
738+
return NULL;
721739
}
722740

723741
static void nvmet_update_sq_head(struct nvmet_req *req)
@@ -1394,7 +1412,7 @@ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
13941412

13951413
ctrl->p2p_client = get_device(req->p2p_client);
13961414

1397-
xa_for_each(&ctrl->subsys->namespaces, idx, ns)
1415+
nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns)
13981416
nvmet_p2pmem_ns_add_p2p(ctrl, ns);
13991417
}
14001418

drivers/nvme/target/io-cmd-bdev.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
3636
*/
3737
id->nsfeat |= 1 << 4;
3838
/* NPWG = Namespace Preferred Write Granularity. 0's based */
39-
id->npwg = lpp0b;
39+
id->npwg = to0based(bdev_io_min(bdev) / bdev_logical_block_size(bdev));
4040
/* NPWA = Namespace Preferred Write Alignment. 0's based */
4141
id->npwa = id->npwg;
4242
/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */

0 commit comments

Comments
 (0)