mirror of
https://gitee.com/bianbu-linux/linux-6.6
synced 2025-07-24 01:54:03 -04:00
- Fix nvdimm namespace creation on platforms that do not publish associated 'DIMM' metadata for a persistent memory region. - Miscellaneous fixes and cleanups. -----BEGIN PGP SIGNATURE----- iHUEABYKAB0WIQSbo+XnGs+rwLz9XGXfioYZHlFsZwUCY0nx4QAKCRDfioYZHlFs Zwg3AQD6t9gvqu7AV1eTEGrypC3C47Z5yeeEqLC7U9DjtnxP9wD/VWGNX7uYu6Ck rf8vyT7NFqg0khpU6XeaaDWQtasRggs= =WCrX -----END PGP SIGNATURE----- Merge tag 'libnvdimm-for-6.1' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm Pull nvdimm updates from Dan Williams: "Some small cleanups and fixes in and around the nvdimm subsystem. The most significant change is a regression fix for nvdimm namespace (volume) creation when the namespace size is smaller than 2MB. Summary: - Fix nvdimm namespace creation on platforms that do not publish associated 'DIMM' metadata for a persistent memory region. - Miscellaneous fixes and cleanups" * tag 'libnvdimm-for-6.1' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm: ACPI: HMAT: Release platform device in case of platform_device_add_data() fails dax: Remove usage of the deprecated ida_simple_xxx API libnvdimm/region: Allow setting align attribute on regions without mappings nvdimm/namespace: Fix comment typo nvdimm: make __nvdimm_security_overwrite_query static nvdimm/region: Fix kernel-doc nvdimm/namespace: drop unneeded temporary variable in size_store() nvdimm/namespace: return uuid_null only once in nd_dev_to_uuid()
101 lines
2.3 KiB
C
101 lines
2.3 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
#include <linux/platform_device.h>
|
|
#include <linux/memregion.h>
|
|
#include <linux/module.h>
|
|
#include <linux/dax.h>
|
|
#include <linux/mm.h>
|
|
|
|
/* Module parameter "disable": when true, skip all hmem device registration. */
static bool nohmem;
module_param_named(disable, nohmem, bool, 0444);
|
|
|
|
void hmem_register_device(int target_nid, struct resource *r)
|
|
{
|
|
/* define a clean / non-busy resource for the platform device */
|
|
struct resource res = {
|
|
.start = r->start,
|
|
.end = r->end,
|
|
.flags = IORESOURCE_MEM,
|
|
.desc = IORES_DESC_SOFT_RESERVED,
|
|
};
|
|
struct platform_device *pdev;
|
|
struct memregion_info info;
|
|
int rc, id;
|
|
|
|
if (nohmem)
|
|
return;
|
|
|
|
rc = region_intersects(res.start, resource_size(&res), IORESOURCE_MEM,
|
|
IORES_DESC_SOFT_RESERVED);
|
|
if (rc != REGION_INTERSECTS)
|
|
return;
|
|
|
|
id = memregion_alloc(GFP_KERNEL);
|
|
if (id < 0) {
|
|
pr_err("memregion allocation failure for %pr\n", &res);
|
|
return;
|
|
}
|
|
|
|
pdev = platform_device_alloc("hmem", id);
|
|
if (!pdev) {
|
|
pr_err("hmem device allocation failure for %pr\n", &res);
|
|
goto out_pdev;
|
|
}
|
|
|
|
pdev->dev.numa_node = numa_map_to_online_node(target_nid);
|
|
info = (struct memregion_info) {
|
|
.target_node = target_nid,
|
|
};
|
|
rc = platform_device_add_data(pdev, &info, sizeof(info));
|
|
if (rc < 0) {
|
|
pr_err("hmem memregion_info allocation failure for %pr\n", &res);
|
|
goto out_resource;
|
|
}
|
|
|
|
rc = platform_device_add_resources(pdev, &res, 1);
|
|
if (rc < 0) {
|
|
pr_err("hmem resource allocation failure for %pr\n", &res);
|
|
goto out_resource;
|
|
}
|
|
|
|
rc = platform_device_add(pdev);
|
|
if (rc < 0) {
|
|
dev_err(&pdev->dev, "device add failed for %pr\n", &res);
|
|
goto out_resource;
|
|
}
|
|
|
|
return;
|
|
|
|
out_resource:
|
|
platform_device_put(pdev);
|
|
out_pdev:
|
|
memregion_free(id);
|
|
}
|
|
|
|
static __init int hmem_register_one(struct resource *res, void *data)
|
|
{
|
|
/*
|
|
* If the resource is not a top-level resource it was already
|
|
* assigned to a device by the HMAT parsing.
|
|
*/
|
|
if (res->parent != &iomem_resource) {
|
|
pr_info("HMEM: skip %pr, already claimed\n", res);
|
|
return 0;
|
|
}
|
|
|
|
hmem_register_device(phys_to_target_node(res->start), res);
|
|
|
|
return 0;
|
|
}
|
|
|
|
static __init int hmem_init(void)
|
|
{
|
|
walk_iomem_res_desc(IORES_DESC_SOFT_RESERVED,
|
|
IORESOURCE_MEM, 0, -1, NULL, hmem_register_one);
|
|
return 0;
|
|
}
|
|
|
|
/*
|
|
* As this is a fallback for address ranges unclaimed by the ACPI HMAT
|
|
* parsing it must be at an initcall level greater than hmat_init().
|
|
*/
|
|
late_initcall(hmem_init);
|