Commit 6085bc95 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'dax-fixes-6.1-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull dax fixes from Dan Williams:
 "A few bug fixes around the handling of "Soft Reserved" memory and
  memory tiering information.

  Linux is starting to encounter more real world systems that deploy an
  ACPI HMAT to describe different performance classes of memory, as well
  as the "special purpose" (Linux "Soft Reserved") designation from EFI.

  These fixes result from that testing.

  It has all appeared in -next for a while with no known issues.

   - Fix duplicate overlapping device-dax instances for HMAT described
     "Soft Reserved" Memory

   - Fix missing node targets in the sysfs representation of memory
     tiers

   - Remove a confusing variable initialization"

* tag 'dax-fixes-6.1-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  device-dax: Fix duplicate 'hmem' device registration
  ACPI: HMAT: Fix initiator registration for single-initiator systems
  ACPI: HMAT: remove unnecessary variable initialization
parents 97ee9d1c 472faf72
...@@ -562,17 +562,26 @@ static int initiator_cmp(void *priv, const struct list_head *a, ...@@ -562,17 +562,26 @@ static int initiator_cmp(void *priv, const struct list_head *a,
{ {
struct memory_initiator *ia; struct memory_initiator *ia;
struct memory_initiator *ib; struct memory_initiator *ib;
unsigned long *p_nodes = priv;
ia = list_entry(a, struct memory_initiator, node); ia = list_entry(a, struct memory_initiator, node);
ib = list_entry(b, struct memory_initiator, node); ib = list_entry(b, struct memory_initiator, node);
set_bit(ia->processor_pxm, p_nodes);
set_bit(ib->processor_pxm, p_nodes);
return ia->processor_pxm - ib->processor_pxm; return ia->processor_pxm - ib->processor_pxm;
} }
static int initiators_to_nodemask(unsigned long *p_nodes)
{
struct memory_initiator *initiator;
if (list_empty(&initiators))
return -ENXIO;
list_for_each_entry(initiator, &initiators, node)
set_bit(initiator->processor_pxm, p_nodes);
return 0;
}
static void hmat_register_target_initiators(struct memory_target *target) static void hmat_register_target_initiators(struct memory_target *target)
{ {
static DECLARE_BITMAP(p_nodes, MAX_NUMNODES); static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
...@@ -609,7 +618,10 @@ static void hmat_register_target_initiators(struct memory_target *target) ...@@ -609,7 +618,10 @@ static void hmat_register_target_initiators(struct memory_target *target)
* initiators. * initiators.
*/ */
bitmap_zero(p_nodes, MAX_NUMNODES); bitmap_zero(p_nodes, MAX_NUMNODES);
list_sort(p_nodes, &initiators, initiator_cmp); list_sort(NULL, &initiators, initiator_cmp);
if (initiators_to_nodemask(p_nodes) < 0)
return;
if (!access0done) { if (!access0done) {
for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) { for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
loc = localities_types[i]; loc = localities_types[i];
...@@ -643,8 +655,9 @@ static void hmat_register_target_initiators(struct memory_target *target) ...@@ -643,8 +655,9 @@ static void hmat_register_target_initiators(struct memory_target *target)
/* Access 1 ignores Generic Initiators */ /* Access 1 ignores Generic Initiators */
bitmap_zero(p_nodes, MAX_NUMNODES); bitmap_zero(p_nodes, MAX_NUMNODES);
list_sort(p_nodes, &initiators, initiator_cmp); if (initiators_to_nodemask(p_nodes) < 0)
best = 0; return;
for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) { for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
loc = localities_types[i]; loc = localities_types[i];
if (!loc) if (!loc)
......
...@@ -8,6 +8,13 @@ ...@@ -8,6 +8,13 @@
static bool nohmem; static bool nohmem;
module_param_named(disable, nohmem, bool, 0444); module_param_named(disable, nohmem, bool, 0444);
/*
 * Parent resource spanning the whole physical address space (0..-1).
 * Each registered hmem platform device claims its range here via
 * __request_region(), so a second registration attempt for an
 * overlapping range is detected and skipped (see the "already active"
 * path in hmem_register_device()).
 */
static struct resource hmem_active = {
	.name = "HMEM devices",
	.start = 0,
	.end = -1,
	.flags = IORESOURCE_MEM,
};
void hmem_register_device(int target_nid, struct resource *r) void hmem_register_device(int target_nid, struct resource *r)
{ {
/* define a clean / non-busy resource for the platform device */ /* define a clean / non-busy resource for the platform device */
...@@ -41,6 +48,12 @@ void hmem_register_device(int target_nid, struct resource *r) ...@@ -41,6 +48,12 @@ void hmem_register_device(int target_nid, struct resource *r)
goto out_pdev; goto out_pdev;
} }
if (!__request_region(&hmem_active, res.start, resource_size(&res),
dev_name(&pdev->dev), 0)) {
dev_dbg(&pdev->dev, "hmem range %pr already active\n", &res);
goto out_active;
}
pdev->dev.numa_node = numa_map_to_online_node(target_nid); pdev->dev.numa_node = numa_map_to_online_node(target_nid);
info = (struct memregion_info) { info = (struct memregion_info) {
.target_node = target_nid, .target_node = target_nid,
...@@ -66,6 +79,8 @@ void hmem_register_device(int target_nid, struct resource *r) ...@@ -66,6 +79,8 @@ void hmem_register_device(int target_nid, struct resource *r)
return; return;
out_resource: out_resource:
__release_region(&hmem_active, res.start, resource_size(&res));
out_active:
platform_device_put(pdev); platform_device_put(pdev);
out_pdev: out_pdev:
memregion_free(id); memregion_free(id);
...@@ -73,15 +88,6 @@ void hmem_register_device(int target_nid, struct resource *r) ...@@ -73,15 +88,6 @@ void hmem_register_device(int target_nid, struct resource *r)
static __init int hmem_register_one(struct resource *res, void *data) static __init int hmem_register_one(struct resource *res, void *data)
{ {
/*
* If the resource is not a top-level resource it was already
* assigned to a device by the HMAT parsing.
*/
if (res->parent != &iomem_resource) {
pr_info("HMEM: skip %pr, already claimed\n", res);
return 0;
}
hmem_register_device(phys_to_target_node(res->start), res); hmem_register_device(phys_to_target_node(res->start), res);
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment