@@ -1509,6 +1509,7 @@ hwloc__find_insert_memory_parent(struct hwloc_topology *topology, hwloc_obj_t ob
   return group;
 }
 
+/* only works for MEMCACHE and NUMAnode with a single bit in nodeset */
 static hwloc_obj_t
 hwloc___attach_memory_object_by_nodeset(struct hwloc_topology *topology, hwloc_obj_t parent,
                                         hwloc_obj_t obj,
@@ -1533,19 +1534,47 @@ hwloc___attach_memory_object_by_nodeset(struct hwloc_topology *topology, hwloc_o
 
     if (first == curfirst) {
       /* identical nodeset */
-      assert(obj->type == HWLOC_OBJ_NUMANODE);
-      assert(cur->type == HWLOC_OBJ_NUMANODE);
-      /* identical NUMA nodes? ignore the new one */
-      if (report_error) {
-        char curstr[512];
-        char objstr[512];
-        char msg[1100];
-        hwloc__report_error_format_obj(curstr, sizeof(curstr), cur);
-        hwloc__report_error_format_obj(objstr, sizeof(objstr), obj);
-        snprintf(msg, sizeof(msg), "%s and %s have identical nodesets!", objstr, curstr);
-        report_error(msg, __LINE__);
+      if (obj->type == HWLOC_OBJ_NUMANODE) {
+        if (cur->type == HWLOC_OBJ_NUMANODE) {
+          /* identical NUMA nodes? ignore the new one */
+          if (report_error) {
+            char curstr[512];
+            char objstr[512];
+            char msg[1100];
+            hwloc__report_error_format_obj(curstr, sizeof(curstr), cur);
+            hwloc__report_error_format_obj(objstr, sizeof(objstr), obj);
+            snprintf(msg, sizeof(msg), "%s and %s have identical nodesets!", objstr, curstr);
+            report_error(msg, __LINE__);
+          }
+          return NULL;
+        }
+        assert(cur->type == HWLOC_OBJ_MEMCACHE);
+        /* insert the new NUMA node below that existing memcache */
+        return hwloc___attach_memory_object_by_nodeset(topology, cur, obj, report_error);
+
+      } else {
+        assert(obj->type == HWLOC_OBJ_MEMCACHE);
+        if (cur->type == HWLOC_OBJ_MEMCACHE) {
+          if (cur->attr->cache.depth == obj->attr->cache.depth)
+            /* memcache with same nodeset and depth, ignore the new one */
+            return NULL;
+          if (cur->attr->cache.depth > obj->attr->cache.depth)
+            /* memcache with higher cache depth is actually *higher* in the hierarchy
+             * (depth starts from the NUMA node).
+             * insert the new memcache below the existing one
+             */
+            return hwloc___attach_memory_object_by_nodeset(topology, cur, obj, report_error);
+        }
+        /* insert the memcache above the existing memcache or numa node */
+        obj->next_sibling = cur->next_sibling;
+        cur->next_sibling = NULL;
+        obj->memory_first_child = cur;
+        cur->parent = obj;
+        *curp = obj;
+        obj->parent = parent;
+        topology->modified = 1;
+        return obj;
       }
-      return NULL;
     }
 
     curp = &cur->next_sibling;
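The equal-nodeset branch added above orders memory objects by memory-side cache depth: a deeper existing memcache keeps the new object below it (via the recursive call), while a shallower memcache or a NUMA node gets the new memcache inserted above it, so the NUMA node always ends up at the bottom of the chain. The following standalone sketch is not part of the patch: the mem_obj struct and insert_mem function are invented for illustration, the nodeset matching and the duplicate/equal-depth handling are omitted, and only the depth-ordering rule is kept.

    #include <stdio.h>

    struct mem_obj {
      const char *name;
      int is_cache;           /* 0 = NUMA node, 1 = memory-side cache */
      unsigned depth;         /* cache depth, counted upward from the NUMA node */
      struct mem_obj *child;  /* single memory child, enough for this sketch */
    };

    /* Insert obj into the chain rooted at *rootp so that caches with larger
     * depth end up higher (closer to the root) and the NUMA node stays at the
     * bottom; same ordering rule as the equal-nodeset branch in the patch. */
    static void insert_mem(struct mem_obj **rootp, struct mem_obj *obj)
    {
      struct mem_obj *cur = *rootp;
      if (!cur) {
        *rootp = obj;
        return;
      }
      if (cur->is_cache && (!obj->is_cache || cur->depth > obj->depth)) {
        /* existing cache is higher in the hierarchy: recurse below it */
        insert_mem(&cur->child, obj);
      } else {
        /* new object goes above the existing one */
        obj->child = cur;
        *rootp = obj;
      }
    }

    int main(void)
    {
      struct mem_obj numa = { "NUMANode",         0, 0, NULL };
      struct mem_obj mc1  = { "MemCache depth 1", 1, 1, NULL };
      struct mem_obj mc2  = { "MemCache depth 2", 1, 2, NULL };
      struct mem_obj *root = NULL;

      /* insertion order does not matter; depth decides the final chain */
      insert_mem(&root, &mc1);
      insert_mem(&root, &numa);
      insert_mem(&root, &mc2);

      for (struct mem_obj *o = root; o; o = o->child)
        printf("%s\n", o->name);  /* MemCache depth 2, MemCache depth 1, NUMANode */
      return 0;
    }

Whatever order insert_mem is called in, the chain ends up MemCache depth 2, then MemCache depth 1, then NUMANode, mirroring the hierarchy the patch builds under each CPU-side parent.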
@@ -1587,6 +1616,8 @@ hwloc__attach_memory_object(struct hwloc_topology *topology, hwloc_obj_t parent,
   } else if (!hwloc_bitmap_isincluded(obj->nodeset, obj->complete_nodeset)) {
     return NULL;
   }
+  /* Neither ACPI nor Linux support multinode mscache */
+  assert(hwloc_bitmap_weight(obj->nodeset) == 1);
 
 #if 0
   /* TODO: enable this instead of hack in fixup_sets once NUMA nodes are inserted late */
@@ -1599,9 +1630,6 @@ hwloc__attach_memory_object(struct hwloc_topology *topology, hwloc_obj_t parent,
   hwloc_bitmap_copy(obj->complete_cpuset, parent->complete_cpuset);
 #endif
 
-  /* only NUMA nodes are memory for now, just append to the end of the list */
-  assert(obj->type == HWLOC_OBJ_NUMANODE);
-
   result = hwloc___attach_memory_object_by_nodeset(topology, parent, obj, report_error);
   if (result == obj) {
     /* Add the bit to the top sets, and to the parent CPU-side object */
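The new hwloc_bitmap_weight() assertion guarantees that every memory object attached here covers exactly one NUMA node, which is what makes the first/curfirst comparison in the by-nodeset helper sufficient. The effect of the patch can be observed from the public API; below is a minimal sketch, assuming hwloc >= 2.1 (where memory-side caches are exposed as HWLOC_OBJ_MEMCACHE) and a machine that actually reports such caches. It walks from the first NUMA node up through any memory-side caches until it reaches the CPU-side parent.

    #include <stdio.h>
    #include <hwloc.h>

    int main(void)
    {
      hwloc_topology_t topology;
      hwloc_obj_t obj;
      char type[64];

      hwloc_topology_init(&topology);
      hwloc_topology_load(topology);

      /* start at the first NUMA node and walk up through its parents:
       * any memory-side caches sit between the NUMA node and its
       * CPU-side parent, with deeper caches higher up */
      obj = hwloc_get_obj_by_type(topology, HWLOC_OBJ_NUMANODE, 0);
      while (obj) {
        hwloc_obj_type_snprintf(type, sizeof(type), obj, 0);
        printf("%s\n", type);
        if (!hwloc_obj_type_is_memory(obj->type))
          break; /* reached the CPU-side parent, stop */
        obj = obj->parent;
      }

      hwloc_topology_destroy(topology);
      return 0;
    }

On a machine with two levels of memory-side cache this should print NUMANode, then the depth-1 MemCache, then the depth-2 MemCache, and finally the CPU-side parent (for example a Package or Group).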