20 #include "kmp_wrapper_getpid.h" 21 #include "kmp_affinity.h" 26 void __kmp_cleanup_hierarchy() {
27 machine_hierarchy.fini();
30 void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar) {
33 if (TCR_1(machine_hierarchy.uninitialized))
34 machine_hierarchy.init(NULL, nproc);
37 if (nproc > machine_hierarchy.base_num_threads)
38 machine_hierarchy.resize(nproc);
40 depth = machine_hierarchy.
depth;
41 KMP_DEBUG_ASSERT(depth > 0);
43 thr_bar->depth = depth;
44 thr_bar->base_leaf_kids = (kmp_uint8)machine_hierarchy.
numPerLevel[0]-1;
45 thr_bar->skip_per_level = machine_hierarchy.skipPerLevel;
48 #if KMP_AFFINITY_SUPPORTED 55 __kmp_affinity_print_mask(
char *buf,
int buf_len, kmp_affin_mask_t *mask)
57 int num_chars_to_write, num_chars_written;
59 KMP_ASSERT(buf_len >= 40);
62 num_chars_to_write = hwloc_bitmap_list_snprintf(buf, 0, (hwloc_bitmap_t)mask);
67 if(hwloc_bitmap_iszero((hwloc_bitmap_t)mask)) {
68 KMP_SNPRINTF(buf, buf_len,
"{<empty>}");
69 }
else if(num_chars_to_write < buf_len - 3) {
73 num_chars_written = hwloc_bitmap_list_snprintf(buf+1, buf_len-3, (hwloc_bitmap_t)mask);
74 buf[num_chars_written+1] =
'}';
75 buf[num_chars_written+2] =
'\0';
80 hwloc_bitmap_list_snprintf(buf+1, buf_len-1, (hwloc_bitmap_t)mask);
85 scan = buf + buf_len - 7;
86 while(*scan >=
'0' && *scan <= '9' && scan >= buf)
98 __kmp_affinity_print_mask(
char *buf,
int buf_len, kmp_affin_mask_t *mask)
100 KMP_ASSERT(buf_len >= 40);
102 char *end = buf + buf_len - 1;
108 for (i = 0; i < KMP_CPU_SETSIZE; i++) {
109 if (KMP_CPU_ISSET(i, mask)) {
113 if (i == KMP_CPU_SETSIZE) {
114 KMP_SNPRINTF(scan, end-scan+1,
"{<empty>}");
115 while (*scan !=
'\0') scan++;
116 KMP_ASSERT(scan <= end);
120 KMP_SNPRINTF(scan, end-scan+1,
"{%ld", (
long)i);
121 while (*scan !=
'\0') scan++;
123 for (; i < KMP_CPU_SETSIZE; i++) {
124 if (! KMP_CPU_ISSET(i, mask)) {
134 if (end - scan < 15) {
137 KMP_SNPRINTF(scan, end-scan+1,
",%-ld", (
long)i);
138 while (*scan !=
'\0') scan++;
140 if (i < KMP_CPU_SETSIZE) {
141 KMP_SNPRINTF(scan, end-scan+1,
",...");
142 while (*scan !=
'\0') scan++;
144 KMP_SNPRINTF(scan, end-scan+1,
"}");
145 while (*scan !=
'\0') scan++;
146 KMP_ASSERT(scan <= end);
149 #endif // KMP_USE_HWLOC 153 __kmp_affinity_entire_machine_mask(kmp_affin_mask_t *mask)
157 # if KMP_GROUP_AFFINITY 159 if (__kmp_num_proc_groups > 1) {
161 KMP_DEBUG_ASSERT(__kmp_GetActiveProcessorCount != NULL);
162 for (group = 0; group < __kmp_num_proc_groups; group++) {
164 int num = __kmp_GetActiveProcessorCount(group);
165 for (i = 0; i < num; i++) {
166 KMP_CPU_SET(i + group * (CHAR_BIT *
sizeof(DWORD_PTR)), mask);
176 for (proc = 0; proc < __kmp_xproc; proc++) {
177 KMP_CPU_SET(proc, mask);
196 __kmp_affinity_assign_child_nums(AddrUnsPair *address2os,
199 KMP_DEBUG_ASSERT(numAddrs > 0);
200 int depth = address2os->first.depth;
201 unsigned *counts = (
unsigned *)__kmp_allocate(depth *
sizeof(
unsigned));
202 unsigned *lastLabel = (
unsigned *)__kmp_allocate(depth
205 for (labCt = 0; labCt < depth; labCt++) {
206 address2os[0].first.childNums[labCt] = counts[labCt] = 0;
207 lastLabel[labCt] = address2os[0].first.labels[labCt];
210 for (i = 1; i < numAddrs; i++) {
211 for (labCt = 0; labCt < depth; labCt++) {
212 if (address2os[i].first.labels[labCt] != lastLabel[labCt]) {
214 for (labCt2 = labCt + 1; labCt2 < depth; labCt2++) {
216 lastLabel[labCt2] = address2os[i].first.labels[labCt2];
219 lastLabel[labCt] = address2os[i].first.labels[labCt];
223 for (labCt = 0; labCt < depth; labCt++) {
224 address2os[i].first.childNums[labCt] = counts[labCt];
226 for (; labCt < (int)Address::maxDepth; labCt++) {
227 address2os[i].first.childNums[labCt] = 0;
230 __kmp_free(lastLabel);
247 kmp_affin_mask_t *__kmp_affin_fullMask = NULL;
249 static int nCoresPerPkg, nPackages;
250 static int __kmp_nThreadsPerCore;
251 #ifndef KMP_DFLT_NTH_CORES 252 static int __kmp_ncores;
254 static int *__kmp_pu_os_idx = NULL;
263 __kmp_affinity_uniform_topology()
265 return __kmp_avail_proc == (__kmp_nThreadsPerCore * nCoresPerPkg * nPackages);
274 __kmp_affinity_print_topology(AddrUnsPair *address2os,
int len,
int depth,
275 int pkgLevel,
int coreLevel,
int threadLevel)
279 KMP_INFORM(OSProcToPhysicalThreadMap,
"KMP_AFFINITY");
280 for (proc = 0; proc < len; proc++) {
283 __kmp_str_buf_init(&buf);
284 for (level = 0; level < depth; level++) {
285 if (level == threadLevel) {
286 __kmp_str_buf_print(&buf,
"%s ", KMP_I18N_STR(Thread));
288 else if (level == coreLevel) {
289 __kmp_str_buf_print(&buf,
"%s ", KMP_I18N_STR(Core));
291 else if (level == pkgLevel) {
292 __kmp_str_buf_print(&buf,
"%s ", KMP_I18N_STR(Package));
294 else if (level > pkgLevel) {
295 __kmp_str_buf_print(&buf,
"%s_%d ", KMP_I18N_STR(Node),
296 level - pkgLevel - 1);
299 __kmp_str_buf_print(&buf,
"L%d ", level);
301 __kmp_str_buf_print(&buf,
"%d ",
302 address2os[proc].first.labels[level]);
304 KMP_INFORM(OSProcMapToPack,
"KMP_AFFINITY", address2os[proc].second,
306 __kmp_str_buf_free(&buf);
318 __kmp_affinity_remove_radix_one_levels(AddrUnsPair *address2os,
int nActiveThreads,
int depth,
int* pkgLevel,
int* coreLevel,
int* threadLevel) {
323 for (level = depth-1; level >= 0; --level) {
325 if (level == *pkgLevel)
329 for (i = 1; i < nActiveThreads; ++i) {
330 if (address2os[0].first.labels[level] != address2os[i].first.labels[level]) {
336 if (!radix1_detected)
339 if (level == *threadLevel) {
342 for (i = 0; i < nActiveThreads; ++i) {
343 address2os[i].first.depth--;
346 }
else if (level == *coreLevel) {
349 for (i = 0; i < nActiveThreads; ++i) {
350 if (*threadLevel != -1) {
351 address2os[i].first.labels[*coreLevel] = address2os[i].first.labels[*threadLevel];
353 address2os[i].first.depth--;
358 return address2os[0].first.depth;
365 __kmp_hwloc_get_nobjs_under_obj(hwloc_obj_t obj, hwloc_obj_type_t type) {
368 for(first = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, obj->type, obj->logical_index, type, 0);
369 first != NULL && hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology, obj->type, first) == obj;
370 first = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, first->type, first))
378 __kmp_affinity_create_hwloc_map(AddrUnsPair **address2os,
379 kmp_i18n_id_t *
const msg_id)
382 *msg_id = kmp_i18n_null;
387 kmp_affin_mask_t *oldMask;
388 KMP_CPU_ALLOC(oldMask);
389 __kmp_get_system_affinity(oldMask, TRUE);
396 if (! KMP_AFFINITY_CAPABLE())
402 KMP_ASSERT(__kmp_affinity_type == affinity_none);
404 nCoresPerPkg = __kmp_hwloc_get_nobjs_under_obj(hwloc_get_obj_by_type(__kmp_hwloc_topology, HWLOC_OBJ_SOCKET, 0), HWLOC_OBJ_CORE);
405 __kmp_nThreadsPerCore = __kmp_hwloc_get_nobjs_under_obj(hwloc_get_obj_by_type(__kmp_hwloc_topology, HWLOC_OBJ_CORE, 0), HWLOC_OBJ_PU);
406 __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
407 nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
408 if (__kmp_affinity_verbose) {
409 KMP_INFORM(AffNotCapableUseLocCpuidL11,
"KMP_AFFINITY");
410 KMP_INFORM(AvailableOSProc,
"KMP_AFFINITY", __kmp_avail_proc);
411 if (__kmp_affinity_uniform_topology()) {
412 KMP_INFORM(Uniform,
"KMP_AFFINITY");
414 KMP_INFORM(NonUniform,
"KMP_AFFINITY");
416 KMP_INFORM(Topology,
"KMP_AFFINITY", nPackages, nCoresPerPkg,
417 __kmp_nThreadsPerCore, __kmp_ncores);
419 KMP_CPU_FREE(oldMask);
426 AddrUnsPair *retval = (AddrUnsPair *)__kmp_allocate(
sizeof(AddrUnsPair) * __kmp_avail_proc);
427 __kmp_pu_os_idx = (
int*)__kmp_allocate(
sizeof(
int) * __kmp_avail_proc);
439 int nActiveThreads = 0;
440 int socket_identifier = 0;
442 __kmp_ncores = nPackages = nCoresPerPkg = __kmp_nThreadsPerCore = 0;
443 for(socket = hwloc_get_obj_by_type(__kmp_hwloc_topology, HWLOC_OBJ_SOCKET, 0);
445 socket = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, HWLOC_OBJ_SOCKET, socket),
448 int core_identifier = 0;
449 int num_active_cores = 0;
450 for(core = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, socket->type, socket->logical_index, HWLOC_OBJ_CORE, 0);
451 core != NULL && hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology, socket->type, core) == socket;
452 core = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, HWLOC_OBJ_CORE, core),
455 int pu_identifier = 0;
456 int num_active_threads = 0;
457 for(pu = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, core->type, core->logical_index, HWLOC_OBJ_PU, 0);
458 pu != NULL && hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology, core->type, pu) == core;
459 pu = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, HWLOC_OBJ_PU, pu),
463 if(! KMP_CPU_ISSET(pu->os_index, __kmp_affin_fullMask))
465 KA_TRACE(20, (
"Hwloc inserting %d (%d) %d (%d) %d (%d) into address2os\n",
466 socket->os_index, socket->logical_index, core->os_index, core->logical_index, pu->os_index,pu->logical_index));
467 addr.labels[0] = socket_identifier;
468 addr.labels[1] = core_identifier;
469 addr.labels[2] = pu_identifier;
470 retval[nActiveThreads] = AddrUnsPair(addr, pu->os_index);
471 __kmp_pu_os_idx[nActiveThreads] = pu->os_index;
473 ++num_active_threads;
475 if (num_active_threads) {
478 if (num_active_threads > __kmp_nThreadsPerCore)
479 __kmp_nThreadsPerCore = num_active_threads;
482 if (num_active_cores) {
484 if (num_active_cores > nCoresPerPkg)
485 nCoresPerPkg = num_active_cores;
492 KMP_DEBUG_ASSERT(nActiveThreads == __kmp_avail_proc);
493 KMP_ASSERT(nActiveThreads > 0);
494 if (nActiveThreads == 1) {
495 __kmp_ncores = nPackages = 1;
496 __kmp_nThreadsPerCore = nCoresPerPkg = 1;
497 if (__kmp_affinity_verbose) {
498 char buf[KMP_AFFIN_MASK_PRINT_LEN];
499 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, oldMask);
501 KMP_INFORM(AffUsingHwloc,
"KMP_AFFINITY");
502 if (__kmp_affinity_respect_mask) {
503 KMP_INFORM(InitOSProcSetRespect,
"KMP_AFFINITY", buf);
505 KMP_INFORM(InitOSProcSetNotRespect,
"KMP_AFFINITY", buf);
507 KMP_INFORM(AvailableOSProc,
"KMP_AFFINITY", __kmp_avail_proc);
508 KMP_INFORM(Uniform,
"KMP_AFFINITY");
509 KMP_INFORM(Topology,
"KMP_AFFINITY", nPackages, nCoresPerPkg,
510 __kmp_nThreadsPerCore, __kmp_ncores);
513 if (__kmp_affinity_type == affinity_none) {
515 KMP_CPU_FREE(oldMask);
523 addr.labels[0] = retval[0].first.labels[pkgLevel];
524 retval[0].first = addr;
526 if (__kmp_affinity_gran_levels < 0) {
527 __kmp_affinity_gran_levels = 0;
530 if (__kmp_affinity_verbose) {
531 __kmp_affinity_print_topology(retval, 1, 1, 0, -1, -1);
534 *address2os = retval;
535 KMP_CPU_FREE(oldMask);
542 qsort(retval, nActiveThreads,
sizeof(*retval), __kmp_affinity_cmp_Address_labels);
547 unsigned uniform = (nPackages * nCoresPerPkg * __kmp_nThreadsPerCore == nActiveThreads);
552 if (__kmp_affinity_verbose) {
553 char mask[KMP_AFFIN_MASK_PRINT_LEN];
554 __kmp_affinity_print_mask(mask, KMP_AFFIN_MASK_PRINT_LEN, oldMask);
556 KMP_INFORM(AffUsingHwloc,
"KMP_AFFINITY");
557 if (__kmp_affinity_respect_mask) {
558 KMP_INFORM(InitOSProcSetRespect,
"KMP_AFFINITY", mask);
560 KMP_INFORM(InitOSProcSetNotRespect,
"KMP_AFFINITY", mask);
562 KMP_INFORM(AvailableOSProc,
"KMP_AFFINITY", __kmp_avail_proc);
564 KMP_INFORM(Uniform,
"KMP_AFFINITY");
566 KMP_INFORM(NonUniform,
"KMP_AFFINITY");
570 __kmp_str_buf_init(&buf);
572 __kmp_str_buf_print(&buf,
"%d", nPackages);
576 KMP_INFORM(TopologyExtra,
"KMP_AFFINITY", buf.str, nCoresPerPkg,
577 __kmp_nThreadsPerCore, __kmp_ncores);
579 __kmp_str_buf_free(&buf);
582 if (__kmp_affinity_type == affinity_none) {
584 KMP_CPU_FREE(oldMask);
592 depth = __kmp_affinity_remove_radix_one_levels(retval, nActiveThreads, depth, &pkgLevel, &coreLevel, &threadLevel);
594 if (__kmp_affinity_gran_levels < 0) {
599 __kmp_affinity_gran_levels = 0;
600 if ((threadLevel >= 0) && (__kmp_affinity_gran > affinity_gran_thread)) {
601 __kmp_affinity_gran_levels++;
603 if ((coreLevel >= 0) && (__kmp_affinity_gran > affinity_gran_core)) {
604 __kmp_affinity_gran_levels++;
606 if (__kmp_affinity_gran > affinity_gran_package) {
607 __kmp_affinity_gran_levels++;
611 if (__kmp_affinity_verbose) {
612 __kmp_affinity_print_topology(retval, nActiveThreads, depth, pkgLevel,
613 coreLevel, threadLevel);
616 KMP_CPU_FREE(oldMask);
617 *address2os = retval;
620 #endif // KMP_USE_HWLOC 628 __kmp_affinity_create_flat_map(AddrUnsPair **address2os,
629 kmp_i18n_id_t *
const msg_id)
632 *msg_id = kmp_i18n_null;
639 if (! KMP_AFFINITY_CAPABLE()) {
640 KMP_ASSERT(__kmp_affinity_type == affinity_none);
641 __kmp_ncores = nPackages = __kmp_xproc;
642 __kmp_nThreadsPerCore = nCoresPerPkg = 1;
643 if (__kmp_affinity_verbose) {
644 KMP_INFORM(AffFlatTopology,
"KMP_AFFINITY");
645 KMP_INFORM(AvailableOSProc,
"KMP_AFFINITY", __kmp_avail_proc);
646 KMP_INFORM(Uniform,
"KMP_AFFINITY");
647 KMP_INFORM(Topology,
"KMP_AFFINITY", nPackages, nCoresPerPkg,
648 __kmp_nThreadsPerCore, __kmp_ncores);
659 __kmp_ncores = nPackages = __kmp_avail_proc;
660 __kmp_nThreadsPerCore = nCoresPerPkg = 1;
661 if (__kmp_affinity_verbose) {
662 char buf[KMP_AFFIN_MASK_PRINT_LEN];
663 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, __kmp_affin_fullMask);
665 KMP_INFORM(AffCapableUseFlat,
"KMP_AFFINITY");
666 if (__kmp_affinity_respect_mask) {
667 KMP_INFORM(InitOSProcSetRespect,
"KMP_AFFINITY", buf);
669 KMP_INFORM(InitOSProcSetNotRespect,
"KMP_AFFINITY", buf);
671 KMP_INFORM(AvailableOSProc,
"KMP_AFFINITY", __kmp_avail_proc);
672 KMP_INFORM(Uniform,
"KMP_AFFINITY");
673 KMP_INFORM(Topology,
"KMP_AFFINITY", nPackages, nCoresPerPkg,
674 __kmp_nThreadsPerCore, __kmp_ncores);
676 KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
677 __kmp_pu_os_idx = (
int*)__kmp_allocate(
sizeof(
int) * __kmp_avail_proc);
678 if (__kmp_affinity_type == affinity_none) {
681 KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
682 if (! KMP_CPU_ISSET(i, __kmp_affin_fullMask))
684 __kmp_pu_os_idx[avail_ct++] = i;
692 *address2os = (AddrUnsPair*)
693 __kmp_allocate(
sizeof(**address2os) * __kmp_avail_proc);
696 KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
700 if (! KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
703 __kmp_pu_os_idx[avail_ct] = i;
706 (*address2os)[avail_ct++] = AddrUnsPair(addr,i);
708 if (__kmp_affinity_verbose) {
709 KMP_INFORM(OSProcToPackage,
"KMP_AFFINITY");
712 if (__kmp_affinity_gran_levels < 0) {
717 if (__kmp_affinity_gran > affinity_gran_package) {
718 __kmp_affinity_gran_levels = 1;
721 __kmp_affinity_gran_levels = 0;
728 # if KMP_GROUP_AFFINITY 739 __kmp_affinity_create_proc_group_map(AddrUnsPair **address2os,
740 kmp_i18n_id_t *
const msg_id)
743 *msg_id = kmp_i18n_null;
749 if ((! KMP_AFFINITY_CAPABLE()) || (__kmp_get_proc_group(__kmp_affin_fullMask) >= 0)) {
757 *address2os = (AddrUnsPair*)
758 __kmp_allocate(
sizeof(**address2os) * __kmp_avail_proc);
759 KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
760 __kmp_pu_os_idx = (
int*)__kmp_allocate(
sizeof(
int) * __kmp_avail_proc);
763 KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
767 if (! KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
770 __kmp_pu_os_idx[avail_ct] = i;
772 addr.labels[0] = i / (CHAR_BIT *
sizeof(DWORD_PTR));
773 addr.labels[1] = i % (CHAR_BIT *
sizeof(DWORD_PTR));
774 (*address2os)[avail_ct++] = AddrUnsPair(addr,i);
776 if (__kmp_affinity_verbose) {
777 KMP_INFORM(AffOSProcToGroup,
"KMP_AFFINITY", i, addr.labels[0],
782 if (__kmp_affinity_gran_levels < 0) {
783 if (__kmp_affinity_gran == affinity_gran_group) {
784 __kmp_affinity_gran_levels = 1;
786 else if ((__kmp_affinity_gran == affinity_gran_fine)
787 || (__kmp_affinity_gran == affinity_gran_thread)) {
788 __kmp_affinity_gran_levels = 0;
791 const char *gran_str = NULL;
792 if (__kmp_affinity_gran == affinity_gran_core) {
795 else if (__kmp_affinity_gran == affinity_gran_package) {
796 gran_str =
"package";
798 else if (__kmp_affinity_gran == affinity_gran_node) {
806 __kmp_affinity_gran_levels = 0;
815 # if KMP_ARCH_X86 || KMP_ARCH_X86_64 818 __kmp_cpuid_mask_width(
int count) {
821 while((1<<r) < count)
827 class apicThreadInfo {
831 unsigned maxCoresPerPkg;
832 unsigned maxThreadsPerPkg;
840 __kmp_affinity_cmp_apicThreadInfo_os_id(
const void *a,
const void *b)
842 const apicThreadInfo *aa = (
const apicThreadInfo *)a;
843 const apicThreadInfo *bb = (
const apicThreadInfo *)b;
844 if (aa->osId < bb->osId)
return -1;
845 if (aa->osId > bb->osId)
return 1;
851 __kmp_affinity_cmp_apicThreadInfo_phys_id(
const void *a,
const void *b)
853 const apicThreadInfo *aa = (
const apicThreadInfo *)a;
854 const apicThreadInfo *bb = (
const apicThreadInfo *)b;
855 if (aa->pkgId < bb->pkgId)
return -1;
856 if (aa->pkgId > bb->pkgId)
return 1;
857 if (aa->coreId < bb->coreId)
return -1;
858 if (aa->coreId > bb->coreId)
return 1;
859 if (aa->threadId < bb->threadId)
return -1;
860 if (aa->threadId > bb->threadId)
return 1;
872 __kmp_affinity_create_apicid_map(AddrUnsPair **address2os,
873 kmp_i18n_id_t *
const msg_id)
878 *msg_id = kmp_i18n_null;
883 __kmp_x86_cpuid(0, 0, &buf);
885 *msg_id = kmp_i18n_str_NoLeaf4Support;
897 if (! KMP_AFFINITY_CAPABLE()) {
902 KMP_ASSERT(__kmp_affinity_type == affinity_none);
912 __kmp_x86_cpuid(1, 0, &buf);
913 int maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
914 if (maxThreadsPerPkg == 0) {
915 maxThreadsPerPkg = 1;
931 __kmp_x86_cpuid(0, 0, &buf);
933 __kmp_x86_cpuid(4, 0, &buf);
934 nCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
957 __kmp_ncores = __kmp_xproc;
958 nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
959 __kmp_nThreadsPerCore = 1;
960 if (__kmp_affinity_verbose) {
961 KMP_INFORM(AffNotCapableUseLocCpuid,
"KMP_AFFINITY");
962 KMP_INFORM(AvailableOSProc,
"KMP_AFFINITY", __kmp_avail_proc);
963 if (__kmp_affinity_uniform_topology()) {
964 KMP_INFORM(Uniform,
"KMP_AFFINITY");
966 KMP_INFORM(NonUniform,
"KMP_AFFINITY");
968 KMP_INFORM(Topology,
"KMP_AFFINITY", nPackages, nCoresPerPkg,
969 __kmp_nThreadsPerCore, __kmp_ncores);
984 kmp_affin_mask_t *oldMask;
985 KMP_CPU_ALLOC(oldMask);
986 KMP_ASSERT(oldMask != NULL);
987 __kmp_get_system_affinity(oldMask, TRUE);
1022 apicThreadInfo *threadInfo = (apicThreadInfo *)__kmp_allocate(
1023 __kmp_avail_proc *
sizeof(apicThreadInfo));
1024 unsigned nApics = 0;
1025 KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
1029 if (! KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
1032 KMP_DEBUG_ASSERT((
int)nApics < __kmp_avail_proc);
1034 __kmp_affinity_bind_thread(i);
1035 threadInfo[nApics].osId = i;
1040 __kmp_x86_cpuid(1, 0, &buf);
1041 if (! (buf.edx >> 9) & 1) {
1042 __kmp_set_system_affinity(oldMask, TRUE);
1043 __kmp_free(threadInfo);
1044 KMP_CPU_FREE(oldMask);
1045 *msg_id = kmp_i18n_str_ApicNotPresent;
1048 threadInfo[nApics].apicId = (buf.ebx >> 24) & 0xff;
1049 threadInfo[nApics].maxThreadsPerPkg = (buf.ebx >> 16) & 0xff;
1050 if (threadInfo[nApics].maxThreadsPerPkg == 0) {
1051 threadInfo[nApics].maxThreadsPerPkg = 1;
1062 __kmp_x86_cpuid(0, 0, &buf);
1064 __kmp_x86_cpuid(4, 0, &buf);
1065 threadInfo[nApics].maxCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1;
1068 threadInfo[nApics].maxCoresPerPkg = 1;
1075 int widthCT = __kmp_cpuid_mask_width(
1076 threadInfo[nApics].maxThreadsPerPkg);
1077 threadInfo[nApics].pkgId = threadInfo[nApics].apicId >> widthCT;
1079 int widthC = __kmp_cpuid_mask_width(
1080 threadInfo[nApics].maxCoresPerPkg);
1081 int widthT = widthCT - widthC;
1088 __kmp_set_system_affinity(oldMask, TRUE);
1089 __kmp_free(threadInfo);
1090 KMP_CPU_FREE(oldMask);
1091 *msg_id = kmp_i18n_str_InvalidCpuidInfo;
1095 int maskC = (1 << widthC) - 1;
1096 threadInfo[nApics].coreId = (threadInfo[nApics].apicId >> widthT)
1099 int maskT = (1 << widthT) - 1;
1100 threadInfo[nApics].threadId = threadInfo[nApics].apicId &maskT;
1109 __kmp_set_system_affinity(oldMask, TRUE);
1121 KMP_ASSERT(nApics > 0);
1123 __kmp_ncores = nPackages = 1;
1124 __kmp_nThreadsPerCore = nCoresPerPkg = 1;
1125 if (__kmp_affinity_verbose) {
1126 char buf[KMP_AFFIN_MASK_PRINT_LEN];
1127 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, oldMask);
1129 KMP_INFORM(AffUseGlobCpuid,
"KMP_AFFINITY");
1130 if (__kmp_affinity_respect_mask) {
1131 KMP_INFORM(InitOSProcSetRespect,
"KMP_AFFINITY", buf);
1133 KMP_INFORM(InitOSProcSetNotRespect,
"KMP_AFFINITY", buf);
1135 KMP_INFORM(AvailableOSProc,
"KMP_AFFINITY", __kmp_avail_proc);
1136 KMP_INFORM(Uniform,
"KMP_AFFINITY");
1137 KMP_INFORM(Topology,
"KMP_AFFINITY", nPackages, nCoresPerPkg,
1138 __kmp_nThreadsPerCore, __kmp_ncores);
1141 if (__kmp_affinity_type == affinity_none) {
1142 __kmp_free(threadInfo);
1143 KMP_CPU_FREE(oldMask);
1147 *address2os = (AddrUnsPair*)__kmp_allocate(
sizeof(AddrUnsPair));
1149 addr.labels[0] = threadInfo[0].pkgId;
1150 (*address2os)[0] = AddrUnsPair(addr, threadInfo[0].osId);
1152 if (__kmp_affinity_gran_levels < 0) {
1153 __kmp_affinity_gran_levels = 0;
1156 if (__kmp_affinity_verbose) {
1157 __kmp_affinity_print_topology(*address2os, 1, 1, 0, -1, -1);
1160 __kmp_free(threadInfo);
1161 KMP_CPU_FREE(oldMask);
1168 qsort(threadInfo, nApics,
sizeof(*threadInfo),
1169 __kmp_affinity_cmp_apicThreadInfo_phys_id);
1188 __kmp_nThreadsPerCore = 1;
1189 unsigned nCores = 1;
1192 unsigned lastPkgId = threadInfo[0].pkgId;
1193 unsigned coreCt = 1;
1194 unsigned lastCoreId = threadInfo[0].coreId;
1195 unsigned threadCt = 1;
1196 unsigned lastThreadId = threadInfo[0].threadId;
1199 unsigned prevMaxCoresPerPkg = threadInfo[0].maxCoresPerPkg;
1200 unsigned prevMaxThreadsPerPkg = threadInfo[0].maxThreadsPerPkg;
1202 for (i = 1; i < nApics; i++) {
1203 if (threadInfo[i].pkgId != lastPkgId) {
1206 lastPkgId = threadInfo[i].pkgId;
1207 if ((
int)coreCt > nCoresPerPkg) nCoresPerPkg = coreCt;
1209 lastCoreId = threadInfo[i].coreId;
1210 if ((
int)threadCt > __kmp_nThreadsPerCore) __kmp_nThreadsPerCore = threadCt;
1212 lastThreadId = threadInfo[i].threadId;
1219 prevMaxCoresPerPkg = threadInfo[i].maxCoresPerPkg;
1220 prevMaxThreadsPerPkg = threadInfo[i].maxThreadsPerPkg;
1224 if (threadInfo[i].coreId != lastCoreId) {
1227 lastCoreId = threadInfo[i].coreId;
1228 if ((
int)threadCt > __kmp_nThreadsPerCore) __kmp_nThreadsPerCore = threadCt;
1230 lastThreadId = threadInfo[i].threadId;
1232 else if (threadInfo[i].threadId != lastThreadId) {
1234 lastThreadId = threadInfo[i].threadId;
1237 __kmp_free(threadInfo);
1238 KMP_CPU_FREE(oldMask);
1239 *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique;
1247 if ((prevMaxCoresPerPkg != threadInfo[i].maxCoresPerPkg)
1248 || (prevMaxThreadsPerPkg != threadInfo[i].maxThreadsPerPkg)) {
1249 __kmp_free(threadInfo);
1250 KMP_CPU_FREE(oldMask);
1251 *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
1256 if ((
int)coreCt > nCoresPerPkg) nCoresPerPkg = coreCt;
1257 if ((
int)threadCt > __kmp_nThreadsPerCore) __kmp_nThreadsPerCore = threadCt;
1265 __kmp_ncores = nCores;
1266 if (__kmp_affinity_verbose) {
1267 char buf[KMP_AFFIN_MASK_PRINT_LEN];
1268 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, oldMask);
1270 KMP_INFORM(AffUseGlobCpuid,
"KMP_AFFINITY");
1271 if (__kmp_affinity_respect_mask) {
1272 KMP_INFORM(InitOSProcSetRespect,
"KMP_AFFINITY", buf);
1274 KMP_INFORM(InitOSProcSetNotRespect,
"KMP_AFFINITY", buf);
1276 KMP_INFORM(AvailableOSProc,
"KMP_AFFINITY", __kmp_avail_proc);
1277 if (__kmp_affinity_uniform_topology()) {
1278 KMP_INFORM(Uniform,
"KMP_AFFINITY");
1280 KMP_INFORM(NonUniform,
"KMP_AFFINITY");
1282 KMP_INFORM(Topology,
"KMP_AFFINITY", nPackages, nCoresPerPkg,
1283 __kmp_nThreadsPerCore, __kmp_ncores);
1286 KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
1287 KMP_DEBUG_ASSERT(nApics == __kmp_avail_proc);
1288 __kmp_pu_os_idx = (
int*)__kmp_allocate(
sizeof(
int) * __kmp_avail_proc);
1289 for (i = 0; i < nApics; ++i) {
1290 __kmp_pu_os_idx[i] = threadInfo[i].osId;
1292 if (__kmp_affinity_type == affinity_none) {
1293 __kmp_free(threadInfo);
1294 KMP_CPU_FREE(oldMask);
1304 int coreLevel = (nCoresPerPkg <= 1) ? -1 : 1;
1305 int threadLevel = (__kmp_nThreadsPerCore <= 1) ? -1 : ((coreLevel >= 0) ? 2 : 1);
1306 unsigned depth = (pkgLevel >= 0) + (coreLevel >= 0) + (threadLevel >= 0);
1308 KMP_ASSERT(depth > 0);
1309 *address2os = (AddrUnsPair*)__kmp_allocate(
sizeof(AddrUnsPair) * nApics);
1311 for (i = 0; i < nApics; ++i) {
1312 Address addr(depth);
1313 unsigned os = threadInfo[i].osId;
1316 if (pkgLevel >= 0) {
1317 addr.labels[d++] = threadInfo[i].pkgId;
1319 if (coreLevel >= 0) {
1320 addr.labels[d++] = threadInfo[i].coreId;
1322 if (threadLevel >= 0) {
1323 addr.labels[d++] = threadInfo[i].threadId;
1325 (*address2os)[i] = AddrUnsPair(addr, os);
1328 if (__kmp_affinity_gran_levels < 0) {
1333 __kmp_affinity_gran_levels = 0;
1334 if ((threadLevel >= 0)
1335 && (__kmp_affinity_gran > affinity_gran_thread)) {
1336 __kmp_affinity_gran_levels++;
1338 if ((coreLevel >= 0) && (__kmp_affinity_gran > affinity_gran_core)) {
1339 __kmp_affinity_gran_levels++;
1341 if ((pkgLevel >= 0) && (__kmp_affinity_gran > affinity_gran_package)) {
1342 __kmp_affinity_gran_levels++;
1346 if (__kmp_affinity_verbose) {
1347 __kmp_affinity_print_topology(*address2os, nApics, depth, pkgLevel,
1348 coreLevel, threadLevel);
1351 __kmp_free(threadInfo);
1352 KMP_CPU_FREE(oldMask);
1363 __kmp_affinity_create_x2apicid_map(AddrUnsPair **address2os,
1364 kmp_i18n_id_t *
const msg_id)
1369 *msg_id = kmp_i18n_null;
1374 __kmp_x86_cpuid(0, 0, &buf);
1376 *msg_id = kmp_i18n_str_NoLeaf11Support;
1379 __kmp_x86_cpuid(11, 0, &buf);
1381 *msg_id = kmp_i18n_str_NoLeaf11Support;
1392 int threadLevel = -1;
1395 __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
1397 for (level = 0;; level++) {
1410 *msg_id = kmp_i18n_str_InvalidCpuidInfo;
1413 __kmp_x86_cpuid(11, level, &buf);
1424 int kind = (buf.ecx >> 8) & 0xff;
1429 threadLevel = level;
1432 __kmp_nThreadsPerCore = buf.ebx & 0xff;
1433 if (__kmp_nThreadsPerCore == 0) {
1434 *msg_id = kmp_i18n_str_InvalidCpuidInfo;
1438 else if (kind == 2) {
1444 nCoresPerPkg = buf.ebx & 0xff;
1445 if (nCoresPerPkg == 0) {
1446 *msg_id = kmp_i18n_str_InvalidCpuidInfo;
1452 *msg_id = kmp_i18n_str_InvalidCpuidInfo;
1455 if (pkgLevel >= 0) {
1459 nPackages = buf.ebx & 0xff;
1460 if (nPackages == 0) {
1461 *msg_id = kmp_i18n_str_InvalidCpuidInfo;
1474 if (threadLevel >= 0) {
1475 threadLevel = depth - threadLevel - 1;
1477 if (coreLevel >= 0) {
1478 coreLevel = depth - coreLevel - 1;
1480 KMP_DEBUG_ASSERT(pkgLevel >= 0);
1481 pkgLevel = depth - pkgLevel - 1;
1491 if (! KMP_AFFINITY_CAPABLE())
1497 KMP_ASSERT(__kmp_affinity_type == affinity_none);
1499 __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore;
1500 nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg;
1501 if (__kmp_affinity_verbose) {
1502 KMP_INFORM(AffNotCapableUseLocCpuidL11,
"KMP_AFFINITY");
1503 KMP_INFORM(AvailableOSProc,
"KMP_AFFINITY", __kmp_avail_proc);
1504 if (__kmp_affinity_uniform_topology()) {
1505 KMP_INFORM(Uniform,
"KMP_AFFINITY");
1507 KMP_INFORM(NonUniform,
"KMP_AFFINITY");
1509 KMP_INFORM(Topology,
"KMP_AFFINITY", nPackages, nCoresPerPkg,
1510 __kmp_nThreadsPerCore, __kmp_ncores);
1525 kmp_affin_mask_t *oldMask;
1526 KMP_CPU_ALLOC(oldMask);
1527 __kmp_get_system_affinity(oldMask, TRUE);
1532 AddrUnsPair *retval = (AddrUnsPair *)
1533 __kmp_allocate(
sizeof(AddrUnsPair) * __kmp_avail_proc);
1541 KMP_CPU_SET_ITERATE(proc, __kmp_affin_fullMask) {
1545 if (! KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
1548 KMP_DEBUG_ASSERT(nApics < __kmp_avail_proc);
1550 __kmp_affinity_bind_thread(proc);
1556 Address addr(depth);
1559 for (level = 0; level < depth; level++) {
1560 __kmp_x86_cpuid(11, level, &buf);
1561 unsigned apicId = buf.edx;
1563 if (level != depth - 1) {
1564 KMP_CPU_FREE(oldMask);
1565 *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
1568 addr.labels[depth - level - 1] = apicId >> prev_shift;
1572 int shift = buf.eax & 0x1f;
1573 int mask = (1 << shift) - 1;
1574 addr.labels[depth - level - 1] = (apicId & mask) >> prev_shift;
1577 if (level != depth) {
1578 KMP_CPU_FREE(oldMask);
1579 *msg_id = kmp_i18n_str_InconsistentCpuidInfo;
1583 retval[nApics] = AddrUnsPair(addr, proc);
1591 __kmp_set_system_affinity(oldMask, TRUE);
1596 KMP_ASSERT(nApics > 0);
1598 __kmp_ncores = nPackages = 1;
1599 __kmp_nThreadsPerCore = nCoresPerPkg = 1;
1600 if (__kmp_affinity_verbose) {
1601 char buf[KMP_AFFIN_MASK_PRINT_LEN];
1602 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, oldMask);
1604 KMP_INFORM(AffUseGlobCpuidL11,
"KMP_AFFINITY");
1605 if (__kmp_affinity_respect_mask) {
1606 KMP_INFORM(InitOSProcSetRespect,
"KMP_AFFINITY", buf);
1608 KMP_INFORM(InitOSProcSetNotRespect,
"KMP_AFFINITY", buf);
1610 KMP_INFORM(AvailableOSProc,
"KMP_AFFINITY", __kmp_avail_proc);
1611 KMP_INFORM(Uniform,
"KMP_AFFINITY");
1612 KMP_INFORM(Topology,
"KMP_AFFINITY", nPackages, nCoresPerPkg,
1613 __kmp_nThreadsPerCore, __kmp_ncores);
1616 if (__kmp_affinity_type == affinity_none) {
1618 KMP_CPU_FREE(oldMask);
1626 addr.labels[0] = retval[0].first.labels[pkgLevel];
1627 retval[0].first = addr;
1629 if (__kmp_affinity_gran_levels < 0) {
1630 __kmp_affinity_gran_levels = 0;
1633 if (__kmp_affinity_verbose) {
1634 __kmp_affinity_print_topology(retval, 1, 1, 0, -1, -1);
1637 *address2os = retval;
1638 KMP_CPU_FREE(oldMask);
1645 qsort(retval, nApics,
sizeof(*retval), __kmp_affinity_cmp_Address_labels);
1650 unsigned *totals = (
unsigned *)__kmp_allocate(depth *
sizeof(
unsigned));
1651 unsigned *counts = (
unsigned *)__kmp_allocate(depth *
sizeof(
unsigned));
1652 unsigned *maxCt = (
unsigned *)__kmp_allocate(depth *
sizeof(
unsigned));
1653 unsigned *last = (
unsigned *)__kmp_allocate(depth *
sizeof(
unsigned));
1654 for (level = 0; level < depth; level++) {
1658 last[level] = retval[0].first.labels[level];
1667 for (proc = 1; (int)proc < nApics; proc++) {
1669 for (level = 0; level < depth; level++) {
1670 if (retval[proc].first.labels[level] != last[level]) {
1672 for (j = level + 1; j < depth; j++) {
1682 last[j] = retval[proc].first.labels[j];
1686 if (counts[level] > maxCt[level]) {
1687 maxCt[level] = counts[level];
1689 last[level] = retval[proc].first.labels[level];
1692 else if (level == depth - 1) {
1698 KMP_CPU_FREE(oldMask);
1699 *msg_id = kmp_i18n_str_x2ApicIDsNotUnique;
1711 if (threadLevel >= 0) {
1712 __kmp_nThreadsPerCore = maxCt[threadLevel];
1715 __kmp_nThreadsPerCore = 1;
1717 nPackages = totals[pkgLevel];
1719 if (coreLevel >= 0) {
1720 __kmp_ncores = totals[coreLevel];
1721 nCoresPerPkg = maxCt[coreLevel];
1724 __kmp_ncores = nPackages;
1731 unsigned prod = maxCt[0];
1732 for (level = 1; level < depth; level++) {
1733 prod *= maxCt[level];
1735 bool uniform = (prod == totals[level - 1]);
1740 if (__kmp_affinity_verbose) {
1741 char mask[KMP_AFFIN_MASK_PRINT_LEN];
1742 __kmp_affinity_print_mask(mask, KMP_AFFIN_MASK_PRINT_LEN, oldMask);
1744 KMP_INFORM(AffUseGlobCpuidL11,
"KMP_AFFINITY");
1745 if (__kmp_affinity_respect_mask) {
1746 KMP_INFORM(InitOSProcSetRespect,
"KMP_AFFINITY", mask);
1748 KMP_INFORM(InitOSProcSetNotRespect,
"KMP_AFFINITY", mask);
1750 KMP_INFORM(AvailableOSProc,
"KMP_AFFINITY", __kmp_avail_proc);
1752 KMP_INFORM(Uniform,
"KMP_AFFINITY");
1754 KMP_INFORM(NonUniform,
"KMP_AFFINITY");
1758 __kmp_str_buf_init(&buf);
1760 __kmp_str_buf_print(&buf,
"%d", totals[0]);
1761 for (level = 1; level <= pkgLevel; level++) {
1762 __kmp_str_buf_print(&buf,
" x %d", maxCt[level]);
1764 KMP_INFORM(TopologyExtra,
"KMP_AFFINITY", buf.str, nCoresPerPkg,
1765 __kmp_nThreadsPerCore, __kmp_ncores);
1767 __kmp_str_buf_free(&buf);
1769 KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
1770 KMP_DEBUG_ASSERT(nApics == __kmp_avail_proc);
1771 __kmp_pu_os_idx = (
int*)__kmp_allocate(
sizeof(
int) * __kmp_avail_proc);
1772 for (proc = 0; (int)proc < nApics; ++proc) {
1773 __kmp_pu_os_idx[proc] = retval[proc].second;
1775 if (__kmp_affinity_type == affinity_none) {
1781 KMP_CPU_FREE(oldMask);
1790 for (level = 0; level < depth; level++) {
1791 if ((maxCt[level] == 1) && (level != pkgLevel)) {
1801 if (new_depth != depth) {
1802 AddrUnsPair *new_retval = (AddrUnsPair *)__kmp_allocate(
1803 sizeof(AddrUnsPair) * nApics);
1804 for (proc = 0; (int)proc < nApics; proc++) {
1805 Address addr(new_depth);
1806 new_retval[proc] = AddrUnsPair(addr, retval[proc].second);
1809 int newPkgLevel = -1;
1810 int newCoreLevel = -1;
1811 int newThreadLevel = -1;
1813 for (level = 0; level < depth; level++) {
1814 if ((maxCt[level] == 1)
1815 && (level != pkgLevel)) {
1821 if (level == pkgLevel) {
1822 newPkgLevel = level;
1824 if (level == coreLevel) {
1825 newCoreLevel = level;
1827 if (level == threadLevel) {
1828 newThreadLevel = level;
1830 for (proc = 0; (int)proc < nApics; proc++) {
1831 new_retval[proc].first.labels[new_level]
1832 = retval[proc].first.labels[level];
1838 retval = new_retval;
1840 pkgLevel = newPkgLevel;
1841 coreLevel = newCoreLevel;
1842 threadLevel = newThreadLevel;
1845 if (__kmp_affinity_gran_levels < 0) {
1850 __kmp_affinity_gran_levels = 0;
1851 if ((threadLevel >= 0) && (__kmp_affinity_gran > affinity_gran_thread)) {
1852 __kmp_affinity_gran_levels++;
1854 if ((coreLevel >= 0) && (__kmp_affinity_gran > affinity_gran_core)) {
1855 __kmp_affinity_gran_levels++;
1857 if (__kmp_affinity_gran > affinity_gran_package) {
1858 __kmp_affinity_gran_levels++;
1862 if (__kmp_affinity_verbose) {
1863 __kmp_affinity_print_topology(retval, nApics, depth, pkgLevel,
1864 coreLevel, threadLevel);
1871 KMP_CPU_FREE(oldMask);
1872 *address2os = retval;
1881 #define threadIdIndex 1 1882 #define coreIdIndex 2 1883 #define pkgIdIndex 3 1884 #define nodeIdIndex 4 1886 typedef unsigned *ProcCpuInfo;
1887 static unsigned maxIndex = pkgIdIndex;
1891 __kmp_affinity_cmp_ProcCpuInfo_os_id(
const void *a,
const void *b)
1893 const unsigned *aa = (
const unsigned *)a;
1894 const unsigned *bb = (
const unsigned *)b;
1895 if (aa[osIdIndex] < bb[osIdIndex])
return -1;
1896 if (aa[osIdIndex] > bb[osIdIndex])
return 1;
1902 __kmp_affinity_cmp_ProcCpuInfo_phys_id(
const void *a,
const void *b)
1905 const unsigned *aa = *((
const unsigned **)a);
1906 const unsigned *bb = *((
const unsigned **)b);
1907 for (i = maxIndex; ; i--) {
1908 if (aa[i] < bb[i])
return -1;
1909 if (aa[i] > bb[i])
return 1;
1910 if (i == osIdIndex)
break;
1921 __kmp_affinity_create_cpuinfo_map(AddrUnsPair **address2os,
int *line,
1922 kmp_i18n_id_t *
const msg_id, FILE *f)
1925 *msg_id = kmp_i18n_null;
1932 unsigned num_records = 0;
1934 buf[
sizeof(buf) - 1] = 1;
1935 if (! fgets(buf,
sizeof(buf), f)) {
1942 char s1[] =
"processor";
1943 if (strncmp(buf, s1,
sizeof(s1) - 1) == 0) {
1952 if (KMP_SSCANF(buf,
"node_%d id", &level) == 1) {
1953 if (nodeIdIndex + level >= maxIndex) {
1954 maxIndex = nodeIdIndex + level;
1965 if (num_records == 0) {
1967 *msg_id = kmp_i18n_str_NoProcRecords;
1970 if (num_records > (
unsigned)__kmp_xproc) {
1972 *msg_id = kmp_i18n_str_TooManyProcRecords;
1983 if (fseek(f, 0, SEEK_SET) != 0) {
1985 *msg_id = kmp_i18n_str_CantRewindCpuinfo;
1993 unsigned **threadInfo = (
unsigned **)__kmp_allocate((num_records + 1)
1994 *
sizeof(
unsigned *));
1996 for (i = 0; i <= num_records; i++) {
1997 threadInfo[i] = (
unsigned *)__kmp_allocate((maxIndex + 1)
1998 *
sizeof(unsigned));
2001 #define CLEANUP_THREAD_INFO \ 2002 for (i = 0; i <= num_records; i++) { \ 2003 __kmp_free(threadInfo[i]); \ 2005 __kmp_free(threadInfo); 2012 #define INIT_PROC_INFO(p) \ 2013 for (__index = 0; __index <= maxIndex; __index++) { \ 2014 (p)[__index] = UINT_MAX; \ 2017 for (i = 0; i <= num_records; i++) {
2018 INIT_PROC_INFO(threadInfo[i]);
2021 unsigned num_avail = 0;
2031 buf[
sizeof(buf) - 1] = 1;
2032 bool long_line =
false;
2033 if (! fgets(buf,
sizeof(buf), f)) {
2041 for (i = 0; i <= maxIndex; i++) {
2042 if (threadInfo[num_avail][i] != UINT_MAX) {
2050 }
else if (!buf[
sizeof(buf) - 1]) {
2057 #define CHECK_LINE \ 2059 CLEANUP_THREAD_INFO; \ 2060 *msg_id = kmp_i18n_str_LongLineCpuinfo; \ 2066 char s1[] =
"processor";
2067 if (strncmp(buf, s1,
sizeof(s1) - 1) == 0) {
2069 char *p = strchr(buf +
sizeof(s1) - 1,
':');
2071 if ((p == NULL) || (KMP_SSCANF(p + 1,
"%u\n", &val) != 1))
goto no_val;
2072 if (threadInfo[num_avail][osIdIndex] != UINT_MAX)
goto dup_field;
2073 threadInfo[num_avail][osIdIndex] = val;
2074 #if KMP_OS_LINUX && USE_SYSFS_INFO 2076 KMP_SNPRINTF(path,
sizeof(path),
2077 "/sys/devices/system/cpu/cpu%u/topology/physical_package_id",
2078 threadInfo[num_avail][osIdIndex]);
2079 __kmp_read_from_file(path,
"%u", &threadInfo[num_avail][pkgIdIndex]);
2081 KMP_SNPRINTF(path,
sizeof(path),
2082 "/sys/devices/system/cpu/cpu%u/topology/core_id",
2083 threadInfo[num_avail][osIdIndex]);
2084 __kmp_read_from_file(path,
"%u", &threadInfo[num_avail][coreIdIndex]);
2088 char s2[] =
"physical id";
2089 if (strncmp(buf, s2,
sizeof(s2) - 1) == 0) {
2091 char *p = strchr(buf +
sizeof(s2) - 1,
':');
2093 if ((p == NULL) || (KMP_SSCANF(p + 1,
"%u\n", &val) != 1))
goto no_val;
2094 if (threadInfo[num_avail][pkgIdIndex] != UINT_MAX)
goto dup_field;
2095 threadInfo[num_avail][pkgIdIndex] = val;
2098 char s3[] =
"core id";
2099 if (strncmp(buf, s3,
sizeof(s3) - 1) == 0) {
2101 char *p = strchr(buf +
sizeof(s3) - 1,
':');
2103 if ((p == NULL) || (KMP_SSCANF(p + 1,
"%u\n", &val) != 1))
goto no_val;
2104 if (threadInfo[num_avail][coreIdIndex] != UINT_MAX)
goto dup_field;
2105 threadInfo[num_avail][coreIdIndex] = val;
2107 #endif // KMP_OS_LINUX && USE_SYSFS_INFO 2109 char s4[] =
"thread id";
2110 if (strncmp(buf, s4,
sizeof(s4) - 1) == 0) {
2112 char *p = strchr(buf +
sizeof(s4) - 1,
':');
2114 if ((p == NULL) || (KMP_SSCANF(p + 1,
"%u\n", &val) != 1))
goto no_val;
2115 if (threadInfo[num_avail][threadIdIndex] != UINT_MAX)
goto dup_field;
2116 threadInfo[num_avail][threadIdIndex] = val;
2120 if (KMP_SSCANF(buf,
"node_%d id", &level) == 1) {
2122 char *p = strchr(buf +
sizeof(s4) - 1,
':');
2124 if ((p == NULL) || (KMP_SSCANF(p + 1,
"%u\n", &val) != 1))
goto no_val;
2125 KMP_ASSERT(nodeIdIndex + level <= maxIndex);
2126 if (threadInfo[num_avail][nodeIdIndex + level] != UINT_MAX)
goto dup_field;
2127 threadInfo[num_avail][nodeIdIndex + level] = val;
2136 if ((*buf != 0) && (*buf !=
'\n')) {
2143 while (((ch = fgetc(f)) != EOF) && (ch !=
'\n'));
2152 if ((
int)num_avail == __kmp_xproc) {
2153 CLEANUP_THREAD_INFO;
2154 *msg_id = kmp_i18n_str_TooManyEntries;
2162 if (threadInfo[num_avail][osIdIndex] == UINT_MAX) {
2163 CLEANUP_THREAD_INFO;
2164 *msg_id = kmp_i18n_str_MissingProcField;
2167 if (threadInfo[0][pkgIdIndex] == UINT_MAX) {
2168 CLEANUP_THREAD_INFO;
2169 *msg_id = kmp_i18n_str_MissingPhysicalIDField;
2176 if (! KMP_CPU_ISSET(threadInfo[num_avail][osIdIndex], __kmp_affin_fullMask)) {
2177 INIT_PROC_INFO(threadInfo[num_avail]);
2186 KMP_ASSERT(num_avail <= num_records);
2187 INIT_PROC_INFO(threadInfo[num_avail]);
2192 CLEANUP_THREAD_INFO;
2193 *msg_id = kmp_i18n_str_MissingValCpuinfo;
2197 CLEANUP_THREAD_INFO;
2198 *msg_id = kmp_i18n_str_DuplicateFieldCpuinfo;
2203 # if KMP_MIC && REDUCE_TEAM_SIZE 2204 unsigned teamSize = 0;
2205 # endif // KMP_MIC && REDUCE_TEAM_SIZE 2219 KMP_ASSERT(num_avail > 0);
2220 KMP_ASSERT(num_avail <= num_records);
2221 if (num_avail == 1) {
2223 __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1;
2224 if (__kmp_affinity_verbose) {
2225 if (! KMP_AFFINITY_CAPABLE()) {
2226 KMP_INFORM(AffNotCapableUseCpuinfo,
"KMP_AFFINITY");
2227 KMP_INFORM(AvailableOSProc,
"KMP_AFFINITY", __kmp_avail_proc);
2228 KMP_INFORM(Uniform,
"KMP_AFFINITY");
2231 char buf[KMP_AFFIN_MASK_PRINT_LEN];
2232 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
2233 __kmp_affin_fullMask);
2234 KMP_INFORM(AffCapableUseCpuinfo,
"KMP_AFFINITY");
2235 if (__kmp_affinity_respect_mask) {
2236 KMP_INFORM(InitOSProcSetRespect,
"KMP_AFFINITY", buf);
2238 KMP_INFORM(InitOSProcSetNotRespect,
"KMP_AFFINITY", buf);
2240 KMP_INFORM(AvailableOSProc,
"KMP_AFFINITY", __kmp_avail_proc);
2241 KMP_INFORM(Uniform,
"KMP_AFFINITY");
2245 __kmp_str_buf_init(&buf);
2246 __kmp_str_buf_print(&buf,
"1");
2247 for (index = maxIndex - 1; index > pkgIdIndex; index--) {
2248 __kmp_str_buf_print(&buf,
" x 1");
2250 KMP_INFORM(TopologyExtra,
"KMP_AFFINITY", buf.str, 1, 1, 1);
2251 __kmp_str_buf_free(&buf);
2254 if (__kmp_affinity_type == affinity_none) {
2255 CLEANUP_THREAD_INFO;
2259 *address2os = (AddrUnsPair*)__kmp_allocate(
sizeof(AddrUnsPair));
2261 addr.labels[0] = threadInfo[0][pkgIdIndex];
2262 (*address2os)[0] = AddrUnsPair(addr, threadInfo[0][osIdIndex]);
2264 if (__kmp_affinity_gran_levels < 0) {
2265 __kmp_affinity_gran_levels = 0;
2268 if (__kmp_affinity_verbose) {
2269 __kmp_affinity_print_topology(*address2os, 1, 1, 0, -1, -1);
2272 CLEANUP_THREAD_INFO;
2279 qsort(threadInfo, num_avail,
sizeof(*threadInfo),
2280 __kmp_affinity_cmp_ProcCpuInfo_phys_id);
2293 unsigned *counts = (
unsigned *)__kmp_allocate((maxIndex + 1)
2294 *
sizeof(unsigned));
2295 unsigned *maxCt = (
unsigned *)__kmp_allocate((maxIndex + 1)
2296 *
sizeof(unsigned));
2297 unsigned *totals = (
unsigned *)__kmp_allocate((maxIndex + 1)
2298 *
sizeof(unsigned));
2299 unsigned *lastId = (
unsigned *)__kmp_allocate((maxIndex + 1)
2300 *
sizeof(unsigned));
2302 bool assign_thread_ids =
false;
2303 unsigned threadIdCt;
2306 restart_radix_check:
2312 if (assign_thread_ids) {
2313 if (threadInfo[0][threadIdIndex] == UINT_MAX) {
2314 threadInfo[0][threadIdIndex] = threadIdCt++;
2316 else if (threadIdCt <= threadInfo[0][threadIdIndex]) {
2317 threadIdCt = threadInfo[0][threadIdIndex] + 1;
2320 for (index = 0; index <= maxIndex; index++) {
2324 lastId[index] = threadInfo[0][index];;
2330 for (i = 1; i < num_avail; i++) {
2335 for (index = maxIndex; index >= threadIdIndex; index--) {
2336 if (assign_thread_ids && (index == threadIdIndex)) {
2340 if (threadInfo[i][threadIdIndex] == UINT_MAX) {
2341 threadInfo[i][threadIdIndex] = threadIdCt++;
2349 else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
2350 threadIdCt = threadInfo[i][threadIdIndex] + 1;
2353 if (threadInfo[i][index] != lastId[index]) {
2362 for (index2 = threadIdIndex; index2 < index; index2++) {
2364 if (counts[index2] > maxCt[index2]) {
2365 maxCt[index2] = counts[index2];
2368 lastId[index2] = threadInfo[i][index2];
2372 lastId[index] = threadInfo[i][index];
2374 if (assign_thread_ids && (index > threadIdIndex)) {
2376 # if KMP_MIC && REDUCE_TEAM_SIZE 2381 teamSize += ( threadIdCt <= 2 ) ? ( threadIdCt ) : ( threadIdCt - 1 );
2382 # endif // KMP_MIC && REDUCE_TEAM_SIZE 2392 if (threadInfo[i][threadIdIndex] == UINT_MAX) {
2393 threadInfo[i][threadIdIndex] = threadIdCt++;
2401 else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
2402 threadIdCt = threadInfo[i][threadIdIndex] + 1;
2408 if (index < threadIdIndex) {
2414 if ((threadInfo[i][threadIdIndex] != UINT_MAX)
2415 || assign_thread_ids) {
2420 CLEANUP_THREAD_INFO;
2421 *msg_id = kmp_i18n_str_PhysicalIDsNotUnique;
2430 assign_thread_ids =
true;
2431 goto restart_radix_check;
2435 # if KMP_MIC && REDUCE_TEAM_SIZE 2440 teamSize += ( threadIdCt <= 2 ) ? ( threadIdCt ) : ( threadIdCt - 1 );
2441 # endif // KMP_MIC && REDUCE_TEAM_SIZE 2443 for (index = threadIdIndex; index <= maxIndex; index++) {
2444 if (counts[index] > maxCt[index]) {
2445 maxCt[index] = counts[index];
2449 __kmp_nThreadsPerCore = maxCt[threadIdIndex];
2450 nCoresPerPkg = maxCt[coreIdIndex];
2451 nPackages = totals[pkgIdIndex];
2456 unsigned prod = totals[maxIndex];
2457 for (index = threadIdIndex; index < maxIndex; index++) {
2458 prod *= maxCt[index];
2460 bool uniform = (prod == totals[threadIdIndex]);
2468 __kmp_ncores = totals[coreIdIndex];
2470 if (__kmp_affinity_verbose) {
2471 if (! KMP_AFFINITY_CAPABLE()) {
2472 KMP_INFORM(AffNotCapableUseCpuinfo,
"KMP_AFFINITY");
2473 KMP_INFORM(AvailableOSProc,
"KMP_AFFINITY", __kmp_avail_proc);
2475 KMP_INFORM(Uniform,
"KMP_AFFINITY");
2477 KMP_INFORM(NonUniform,
"KMP_AFFINITY");
2481 char buf[KMP_AFFIN_MASK_PRINT_LEN];
2482 __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, __kmp_affin_fullMask);
2483 KMP_INFORM(AffCapableUseCpuinfo,
"KMP_AFFINITY");
2484 if (__kmp_affinity_respect_mask) {
2485 KMP_INFORM(InitOSProcSetRespect,
"KMP_AFFINITY", buf);
2487 KMP_INFORM(InitOSProcSetNotRespect,
"KMP_AFFINITY", buf);
2489 KMP_INFORM(AvailableOSProc,
"KMP_AFFINITY", __kmp_avail_proc);
2491 KMP_INFORM(Uniform,
"KMP_AFFINITY");
2493 KMP_INFORM(NonUniform,
"KMP_AFFINITY");
2497 __kmp_str_buf_init(&buf);
2499 __kmp_str_buf_print(&buf,
"%d", totals[maxIndex]);
2500 for (index = maxIndex - 1; index >= pkgIdIndex; index--) {
2501 __kmp_str_buf_print(&buf,
" x %d", maxCt[index]);
2503 KMP_INFORM(TopologyExtra,
"KMP_AFFINITY", buf.str, maxCt[coreIdIndex],
2504 maxCt[threadIdIndex], __kmp_ncores);
2506 __kmp_str_buf_free(&buf);
2509 # if KMP_MIC && REDUCE_TEAM_SIZE 2513 if ((__kmp_dflt_team_nth == 0) && (teamSize > 0)) {
2514 __kmp_dflt_team_nth = teamSize;
2515 KA_TRACE(20, (
"__kmp_affinity_create_cpuinfo_map: setting __kmp_dflt_team_nth = %d\n",
2516 __kmp_dflt_team_nth));
2518 # endif // KMP_MIC && REDUCE_TEAM_SIZE 2520 KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
2521 KMP_DEBUG_ASSERT(num_avail == __kmp_avail_proc);
2522 __kmp_pu_os_idx = (
int*)__kmp_allocate(
sizeof(
int) * __kmp_avail_proc);
2523 for (i = 0; i < num_avail; ++i) {
2524 __kmp_pu_os_idx[i] = threadInfo[i][osIdIndex];
2527 if (__kmp_affinity_type == affinity_none) {
2532 CLEANUP_THREAD_INFO;
2543 bool *inMap = (
bool *)__kmp_allocate((maxIndex + 1) *
sizeof(bool));
2545 for (index = threadIdIndex; index < maxIndex; index++) {
2546 KMP_ASSERT(totals[index] >= totals[index + 1]);
2547 inMap[index] = (totals[index] > totals[index + 1]);
2549 inMap[maxIndex] = (totals[maxIndex] > 1);
2550 inMap[pkgIdIndex] =
true;
2553 for (index = threadIdIndex; index <= maxIndex; index++) {
2558 KMP_ASSERT(depth > 0);
2563 *address2os = (AddrUnsPair*)
2564 __kmp_allocate(
sizeof(AddrUnsPair) * num_avail);
2567 int threadLevel = -1;
2569 for (i = 0; i < num_avail; ++i) {
2570 Address addr(depth);
2571 unsigned os = threadInfo[i][osIdIndex];
2575 for (src_index = maxIndex; src_index >= threadIdIndex; src_index--) {
2576 if (! inMap[src_index]) {
2579 addr.labels[dst_index] = threadInfo[i][src_index];
2580 if (src_index == pkgIdIndex) {
2581 pkgLevel = dst_index;
2583 else if (src_index == coreIdIndex) {
2584 coreLevel = dst_index;
2586 else if (src_index == threadIdIndex) {
2587 threadLevel = dst_index;
2591 (*address2os)[i] = AddrUnsPair(addr, os);
2594 if (__kmp_affinity_gran_levels < 0) {
2600 __kmp_affinity_gran_levels = 0;
2601 for (src_index = threadIdIndex; src_index <= maxIndex; src_index++) {
2602 if (! inMap[src_index]) {
2605 switch (src_index) {
2607 if (__kmp_affinity_gran > affinity_gran_thread) {
2608 __kmp_affinity_gran_levels++;
2613 if (__kmp_affinity_gran > affinity_gran_core) {
2614 __kmp_affinity_gran_levels++;
2619 if (__kmp_affinity_gran > affinity_gran_package) {
2620 __kmp_affinity_gran_levels++;
2627 if (__kmp_affinity_verbose) {
2628 __kmp_affinity_print_topology(*address2os, num_avail, depth, pkgLevel,
2629 coreLevel, threadLevel);
2637 CLEANUP_THREAD_INFO;
2647 static kmp_affin_mask_t *
2648 __kmp_create_masks(
unsigned *maxIndex,
unsigned *numUnique,
2649 AddrUnsPair *address2os,
unsigned numAddrs)
2658 KMP_ASSERT(numAddrs > 0);
2659 depth = address2os[0].first.depth;
2662 for (i = 0; i < numAddrs; i++) {
2663 unsigned osId = address2os[i].second;
2664 if (osId > maxOsId) {
2668 kmp_affin_mask_t *osId2Mask;
2669 KMP_CPU_ALLOC_ARRAY(osId2Mask, (maxOsId+1));
2676 qsort(address2os, numAddrs,
sizeof(*address2os),
2677 __kmp_affinity_cmp_Address_labels);
2679 KMP_ASSERT(__kmp_affinity_gran_levels >= 0);
2680 if (__kmp_affinity_verbose && (__kmp_affinity_gran_levels > 0)) {
2681 KMP_INFORM(ThreadsMigrate,
"KMP_AFFINITY", __kmp_affinity_gran_levels);
2683 if (__kmp_affinity_gran_levels >= (
int)depth) {
2684 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
2685 && (__kmp_affinity_type != affinity_none))) {
2686 KMP_WARNING(AffThreadsMayMigrate);
2696 unsigned unique = 0;
2698 unsigned leader = 0;
2699 Address *leaderAddr = &(address2os[0].first);
2700 kmp_affin_mask_t *sum;
2701 KMP_CPU_ALLOC_ON_STACK(sum);
2703 KMP_CPU_SET(address2os[0].second, sum);
2704 for (i = 1; i < numAddrs; i++) {
2710 if (leaderAddr->isClose(address2os[i].first,
2711 __kmp_affinity_gran_levels)) {
2712 KMP_CPU_SET(address2os[i].second, sum);
2721 for (; j < i; j++) {
2722 unsigned osId = address2os[j].second;
2723 KMP_DEBUG_ASSERT(osId <= maxOsId);
2724 kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
2725 KMP_CPU_COPY(mask, sum);
2726 address2os[j].first.leader = (j == leader);
2734 leaderAddr = &(address2os[i].first);
2736 KMP_CPU_SET(address2os[i].second, sum);
2743 for (; j < i; j++) {
2744 unsigned osId = address2os[j].second;
2745 KMP_DEBUG_ASSERT(osId <= maxOsId);
2746 kmp_affin_mask_t *mask = KMP_CPU_INDEX(osId2Mask, osId);
2747 KMP_CPU_COPY(mask, sum);
2748 address2os[j].first.leader = (j == leader);
2751 KMP_CPU_FREE_FROM_STACK(sum);
2753 *maxIndex = maxOsId;
2754 *numUnique = unique;
2764 static kmp_affin_mask_t *newMasks;
2765 static int numNewMasks;
2766 static int nextNewMask;
2768 #define ADD_MASK(_mask) \ 2770 if (nextNewMask >= numNewMasks) { \ 2773 kmp_affin_mask_t* temp; \ 2774 KMP_CPU_INTERNAL_ALLOC_ARRAY(temp, numNewMasks); \ 2775 for(i=0;i<numNewMasks/2;i++) { \ 2776 kmp_affin_mask_t* src = KMP_CPU_INDEX(newMasks, i); \ 2777 kmp_affin_mask_t* dest = KMP_CPU_INDEX(temp, i); \ 2778 KMP_CPU_COPY(dest, src); \ 2780 KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks/2); \ 2783 KMP_CPU_COPY(KMP_CPU_INDEX(newMasks, nextNewMask), (_mask)); \ 2787 #define ADD_MASK_OSID(_osId,_osId2Mask,_maxOsId) \ 2789 if (((_osId) > _maxOsId) || \ 2790 (! KMP_CPU_ISSET((_osId), KMP_CPU_INDEX((_osId2Mask), (_osId))))) { \ 2791 if (__kmp_affinity_verbose || (__kmp_affinity_warnings \ 2792 && (__kmp_affinity_type != affinity_none))) { \ 2793 KMP_WARNING(AffIgnoreInvalidProcID, _osId); \ 2797 ADD_MASK(KMP_CPU_INDEX(_osId2Mask, (_osId))); \ 2807 __kmp_affinity_process_proclist(kmp_affin_mask_t **out_masks,
2808 unsigned int *out_numMasks,
const char *proclist,
2809 kmp_affin_mask_t *osId2Mask,
int maxOsId)
2812 const char *scan = proclist;
2813 const char *next = proclist;
2820 KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
2822 kmp_affin_mask_t *sumMask;
2823 KMP_CPU_ALLOC(sumMask);
2827 int start, end, stride;
2831 if (*next ==
'\0') {
2845 KMP_ASSERT2((*next >=
'0') && (*next <=
'9'),
2848 num = __kmp_str_to_int(scan, *next);
2849 KMP_ASSERT2(num >= 0,
"bad explicit proc list");
2854 if ((num > maxOsId) ||
2855 (! KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
2856 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
2857 && (__kmp_affinity_type != affinity_none))) {
2858 KMP_WARNING(AffIgnoreInvalidProcID, num);
2860 KMP_CPU_ZERO(sumMask);
2863 KMP_CPU_COPY(sumMask, KMP_CPU_INDEX(osId2Mask, num));
2889 KMP_ASSERT2((*next >=
'0') && (*next <=
'9'),
2890 "bad explicit proc list");
2893 num = __kmp_str_to_int(scan, *next);
2894 KMP_ASSERT2(num >= 0,
"bad explicit proc list");
2899 if ((num > maxOsId) ||
2900 (! KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
2901 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
2902 && (__kmp_affinity_type != affinity_none))) {
2903 KMP_WARNING(AffIgnoreInvalidProcID, num);
2907 KMP_CPU_UNION(sumMask, KMP_CPU_INDEX(osId2Mask, num));
2926 KMP_ASSERT2((*next >=
'0') && (*next <=
'9'),
"bad explicit proc list");
2928 start = __kmp_str_to_int(scan, *next);
2929 KMP_ASSERT2(start >= 0,
"bad explicit proc list");
2936 ADD_MASK_OSID(start, osId2Mask, maxOsId);
2954 KMP_ASSERT2((*next >=
'0') && (*next <=
'9'),
"bad explicit proc list");
2956 end = __kmp_str_to_int(scan, *next);
2957 KMP_ASSERT2(end >= 0,
"bad explicit proc list");
2978 KMP_ASSERT2((*next >=
'0') && (*next <=
'9'),
2979 "bad explicit proc list");
2981 stride = __kmp_str_to_int(scan, *next);
2982 KMP_ASSERT2(stride >= 0,
"bad explicit proc list");
2989 KMP_ASSERT2(stride != 0,
"bad explicit proc list");
2991 KMP_ASSERT2(start <= end,
"bad explicit proc list");
2994 KMP_ASSERT2(start >= end,
"bad explicit proc list");
2996 KMP_ASSERT2((end - start) / stride <= 65536,
"bad explicit proc list");
3003 ADD_MASK_OSID(start, osId2Mask, maxOsId);
3005 }
while (start <= end);
3009 ADD_MASK_OSID(start, osId2Mask, maxOsId);
3011 }
while (start >= end);
3024 *out_numMasks = nextNewMask;
3025 if (nextNewMask == 0) {
3027 KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
3030 KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
3031 for(i = 0; i < nextNewMask; i++) {
3032 kmp_affin_mask_t* src = KMP_CPU_INDEX(newMasks, i);
3033 kmp_affin_mask_t* dest = KMP_CPU_INDEX((*out_masks), i);
3034 KMP_CPU_COPY(dest, src);
3036 KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
3037 KMP_CPU_FREE(sumMask);
3067 __kmp_process_subplace_list(
const char **scan, kmp_affin_mask_t *osId2Mask,
3068 int maxOsId, kmp_affin_mask_t *tempMask,
int *setSize)
3073 int start, count, stride, i;
3079 KMP_ASSERT2((**scan >=
'0') && (**scan <=
'9'),
3080 "bad explicit places list");
3083 start = __kmp_str_to_int(*scan, *next);
3084 KMP_ASSERT(start >= 0);
3091 if (**scan ==
'}' || **scan ==
',') {
3092 if ((start > maxOsId) ||
3093 (! KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3094 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
3095 && (__kmp_affinity_type != affinity_none))) {
3096 KMP_WARNING(AffIgnoreInvalidProcID, start);
3100 KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3103 if (**scan ==
'}') {
3109 KMP_ASSERT2(**scan ==
':',
"bad explicit places list");
3116 KMP_ASSERT2((**scan >=
'0') && (**scan <=
'9'),
3117 "bad explicit places list");
3120 count = __kmp_str_to_int(*scan, *next);
3121 KMP_ASSERT(count >= 0);
3128 if (**scan ==
'}' || **scan ==
',') {
3129 for (i = 0; i < count; i++) {
3130 if ((start > maxOsId) ||
3131 (! KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3132 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
3133 && (__kmp_affinity_type != affinity_none))) {
3134 KMP_WARNING(AffIgnoreInvalidProcID, start);
3139 KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3144 if (**scan ==
'}') {
3150 KMP_ASSERT2(**scan ==
':',
"bad explicit places list");
3159 if (**scan ==
'+') {
3163 if (**scan ==
'-') {
3171 KMP_ASSERT2((**scan >=
'0') && (**scan <=
'9'),
3172 "bad explicit places list");
3175 stride = __kmp_str_to_int(*scan, *next);
3176 KMP_ASSERT(stride >= 0);
3184 if (**scan ==
'}' || **scan ==
',') {
3185 for (i = 0; i < count; i++) {
3186 if ((start > maxOsId) ||
3187 (! KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) {
3188 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
3189 && (__kmp_affinity_type != affinity_none))) {
3190 KMP_WARNING(AffIgnoreInvalidProcID, start);
3195 KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start));
3200 if (**scan ==
'}') {
3207 KMP_ASSERT2(0,
"bad explicit places list");
3213 __kmp_process_place(
const char **scan, kmp_affin_mask_t *osId2Mask,
3214 int maxOsId, kmp_affin_mask_t *tempMask,
int *setSize)
3222 if (**scan ==
'{') {
3224 __kmp_process_subplace_list(scan, osId2Mask, maxOsId , tempMask,
3226 KMP_ASSERT2(**scan ==
'}',
"bad explicit places list");
3229 else if (**scan ==
'!') {
3231 __kmp_process_place(scan, osId2Mask, maxOsId, tempMask, setSize);
3232 KMP_CPU_COMPLEMENT(maxOsId, tempMask);
3234 else if ((**scan >=
'0') && (**scan <=
'9')) {
3237 int num = __kmp_str_to_int(*scan, *next);
3238 KMP_ASSERT(num >= 0);
3239 if ((num > maxOsId) ||
3240 (! KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) {
3241 if (__kmp_affinity_verbose || (__kmp_affinity_warnings
3242 && (__kmp_affinity_type != affinity_none))) {
3243 KMP_WARNING(AffIgnoreInvalidProcID, num);
3247 KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, num));
3253 KMP_ASSERT2(0,
"bad explicit places list");
static void
__kmp_affinity_process_placelist(kmp_affin_mask_t **out_masks,
  unsigned int *out_numMasks, const char *placelist,
  kmp_affin_mask_t *osId2Mask, int maxOsId)
{
    int i, j, count, stride, sign;
    const char *scan = placelist;
    const char *next = placelist;

    numNewMasks = 2;
    KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks);
    nextNewMask = 0;

    // tempMask is modified based on the previous or initial place to form
    // the current place; previousMask contains the previous place.
    kmp_affin_mask_t *tempMask;
    kmp_affin_mask_t *previousMask;
    KMP_CPU_ALLOC(tempMask);
    KMP_CPU_ZERO(tempMask);
    KMP_CPU_ALLOC(previousMask);
    KMP_CPU_ZERO(previousMask);
    int setSize = 0;

    for (;;) {
        __kmp_process_place(&scan, osId2Mask, maxOsId, tempMask, &setSize);

        // Valid follow sets are ',' ':' and EOL.
        SKIP_WS(scan);
        if (*scan == '\0' || *scan == ',') {
            if (setSize > 0) {
                ADD_MASK(tempMask);
            }
            KMP_CPU_ZERO(tempMask);
            setSize = 0;
            if (*scan == '\0') {
                break;
            }
            scan++;     // skip ','
            continue;
        }

        KMP_ASSERT2(*scan == ':', "bad explicit places list");
        scan++;         // skip ':'

        // Read the count parameter.
        SKIP_WS(scan);
        KMP_ASSERT2((*scan >= '0') && (*scan <= '9'),
          "bad explicit places list");
        next = scan;
        SKIP_DIGITS(next);
        count = __kmp_str_to_int(scan, *next);
        KMP_ASSERT(count >= 0);
        scan = next;

        // Valid follow sets are ',' ':' and EOL.
        SKIP_WS(scan);
        if (*scan == '\0' || *scan == ',') {
            stride = +1;
        }
        else {
            KMP_ASSERT2(*scan == ':', "bad explicit places list");
            scan++;     // skip ':'

            // Read the stride parameter: optional sign, then digits.
            sign = +1;
            SKIP_WS(scan);
            if (*scan == '+') {
                scan++; // skip '+'
            }
            else if (*scan == '-') {
                sign = -1;
                scan++; // skip '-'
            }
            SKIP_WS(scan);
            KMP_ASSERT2((*scan >= '0') && (*scan <= '9'),
              "bad explicit places list");
            next = scan;
            SKIP_DIGITS(next);
            stride = __kmp_str_to_int(scan, *next);
            KMP_DEBUG_ASSERT(stride >= 0);
            scan = next;
            stride *= sign;
        }

        // Add places determined by initial_place : count : stride.
        for (i = 0; i < count; i++) {
            if (setSize == 0) {
                break;
            }
            // Add the current place, then build the next place (tempMask)
            // from that.
            KMP_CPU_COPY(previousMask, tempMask);
            ADD_MASK(previousMask);
            KMP_CPU_ZERO(tempMask);
            setSize = 0;
            KMP_CPU_SET_ITERATE(j, previousMask) {
                if (! KMP_CPU_ISSET(j, previousMask)) {
                    continue;
                }
                if ((j+stride > maxOsId) || (j+stride < 0) ||
                  (! KMP_CPU_ISSET(j, __kmp_affin_fullMask)) ||
                  (! KMP_CPU_ISSET(j+stride, KMP_CPU_INDEX(osId2Mask, j+stride)))) {
                    if ((__kmp_affinity_verbose || (__kmp_affinity_warnings
                      && (__kmp_affinity_type != affinity_none))) && i < count - 1) {
                        KMP_WARNING(AffIgnoreInvalidProcID, j+stride);
                    }
                    continue;
                }
                KMP_CPU_SET(j+stride, tempMask);
                setSize++;
            }
        }
        KMP_CPU_ZERO(tempMask);
        setSize = 0;

        // Valid follow sets are ',' and EOL.
        SKIP_WS(scan);
        if (*scan == '\0') {
            break;
        }
        if (*scan == ',') {
            scan++;     // skip ','
            continue;
        }

        KMP_ASSERT2(0, "bad explicit places list");
    }

    *out_numMasks = nextNewMask;
    if (nextNewMask == 0) {
        *out_masks = NULL;
        KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
        return;
    }
    KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
    KMP_CPU_FREE(tempMask);
    KMP_CPU_FREE(previousMask);
    for (i = 0; i < nextNewMask; i++) {
        kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i);
        kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i);
        KMP_CPU_COPY(dest, src);
    }
    KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
}
#undef ADD_MASK_OSID
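
//
// Trim the topology according to the KMP_PLACE_THREADS-style subset
// request (number of sockets, cores per socket, and threads per core,
// each with an optional offset). Procs that fall outside the requested
// subset are removed from the full mask and from the address2os table.
// For example (illustrative values only), on a 2-socket, 8-core, 2-way
// SMT machine a request of 1 socket x 4 cores x 1 thread keeps 4 of the
// 32 hardware threads.
//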
static void
__kmp_apply_thread_places(AddrUnsPair **pAddr, int depth)
{
    int i, j, k, n_old = 0, n_new = 0, proc_num = 0;
    if (__kmp_place_num_sockets == 0 &&
        __kmp_place_num_cores == 0 &&
        __kmp_place_num_threads_per_core == 0)
        goto _exit;   // no topology limiting actions requested, exit
    // A zero setting means "use everything available" at that level.
    if (__kmp_place_num_sockets == 0)
        __kmp_place_num_sockets = nPackages;        // use all available sockets
    if (__kmp_place_num_cores == 0)
        __kmp_place_num_cores = nCoresPerPkg;       // use all available cores
    if (__kmp_place_num_threads_per_core == 0 ||
        __kmp_place_num_threads_per_core > __kmp_nThreadsPerCore)
        __kmp_place_num_threads_per_core = __kmp_nThreadsPerCore; // use all HW contexts

    if (!__kmp_affinity_uniform_topology()) {
        KMP_WARNING(AffHWSubsetNonUniform);
        goto _exit;   // don't support non-uniform topology
    }
    if (depth != 3) {
        KMP_WARNING(AffHWSubsetNonThreeLevel);
        goto _exit;   // don't support not-3-level topology
    }
    if (__kmp_place_socket_offset + __kmp_place_num_sockets > nPackages) {
        KMP_WARNING(AffHWSubsetManySockets);
        goto _exit;
    }
    if (__kmp_place_core_offset + __kmp_place_num_cores > nCoresPerPkg) {
        KMP_WARNING(AffHWSubsetManyCores);
        goto _exit;
    }

    AddrUnsPair *newAddr = NULL;
    if (pAddr)  // pAddr is NULL in case of affinity_none
        newAddr = (AddrUnsPair *)__kmp_allocate(sizeof(AddrUnsPair) *
          __kmp_place_num_sockets * __kmp_place_num_cores *
          __kmp_place_num_threads_per_core);

    for (i = 0; i < nPackages; ++i) {
        if (i < __kmp_place_socket_offset ||
            i >= __kmp_place_socket_offset + __kmp_place_num_sockets) {
            n_old += nCoresPerPkg * __kmp_nThreadsPerCore; // skip not-requested socket
            if (__kmp_pu_os_idx != NULL) {
                // walk through the skipped socket, clearing its procs
                for (j = 0; j < nCoresPerPkg; ++j) {
                    for (k = 0; k < __kmp_nThreadsPerCore; ++k) {
                        KMP_CPU_CLR(__kmp_pu_os_idx[proc_num], __kmp_affin_fullMask);
                        ++proc_num;
                    }
                }
            }
        }
        else {
            for (j = 0; j < nCoresPerPkg; ++j) { // walk through requested socket
                if (j < __kmp_place_core_offset ||
                    j >= __kmp_place_core_offset + __kmp_place_num_cores) {
                    n_old += __kmp_nThreadsPerCore; // skip not-requested core
                    if (__kmp_pu_os_idx != NULL) {
                        for (k = 0; k < __kmp_nThreadsPerCore; ++k) {
                            KMP_CPU_CLR(__kmp_pu_os_idx[proc_num], __kmp_affin_fullMask);
                            ++proc_num;
                        }
                    }
                }
                else {
                    for (k = 0; k < __kmp_nThreadsPerCore; ++k) { // walk through requested core
                        if (k < __kmp_place_num_threads_per_core) {
                            if (pAddr)
                                newAddr[n_new] = (*pAddr)[n_old]; // collect requested thread's data
                            n_new++;
                        }
                        else {
                            if (__kmp_pu_os_idx != NULL)
                                KMP_CPU_CLR(__kmp_pu_os_idx[proc_num], __kmp_affin_fullMask);
                        }
                        n_old++;
                        ++proc_num;
                    }
                }
            }
        }
    }
    KMP_DEBUG_ASSERT(n_old == nPackages * nCoresPerPkg * __kmp_nThreadsPerCore);
    KMP_DEBUG_ASSERT(n_new == __kmp_place_num_sockets * __kmp_place_num_cores *
      __kmp_place_num_threads_per_core);

    nPackages = __kmp_place_num_sockets;              // correct nPackages
    nCoresPerPkg = __kmp_place_num_cores;             // correct nCoresPerPkg
    __kmp_nThreadsPerCore = __kmp_place_num_threads_per_core; // correct __kmp_nThreadsPerCore
    __kmp_avail_proc = n_new;                         // correct avail_proc
    __kmp_ncores = nPackages * __kmp_place_num_cores; // correct ncores

    if (pAddr) {
        __kmp_free(*pAddr);
        *pAddr = newAddr;   // replace old topology with new one
    }
_exit:
    if (__kmp_pu_os_idx != NULL) {
        __kmp_free(__kmp_pu_os_idx);
        __kmp_pu_os_idx = NULL;
    }
}
static AddrUnsPair *address2os = NULL;
static int * procarr = NULL;
static int __kmp_aff_depth = 0;
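
//
// KMP_EXIT_AFF_NONE is used within __kmp_aux_affinity_initialize() below to
// bail out early once it is known that affinity will stay disabled: it
// sanity-checks the state and returns from the enclosing function.
//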
#define KMP_EXIT_AFF_NONE                             \
    KMP_ASSERT(__kmp_affinity_type == affinity_none); \
    KMP_ASSERT(address2os == NULL);                   \
    __kmp_apply_thread_places(NULL, 0);               \
    return;

static void
__kmp_aux_affinity_initialize(void)
{
    if (__kmp_affinity_masks != NULL) {
        KMP_ASSERT(__kmp_affin_fullMask != NULL);
        return;     // already initialized
    }

    // Create the "full" mask, which defines all of the processors in the
    // machine model: either the initialization thread's affinity mask (if
    // respect is set) or all processors we know about on the machine.
    if (__kmp_affin_fullMask == NULL) {
        KMP_CPU_ALLOC(__kmp_affin_fullMask);
    }
    if (KMP_AFFINITY_CAPABLE()) {
        if (__kmp_affinity_respect_mask) {
            __kmp_get_system_affinity(__kmp_affin_fullMask, TRUE);

            // Count the number of available processors.
            unsigned i;
            __kmp_avail_proc = 0;
            KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) {
                if (! KMP_CPU_ISSET(i, __kmp_affin_fullMask)) {
                    continue;
                }
                __kmp_avail_proc++;
            }
            if (__kmp_avail_proc > __kmp_xproc) {
                if (__kmp_affinity_verbose || (__kmp_affinity_warnings
                  && (__kmp_affinity_type != affinity_none))) {
                    KMP_WARNING(ErrorInitializeAffinity);
                }
                __kmp_affinity_type = affinity_none;
                KMP_AFFINITY_DISABLE();
                return;
            }
        }
        else {
            __kmp_affinity_entire_machine_mask(__kmp_affin_fullMask);
            __kmp_avail_proc = __kmp_xproc;
        }
    }

    int depth = -1;
    kmp_i18n_id_t msg_id = kmp_i18n_null;

    // For backward compatibility, setting KMP_CPUINFO_FILE implies
    // KMP_TOPOLOGY_METHOD=cpuinfo.
    if ((__kmp_cpuinfo_file != NULL) &&
      (__kmp_affinity_top_method == affinity_top_method_all)) {
        __kmp_affinity_top_method = affinity_top_method_cpuinfo;
    }
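
    //
    // In the default code path below, topology detection methods are tried
    // in order until one succeeds: hwloc (if built in), the x2APIC-leaf and
    // legacy-APIC id decoders on x86, parsing /proc/cpuinfo, Windows
    // processor groups, and finally a flat OS-proc-id map as the fallback.
    // Failures here are not fatal; the next method is simply tried.
    //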
    if (__kmp_affinity_top_method == affinity_top_method_all) {
        const char *file_name = NULL;
        int line = 0;

# if KMP_USE_HWLOC
        if (depth < 0) {
            if (__kmp_affinity_verbose) {
                KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
            }
            if (!__kmp_hwloc_error) {
                depth = __kmp_affinity_create_hwloc_map(&address2os, &msg_id);
                if (depth == 0) {
                    KMP_EXIT_AFF_NONE;
                }
                else if (depth < 0 && __kmp_affinity_verbose) {
                    KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
                }
            }
            else if (__kmp_affinity_verbose) {
                KMP_INFORM(AffIgnoringHwloc, "KMP_AFFINITY");
            }
        }
# endif

# if KMP_ARCH_X86 || KMP_ARCH_X86_64
        if (depth < 0) {
            if (__kmp_affinity_verbose) {
                KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
            }

            file_name = NULL;
            depth = __kmp_affinity_create_x2apicid_map(&address2os, &msg_id);
            if (depth == 0) {
                KMP_EXIT_AFF_NONE;
            }

            if (depth < 0) {
                if (__kmp_affinity_verbose) {
                    if (msg_id != kmp_i18n_null) {
                        KMP_INFORM(AffInfoStrStr, "KMP_AFFINITY", __kmp_i18n_catgets(msg_id),
                          KMP_I18N_STR(DecodingLegacyAPIC));
                    }
                    else {
                        KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(DecodingLegacyAPIC));
                    }
                }

                file_name = NULL;
                depth = __kmp_affinity_create_apicid_map(&address2os, &msg_id);
                if (depth == 0) {
                    KMP_EXIT_AFF_NONE;
                }
            }
        }
# endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

# if KMP_OS_LINUX
        if (depth < 0) {
            if (__kmp_affinity_verbose) {
                if (msg_id != kmp_i18n_null) {
                    KMP_INFORM(AffStrParseFilename, "KMP_AFFINITY",
                      __kmp_i18n_catgets(msg_id), "/proc/cpuinfo");
                }
                else {
                    KMP_INFORM(AffParseFilename, "KMP_AFFINITY", "/proc/cpuinfo");
                }
            }

            FILE *f = fopen("/proc/cpuinfo", "r");
            if (f == NULL) {
                msg_id = kmp_i18n_str_CantOpenCpuinfo;
            }
            else {
                file_name = "/proc/cpuinfo";
                depth = __kmp_affinity_create_cpuinfo_map(&address2os, &line, &msg_id, f);
                fclose(f);
                if (depth == 0) {
                    KMP_EXIT_AFF_NONE;
                }
            }
        }
# endif /* KMP_OS_LINUX */

# if KMP_GROUP_AFFINITY
        if ((depth < 0) && (__kmp_num_proc_groups > 1)) {
            if (__kmp_affinity_verbose) {
                KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
            }

            depth = __kmp_affinity_create_proc_group_map(&address2os, &msg_id);
            KMP_ASSERT(depth != 0);
        }
# endif /* KMP_GROUP_AFFINITY */

        if (depth < 0) {
            if (__kmp_affinity_verbose && (msg_id != kmp_i18n_null)) {
                if (file_name == NULL) {
                    KMP_INFORM(UsingFlatOS, __kmp_i18n_catgets(msg_id));
                }
                else if (line == 0) {
                    KMP_INFORM(UsingFlatOSFile, file_name, __kmp_i18n_catgets(msg_id));
                }
                else {
                    KMP_INFORM(UsingFlatOSFileLine, file_name, line, __kmp_i18n_catgets(msg_id));
                }
            }

            file_name = "";
            depth = __kmp_affinity_create_flat_map(&address2os, &msg_id);
            if (depth == 0) {
                KMP_EXIT_AFF_NONE;
            }
            KMP_ASSERT(depth > 0);
            KMP_ASSERT(address2os != NULL);
        }
    }
    // If the user specified a particular topology discovery method, abort
    // if that method fails.
# if KMP_ARCH_X86 || KMP_ARCH_X86_64
    else if (__kmp_affinity_top_method == affinity_top_method_x2apicid) {
        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffInfoStr, "KMP_AFFINITY",
              KMP_I18N_STR(Decodingx2APIC));
        }

        depth = __kmp_affinity_create_x2apicid_map(&address2os, &msg_id);
        if (depth == 0) {
            KMP_EXIT_AFF_NONE;
        }
        if (depth < 0) {
            KMP_ASSERT(msg_id != kmp_i18n_null);
            KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
        }
    }
    else if (__kmp_affinity_top_method == affinity_top_method_apicid) {
        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffInfoStr, "KMP_AFFINITY",
              KMP_I18N_STR(DecodingLegacyAPIC));
        }

        depth = __kmp_affinity_create_apicid_map(&address2os, &msg_id);
        if (depth == 0) {
            KMP_EXIT_AFF_NONE;
        }
        if (depth < 0) {
            KMP_ASSERT(msg_id != kmp_i18n_null);
            KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
        }
    }
# endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
    else if (__kmp_affinity_top_method == affinity_top_method_cpuinfo) {
        const char *filename;
        if (__kmp_cpuinfo_file != NULL) {
            filename = __kmp_cpuinfo_file;
        }
        else {
            filename = "/proc/cpuinfo";
        }

        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffParseFilename, "KMP_AFFINITY", filename);
        }

        FILE *f = fopen(filename, "r");
        if (f == NULL) {
            int code = errno;
            if (__kmp_cpuinfo_file != NULL) {
                __kmp_msg(kmp_ms_fatal,
                  KMP_MSG(CantOpenFileForReading, filename),
                  KMP_ERR(code),
                  KMP_HNT(NameComesFrom_CPUINFO_FILE),
                  __kmp_msg_null);
            }
            else {
                __kmp_msg(kmp_ms_fatal,
                  KMP_MSG(CantOpenFileForReading, filename),
                  KMP_ERR(code),
                  __kmp_msg_null);
            }
        }
        int line = 0;
        depth = __kmp_affinity_create_cpuinfo_map(&address2os, &line, &msg_id, f);
        fclose(f);
        if (depth < 0) {
            KMP_ASSERT(msg_id != kmp_i18n_null);
            if (line > 0) {
                KMP_FATAL(FileLineMsgExiting, filename, line, __kmp_i18n_catgets(msg_id));
            }
            else {
                KMP_FATAL(FileMsgExiting, filename, __kmp_i18n_catgets(msg_id));
            }
        }
        if (__kmp_affinity_type == affinity_none) {
            KMP_ASSERT(depth == 0);
            KMP_EXIT_AFF_NONE;
        }
    }
# if KMP_GROUP_AFFINITY
    else if (__kmp_affinity_top_method == affinity_top_method_group) {
        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
        }

        depth = __kmp_affinity_create_proc_group_map(&address2os, &msg_id);
        KMP_ASSERT(depth != 0);
        if (depth < 0) {
            KMP_ASSERT(msg_id != kmp_i18n_null);
            KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id));
        }
    }
# endif /* KMP_GROUP_AFFINITY */

    else if (__kmp_affinity_top_method == affinity_top_method_flat) {
        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffUsingFlatOS, "KMP_AFFINITY");
        }

        depth = __kmp_affinity_create_flat_map(&address2os, &msg_id);
        if (depth == 0) {
            KMP_EXIT_AFF_NONE;
        }
        // should not fail
        KMP_ASSERT(depth > 0);
        KMP_ASSERT(address2os != NULL);
    }

# if KMP_USE_HWLOC
    else if (__kmp_affinity_top_method == affinity_top_method_hwloc) {
        if (__kmp_affinity_verbose) {
            KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
        }
        depth = __kmp_affinity_create_hwloc_map(&address2os, &msg_id);
        if (depth == 0) {
            KMP_EXIT_AFF_NONE;
        }
    }
# endif // KMP_USE_HWLOC
    if (address2os == NULL) {
        if (KMP_AFFINITY_CAPABLE()
          && (__kmp_affinity_verbose || (__kmp_affinity_warnings
          && (__kmp_affinity_type != affinity_none)))) {
            KMP_WARNING(ErrorInitializeAffinity);
        }
        __kmp_affinity_type = affinity_none;
        KMP_AFFINITY_DISABLE();
        return;
    }

    __kmp_apply_thread_places(&address2os, depth);

    //
    // Create the table of masks, indexed by thread Id.
    //
    unsigned maxIndex;
    unsigned numUnique;
    kmp_affin_mask_t *osId2Mask = __kmp_create_masks(&maxIndex, &numUnique,
      address2os, __kmp_avail_proc);
    if (__kmp_affinity_gran_levels == 0) {
        KMP_DEBUG_ASSERT((int)numUnique == __kmp_avail_proc);
    }
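
    //
    // Most affinity types below are implemented by sorting address2os by a
    // permutation of the topology levels (controlled by
    // __kmp_affinity_compact and __kmp_affinity_offset) and then carving
    // the sorted table into one mask per place at the "sortAddresses"
    // label. Only affinity_explicit and affinity_balanced take a different
    // route.
    //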
    // Set the childNums vector in all Address objects. This must be done
    // before we can sort using __kmp_affinity_cmp_Address_child_num(),
    // which takes into account the setting of __kmp_affinity_compact.
    __kmp_affinity_assign_child_nums(address2os, __kmp_avail_proc);

    switch (__kmp_affinity_type) {

        case affinity_explicit:
        KMP_DEBUG_ASSERT(__kmp_affinity_proclist != NULL);
        if (__kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) {
            __kmp_affinity_process_proclist(&__kmp_affinity_masks,
              &__kmp_affinity_num_masks, __kmp_affinity_proclist, osId2Mask,
              maxIndex);
        }
        else {
            __kmp_affinity_process_placelist(&__kmp_affinity_masks,
              &__kmp_affinity_num_masks, __kmp_affinity_proclist, osId2Mask,
              maxIndex);
        }
        if (__kmp_affinity_num_masks == 0) {
            if (__kmp_affinity_verbose || (__kmp_affinity_warnings
              && (__kmp_affinity_type != affinity_none))) {
                KMP_WARNING(AffNoValidProcID);
            }
            __kmp_affinity_type = affinity_none;
            return;
        }
        break;

        case affinity_logical:
        __kmp_affinity_compact = 0;
        if (__kmp_affinity_offset) {
            __kmp_affinity_offset = __kmp_nThreadsPerCore * __kmp_affinity_offset
              % __kmp_avail_proc;
        }
        goto sortAddresses;

        case affinity_physical:
        if (__kmp_nThreadsPerCore > 1) {
            __kmp_affinity_compact = 1;
            if (__kmp_affinity_compact >= depth) {
                __kmp_affinity_compact = 0;
            }
        }
        else {
            __kmp_affinity_compact = 0;
        }
        if (__kmp_affinity_offset) {
            __kmp_affinity_offset = __kmp_nThreadsPerCore * __kmp_affinity_offset
              % __kmp_avail_proc;
        }
        goto sortAddresses;

        case affinity_scatter:
        if (__kmp_affinity_compact >= depth) {
            __kmp_affinity_compact = 0;
        }
        else {
            __kmp_affinity_compact = depth - 1 - __kmp_affinity_compact;
        }
        goto sortAddresses;

        case affinity_compact:
        if (__kmp_affinity_compact >= depth) {
            __kmp_affinity_compact = depth - 1;
        }
        goto sortAddresses;
        case affinity_balanced:
        // Balanced affinity works only for the case of a single package.
        if( nPackages > 1 ) {
            if( __kmp_affinity_verbose || __kmp_affinity_warnings ) {
                KMP_WARNING( AffBalancedNotAvail, "KMP_AFFINITY" );
            }
            __kmp_affinity_type = affinity_none;
            return;
        }
        else if( __kmp_affinity_uniform_topology() ) {
            break;
        }
        else { // Non-uniform topology

            // Save the depth for further usage.
            __kmp_aff_depth = depth;

            // Number of hardware contexts per core.
            int nth_per_core = __kmp_nThreadsPerCore;

            int core_level;
            if( nth_per_core > 1 ) { // HT processor
                core_level = depth - 2;
            }
            else {                   // non-HT processor
                core_level = depth - 1;
            }
            int ncores = address2os[ __kmp_avail_proc - 1 ].first.labels[ core_level ] + 1;
            int nproc = nth_per_core * ncores;

            // procarr maps (core, thread context) -> OS proc id, with -1
            // marking slots that have no available proc.
            procarr = ( int * )__kmp_allocate( sizeof( int ) * nproc );
            for( int i = 0; i < nproc; i++ ) {
                procarr[ i ] = -1;
            }

            for( int i = 0; i < __kmp_avail_proc; i++ ) {
                int proc = address2os[ i ].second;
                // For depth == 3 the levels are package, core, thread;
                // for depth == 2 they are package, core.
                int level = depth - 1;

                int thread = 0;
                int core = address2os[ i ].first.labels[ level ];
                // If there is more than one thread context per core, the
                // last label is the thread id and the previous one is the
                // core id.
                if( nth_per_core > 1 ) {
                    thread = address2os[ i ].first.labels[ level ] % nth_per_core;
                    core = address2os[ i ].first.labels[ level - 1 ];
                }
                procarr[ core * nth_per_core + thread ] = proc;
            }

            break;
        }
        sortAddresses:
        // Allocate the gtid->affinity mask table.
        if (__kmp_affinity_dups) {
            __kmp_affinity_num_masks = __kmp_avail_proc;
        }
        else {
            __kmp_affinity_num_masks = numUnique;
        }

        // If OMP_PLACES requested fewer places than we found unique masks,
        // honor the requested count.
        if ( ( __kmp_nested_proc_bind.bind_types[0] != proc_bind_intel )
          && ( __kmp_affinity_num_places > 0 )
          && ( ( unsigned )__kmp_affinity_num_places < __kmp_affinity_num_masks ) ) {
            __kmp_affinity_num_masks = __kmp_affinity_num_places;
        }

        KMP_CPU_ALLOC_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);

        // Sort the address2os table according to the current setting of
        // __kmp_affinity_compact, then fill out __kmp_affinity_masks.
        qsort(address2os, __kmp_avail_proc, sizeof(*address2os),
          __kmp_affinity_cmp_Address_child_num);
        {
            int i;
            unsigned j;
            for (i = 0, j = 0; i < __kmp_avail_proc; i++) {
                if ((! __kmp_affinity_dups) && (! address2os[i].first.leader)) {
                    continue;
                }
                unsigned osId = address2os[i].second;
                kmp_affin_mask_t *src = KMP_CPU_INDEX(osId2Mask, osId);
                kmp_affin_mask_t *dest
                  = KMP_CPU_INDEX(__kmp_affinity_masks, j);
                KMP_ASSERT(KMP_CPU_ISSET(osId, src));
                KMP_CPU_COPY(dest, src);
                if (++j >= __kmp_affinity_num_masks) {
                    break;
                }
            }
            KMP_DEBUG_ASSERT(j == __kmp_affinity_num_masks);
        }
        break;

        default:
        KMP_ASSERT2(0, "Unexpected affinity setting");
    }

    __kmp_free(osId2Mask);
    machine_hierarchy.init(address2os, __kmp_avail_proc);
}
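
//
// Public entry point. Affinity-incapable machines are represented as
// __kmp_affinity_type == affinity_disabled; since the worker code above
// was written assuming affinity_none in that case, the disabled state is
// temporarily mapped to affinity_none around the call and then restored.
//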
#undef KMP_EXIT_AFF_NONE

void
__kmp_affinity_initialize(void)
{
    int disabled = (__kmp_affinity_type == affinity_disabled);
    if (! KMP_AFFINITY_CAPABLE()) {
        KMP_ASSERT(disabled);
    }
    if (disabled) {
        __kmp_affinity_type = affinity_none;
    }
    __kmp_aux_affinity_initialize();
    if (disabled) {
        __kmp_affinity_type = affinity_disabled;
    }
}
void
__kmp_affinity_uninitialize(void)
{
    if (__kmp_affinity_masks != NULL) {
        KMP_CPU_FREE_ARRAY(__kmp_affinity_masks, __kmp_affinity_num_masks);
        __kmp_affinity_masks = NULL;
    }
    if (__kmp_affin_fullMask != NULL) {
        KMP_CPU_FREE(__kmp_affin_fullMask);
        __kmp_affin_fullMask = NULL;
    }
    __kmp_affinity_num_masks = 0;
    __kmp_affinity_num_places = 0;
    if (__kmp_affinity_proclist != NULL) {
        __kmp_free(__kmp_affinity_proclist);
        __kmp_affinity_proclist = NULL;
    }
    if( address2os != NULL ) {
        __kmp_free( address2os );
        address2os = NULL;
    }
    if( procarr != NULL ) {
        __kmp_free( procarr );
        procarr = NULL;
    }
# if KMP_USE_HWLOC
    if (__kmp_hwloc_topology != NULL) {
        hwloc_topology_destroy(__kmp_hwloc_topology);
        __kmp_hwloc_topology = NULL;
    }
# endif
}
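
//
// Bind a newly created thread to its initial place. When places are in
// effect, the place index is chosen round-robin as
// (gtid + __kmp_affinity_offset) % __kmp_affinity_num_masks; e.g. with 4
// places and a zero offset, threads 0..7 land on places 0,1,2,3,0,1,2,3.
// With affinity_none or affinity_balanced, the full mask is used instead.
//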
void
__kmp_affinity_set_init_mask(int gtid, int isa_root)
{
    if (! KMP_AFFINITY_CAPABLE()) {
        return;
    }

    kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);
    if (th->th.th_affin_mask == NULL) {
        KMP_CPU_ALLOC(th->th.th_affin_mask);
    }
    else {
        KMP_CPU_ZERO(th->th.th_affin_mask);
    }

    // Copy the thread mask to the kmp_info_t structure. If
    // __kmp_affinity_type == affinity_none, copy the "full" mask instead.
    kmp_affin_mask_t *mask;
    int i;

    if (__kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) {
        if ((__kmp_affinity_type == affinity_none)
          || (__kmp_affinity_type == affinity_balanced)) {
# if KMP_GROUP_AFFINITY
            if (__kmp_num_proc_groups > 1) {
                return;
            }
# endif
            KMP_ASSERT(__kmp_affin_fullMask != NULL);
            i = KMP_PLACE_ALL;
            mask = __kmp_affin_fullMask;
        }
        else {
            KMP_DEBUG_ASSERT( __kmp_affinity_num_masks > 0 );
            i = (gtid + __kmp_affinity_offset) % __kmp_affinity_num_masks;
            mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
        }
    }
    else {
        if ((! isa_root)
          || (__kmp_nested_proc_bind.bind_types[0] == proc_bind_false)) {
# if KMP_GROUP_AFFINITY
            if (__kmp_num_proc_groups > 1) {
                return;
            }
# endif
            KMP_ASSERT(__kmp_affin_fullMask != NULL);
            i = KMP_PLACE_ALL;
            mask = __kmp_affin_fullMask;
        }
        else {
            KMP_DEBUG_ASSERT( __kmp_affinity_num_masks > 0 );
            i = (gtid + __kmp_affinity_offset) % __kmp_affinity_num_masks;
            mask = KMP_CPU_INDEX(__kmp_affinity_masks, i);
        }
    }

# if OMP_40_ENABLED
    th->th.th_current_place = i;
    if (isa_root) {
        th->th.th_new_place = i;
        th->th.th_first_place = 0;
        th->th.th_last_place = __kmp_affinity_num_masks - 1;
    }

    if (i == KMP_PLACE_ALL) {
        KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to all places\n", gtid));
    }
    else {
        KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to place %d\n", gtid, i));
    }
# else
    if (i == -1) {
        KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to __kmp_affin_fullMask\n", gtid));
    }
    else {
        KA_TRACE(100, ("__kmp_affinity_set_init_mask: binding T#%d to mask %d\n", gtid, i));
    }
# endif /* OMP_40_ENABLED */

    KMP_CPU_COPY(th->th.th_affin_mask, mask);

    if (__kmp_affinity_verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          th->th.th_affin_mask);
        KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(), gtid,
          buf);
    }

# if KMP_OS_WINDOWS
    // On Windows* OS, the process affinity mask might have changed. If the
    // user didn't request affinity and this call fails, just continue
    // silently.
    if ( __kmp_affinity_type == affinity_none ) {
        __kmp_set_system_affinity(th->th.th_affin_mask, FALSE);
    }
    else
# endif
    __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}
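
//
// Move a thread to its currently assigned place. Unlike the initial
// binding above, this is used when OMP_PLACES/proc_bind migrates a thread
// to a new place within its partition; the place index must lie inside
// [th_first_place, th_last_place] (possibly a wrapped-around interval).
//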
void
__kmp_affinity_set_place(int gtid)
{
    if (! KMP_AFFINITY_CAPABLE()) {
        return;
    }

    kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]);

    KA_TRACE(100, ("__kmp_affinity_set_place: binding T#%d to place %d (current place = %d)\n",
      gtid, th->th.th_new_place, th->th.th_current_place));

    // Check that the new place is within this thread's partition.
    KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
    KMP_ASSERT(th->th.th_new_place >= 0);
    KMP_ASSERT((unsigned)th->th.th_new_place <= __kmp_affinity_num_masks);
    if (th->th.th_first_place <= th->th.th_last_place) {
        KMP_ASSERT((th->th.th_new_place >= th->th.th_first_place)
          && (th->th.th_new_place <= th->th.th_last_place));
    }
    else {
        KMP_ASSERT((th->th.th_new_place <= th->th.th_first_place)
          || (th->th.th_new_place >= th->th.th_last_place));
    }

    // Copy the thread mask to the kmp_info_t structure, and set this
    // thread's affinity.
    kmp_affin_mask_t *mask = KMP_CPU_INDEX(__kmp_affinity_masks,
      th->th.th_new_place);
    KMP_CPU_COPY(th->th.th_affin_mask, mask);
    th->th.th_current_place = th->th.th_new_place;

    if (__kmp_affinity_verbose) {
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          th->th.th_affin_mask);
        KMP_INFORM(BoundToOSProcSet, "OMP_PROC_BIND", (kmp_int32)getpid(),
          gtid, buf);
    }
    __kmp_set_system_affinity(th->th.th_affin_mask, TRUE);
}
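
//
// Worker for the kmp_set_affinity() API entry point. With consistency
// checks enabled, the requested mask must be non-NULL, contain at least
// one proc, and contain only procs present in the full mask; setting an
// explicit mask also turns off OMP 4.0 place-based affinity for the
// thread.
//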
int
__kmp_aux_set_affinity(void **mask)
{
    int gtid;
    kmp_info_t *th;
    int retval;

    if (! KMP_AFFINITY_CAPABLE()) {
        return -1;
    }

    gtid = __kmp_entry_gtid();
    KA_TRACE(1000, ;{
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_set_affinity: setting affinity mask for thread %d = %s\n",
          gtid, buf);
    });

    if (__kmp_env_consistency_check) {
        if ((mask == NULL) || (*mask == NULL)) {
            KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
        }
        else {
            unsigned proc;
            int num_procs = 0;

            KMP_CPU_SET_ITERATE(proc, ((kmp_affin_mask_t*)(*mask))) {
                if (! KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
                    KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
                }
                if (! KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask))) {
                    continue;
                }
                num_procs++;
            }
            if (num_procs == 0) {
                KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
            }
# if KMP_GROUP_AFFINITY
            if (__kmp_get_proc_group((kmp_affin_mask_t *)(*mask)) < 0) {
                KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
            }
# endif /* KMP_GROUP_AFFINITY */
        }
    }

    th = __kmp_threads[gtid];
    KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
    retval = __kmp_set_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
    if (retval == 0) {
        KMP_CPU_COPY(th->th.th_affin_mask, (kmp_affin_mask_t *)(*mask));
    }

    th->th.th_current_place = KMP_PLACE_UNDEFINED;
    th->th.th_new_place = KMP_PLACE_UNDEFINED;
    th->th.th_first_place = 0;
    th->th.th_last_place = __kmp_affinity_num_masks - 1;

    // Turn off 4.0 affinity for the current thread at this parallel level.
    th->th.th_current_task->td_icvs.proc_bind = proc_bind_false;

    return retval;
}
int
__kmp_aux_get_affinity(void **mask)
{
    int gtid;
    int retval;
    kmp_info_t *th;

    if (! KMP_AFFINITY_CAPABLE()) {
        return -1;
    }

    gtid = __kmp_entry_gtid();
    th = __kmp_threads[gtid];
    KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);

    KA_TRACE(1000, ;{
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          th->th.th_affin_mask);
        __kmp_printf("kmp_get_affinity: stored affinity mask for thread %d = %s\n", gtid, buf);
    });

    if (__kmp_env_consistency_check) {
        if ((mask == NULL) || (*mask == NULL)) {
            KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity");
        }
    }

# if !KMP_OS_WINDOWS

    retval = __kmp_get_system_affinity((kmp_affin_mask_t *)(*mask), FALSE);
    KA_TRACE(1000, ;{
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          (kmp_affin_mask_t *)(*mask));
        __kmp_printf("kmp_get_affinity: system affinity mask for thread %d = %s\n", gtid, buf);
    });
    return retval;

# else

    KMP_CPU_COPY((kmp_affin_mask_t *)(*mask), th->th.th_affin_mask);
    return 0;

# endif /* !KMP_OS_WINDOWS */
}
int
__kmp_aux_set_affinity_mask_proc(int proc, void **mask)
{
    if (! KMP_AFFINITY_CAPABLE()) {
        return -1;
    }

    KA_TRACE(1000, ;{
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_set_affinity_mask_proc: setting proc %d in affinity mask for thread %d = %s\n",
          proc, gtid, buf);
    });

    if (__kmp_env_consistency_check) {
        if ((mask == NULL) || (*mask == NULL)) {
            KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity_mask_proc");
        }
    }

    if ((proc < 0) || ((unsigned)proc >= KMP_CPU_SETSIZE)) {
        return -1;
    }
    if (! KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
        return -2;
    }

    KMP_CPU_SET(proc, (kmp_affin_mask_t *)(*mask));
    return 0;
}
int
__kmp_aux_unset_affinity_mask_proc(int proc, void **mask)
{
    if (! KMP_AFFINITY_CAPABLE()) {
        return -1;
    }

    KA_TRACE(1000, ;{
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_unset_affinity_mask_proc: unsetting proc %d in affinity mask for thread %d = %s\n",
          proc, gtid, buf);
    });

    if (__kmp_env_consistency_check) {
        if ((mask == NULL) || (*mask == NULL)) {
            KMP_FATAL(AffinityInvalidMask, "kmp_unset_affinity_mask_proc");
        }
    }

    if ((proc < 0) || ((unsigned)proc >= KMP_CPU_SETSIZE)) {
        return -1;
    }
    if (! KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
        return -2;
    }

    KMP_CPU_CLR(proc, (kmp_affin_mask_t *)(*mask));
    return 0;
}
int
__kmp_aux_get_affinity_mask_proc(int proc, void **mask)
{
    if (! KMP_AFFINITY_CAPABLE()) {
        return -1;
    }

    KA_TRACE(1000, ;{
        int gtid = __kmp_entry_gtid();
        char buf[KMP_AFFIN_MASK_PRINT_LEN];
        __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN,
          (kmp_affin_mask_t *)(*mask));
        __kmp_debug_printf("kmp_get_affinity_mask_proc: getting proc %d in affinity mask for thread %d = %s\n",
          proc, gtid, buf);
    });

    if (__kmp_env_consistency_check) {
        if ((mask == NULL) || (*mask == NULL)) {
            KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity_mask_proc");
        }
    }

    if ((proc < 0) || ((unsigned)proc >= KMP_CPU_SETSIZE)) {
        return -1;
    }
    if (! KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) {
        return 0;
    }

    return KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask));
}
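
//
// Re-bind a thread under KMP_AFFINITY=balanced. On a uniform topology,
// threads are dealt out in contiguous chunks of chunk = nthreads / ncores
// per core, with the first nthreads % ncores cores receiving one extra
// thread. For example, nthreads = 10 on 4 cores gives chunk = 2 and
// remainder 2, so cores 0 and 1 each run 3 threads while cores 2 and 3
// run 2 threads each.
//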
void __kmp_balanced_affinity( int tid, int nthreads )
{
    if( __kmp_affinity_uniform_topology() ) {
        int coreID;
        int threadID;
        // Number of hyper threads per core in HT machine.
        int __kmp_nth_per_core = __kmp_avail_proc / __kmp_ncores;
        // Number of cores.
        int ncores = __kmp_ncores;
        // How many threads will be bound to each core.
        int chunk = nthreads / ncores;
        // How many cores will have an additional thread bound to it - "big cores".
        int big_cores = nthreads % ncores;
        // Number of threads on the big cores.
        int big_nth = ( chunk + 1 ) * big_cores;
        if( tid < big_nth ) {
            coreID = tid / (chunk + 1 );
            threadID = ( tid % (chunk + 1 ) ) % __kmp_nth_per_core ;
        } else { // tid >= big_nth
            coreID = ( tid - big_cores ) / chunk;
            threadID = ( ( tid - big_cores ) % chunk ) % __kmp_nth_per_core ;
        }

        KMP_DEBUG_ASSERT2(KMP_AFFINITY_CAPABLE(),
          "Illegal set affinity operation when not capable");

        kmp_affin_mask_t *mask;
        KMP_CPU_ALLOC_ON_STACK(mask);
        KMP_CPU_ZERO(mask);

        if( __kmp_affinity_gran == affinity_gran_fine || __kmp_affinity_gran == affinity_gran_thread) {
            int osID = address2os[ coreID * __kmp_nth_per_core + threadID ].second;
            KMP_CPU_SET( osID, mask);
        }
        else if( __kmp_affinity_gran == affinity_gran_core ) {
            for( int i = 0; i < __kmp_nth_per_core; i++ ) {
                int osID;
                osID = address2os[ coreID * __kmp_nth_per_core + i ].second;
                KMP_CPU_SET( osID, mask);
            }
        }
        if (__kmp_affinity_verbose) {
            char buf[KMP_AFFIN_MASK_PRINT_LEN];
            __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
            KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
              tid, buf);
        }
        __kmp_set_system_affinity( mask, TRUE );
        KMP_CPU_FREE_FROM_STACK(mask);
    }
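    //
    // On a non-uniform topology, the (core, thread context) -> OS proc map
    // built into procarr at initialization time is used instead: slots
    // holding -1 mark hardware contexts with no available proc, so cores
    // can legitimately differ in how many procs they contribute.
    //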
    else { // Non-uniform topology
        kmp_affin_mask_t *mask;
        KMP_CPU_ALLOC_ON_STACK(mask);
        KMP_CPU_ZERO(mask);

        // Number of hyper threads per core in HT machine.
        int nth_per_core = __kmp_nThreadsPerCore;
        int core_level;
        if( nth_per_core > 1 ) {
            core_level = __kmp_aff_depth - 2;
        } else {
            core_level = __kmp_aff_depth - 1;
        }

        // Number of cores: maximum core id + 1.
        int ncores = address2os[ __kmp_avail_proc - 1 ].first.labels[ core_level ] + 1;

        if( nthreads == __kmp_avail_proc ) {
            if( __kmp_affinity_gran == affinity_gran_fine || __kmp_affinity_gran == affinity_gran_thread) {
                int osID = address2os[ tid ].second;
                KMP_CPU_SET( osID, mask);
            }
            else if( __kmp_affinity_gran == affinity_gran_core ) {
                int coreID = address2os[ tid ].first.labels[ core_level ];
                int cnt = 0;
                for( int i = 0; i < __kmp_avail_proc; i++ ) {
                    int osID = address2os[ i ].second;
                    int core = address2os[ i ].first.labels[ core_level ];
                    if( core == coreID ) {
                        KMP_CPU_SET( osID, mask);
                        cnt++;
                        if( cnt == nth_per_core ) {
                            break;
                        }
                    }
                }
            }
        }
        else if( nthreads <= __kmp_ncores ) {
            int core = 0;
            for( int i = 0; i < ncores; i++ ) {
                // Check whether this core has at least one available proc.
                int in_mask = 0;
                for( int j = 0; j < nth_per_core; j++ ) {
                    if( procarr[ i * nth_per_core + j ] != - 1 ) {
                        in_mask = 1;
                        break;
                    }
                }
                if( in_mask ) {
                    if( core == tid ) {
                        for( int j = 0; j < nth_per_core; j++ ) {
                            int osID = procarr[ i * nth_per_core + j ];
                            if( osID != -1 ) {
                                KMP_CPU_SET( osID, mask );
                                // For fine granularity the first available
                                // osID for this core is enough.
                                if( __kmp_affinity_gran == affinity_gran_fine || __kmp_affinity_gran == affinity_gran_thread) {
                                    break;
                                }
                            }
                        }
                        break;
                    } else {
                        core++;
                    }
                }
            }
        }
        else { // nthreads > __kmp_ncores

            // Array to save the number of processors at each core.
            int* nproc_at_core = ( int* )KMP_ALLOCA( sizeof( int ) * ncores );
            // Array to save the number of cores with "x" available processors.
            int* ncores_with_x_procs = ( int* )KMP_ALLOCA( sizeof( int ) * ( nth_per_core + 1 ) );
            // Array to save the number of cores with x to nth_per_core procs.
            int* ncores_with_x_to_max_procs = ( int* )KMP_ALLOCA( sizeof( int ) * ( nth_per_core + 1 ) );

            for( int i = 0; i <= nth_per_core; i++ ) {
                ncores_with_x_procs[ i ] = 0;
                ncores_with_x_to_max_procs[ i ] = 0;
            }

            for( int i = 0; i < ncores; i++ ) {
                int cnt = 0;
                for( int j = 0; j < nth_per_core; j++ ) {
                    if( procarr[ i * nth_per_core + j ] != -1 ) {
                        cnt++;
                    }
                }
                nproc_at_core[ i ] = cnt;
                ncores_with_x_procs[ cnt ]++;
            }

            for( int i = 0; i <= nth_per_core; i++ ) {
                for( int j = i; j <= nth_per_core; j++ ) {
                    ncores_with_x_to_max_procs[ i ] += ncores_with_x_procs[ j ];
                }
            }

            // Max number of processors.
            int nproc = nth_per_core * ncores;
            // An array to keep the number of threads per each context.
            int * newarr = ( int * )__kmp_allocate( sizeof( int ) * nproc );
            for( int i = 0; i < nproc; i++ ) {
                newarr[ i ] = 0;
            }

            // Deal threads out one per context, round robin; once every
            // context holds a thread (flag set), allow doubling up.
            int nth = nthreads;
            int flag = 0;
            while( nth > 0 ) {
                for( int j = 1; j <= nth_per_core; j++ ) {
                    int cnt = ncores_with_x_to_max_procs[ j ];
                    for( int i = 0; i < ncores; i++ ) {
                        // Skip the core with 0 processors.
                        if( nproc_at_core[ i ] == 0 ) {
                            continue;
                        }
                        for( int k = 0; k < nth_per_core; k++ ) {
                            if( procarr[ i * nth_per_core + k ] != -1 ) {
                                if( newarr[ i * nth_per_core + k ] == 0 ) {
                                    newarr[ i * nth_per_core + k ] = 1;
                                    cnt--;
                                    nth--;
                                    break;
                                } else {
                                    if( flag != 0 ) {
                                        newarr[ i * nth_per_core + k ] ++;
                                        cnt--;
                                        nth--;
                                        break;
                                    }
                                }
                            }
                        }
                        if( cnt == 0 || nth == 0 ) {
                            break;
                        }
                    }
                    if( nth == 0 ) {
                        break;
                    }
                }
                flag = 1;
            }

            // Find this thread's slot by walking the cumulative counts.
            int sum = 0;
            for( int i = 0; i < nproc; i++ ) {
                sum += newarr[ i ];
                if( sum > tid ) {
                    if( __kmp_affinity_gran == affinity_gran_fine || __kmp_affinity_gran == affinity_gran_thread) {
                        int osID = procarr[ i ];
                        KMP_CPU_SET( osID, mask);
                    }
                    else if( __kmp_affinity_gran == affinity_gran_core ) {
                        int coreID = i / nth_per_core;
                        for( int ii = 0; ii < nth_per_core; ii++ ) {
                            int osID = procarr[ coreID * nth_per_core + ii ];
                            if( osID != -1 ) {
                                KMP_CPU_SET( osID, mask);
                            }
                        }
                    }
                    break;
                }
            }
            __kmp_free( newarr );
        }

        if (__kmp_affinity_verbose) {
            char buf[KMP_AFFIN_MASK_PRINT_LEN];
            __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
            KMP_INFORM(BoundToOSProcSet, "KMP_AFFINITY", (kmp_int32)getpid(),
              tid, buf);
        }
        __kmp_set_system_affinity( mask, TRUE );
        KMP_CPU_FREE_FROM_STACK(mask);
    }
}
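
//
// kmp_set_thread_affinity_mask_initial() binds the calling thread back to
// the "initial" full affinity mask (__kmp_affin_fullMask). It appears to
// be intended for threads that want to undo a binding the runtime applied;
// it declines to act for threads unknown to the runtime or before middle
// initialization has completed.
//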
int
kmp_set_thread_affinity_mask_initial()
// the function returns 0 on success,
//   -1 if we cannot bind thread
//   >0 (errno) if an error happened during binding
{
    int gtid = __kmp_get_gtid();
    if (gtid < 0) {
        // Do not touch non-omp threads.
        KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
            "non-omp thread, returning\n"));
        return -1;
    }
    if (!KMP_AFFINITY_CAPABLE() || !__kmp_init_middle) {
        KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
            "affinity not initialized, returning\n"));
        return -1;
    }
    KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: "
        "set full mask for thread %d\n", gtid));
    KMP_DEBUG_ASSERT(__kmp_affin_fullMask != NULL);
    return __kmp_set_system_affinity(__kmp_affin_fullMask, FALSE);
}

#endif // KMP_AFFINITY_SUPPORTED