[OpenMP] NFC: Fix trivial typo

Differential Revision: https://reviews.llvm.org/D77430
Kazuaki Ishizaki 2020-04-04 12:06:29 +09:00
parent 3270748127
commit 4201679110
57 changed files with 120 additions and 120 deletions

@@ -91,5 +91,5 @@ if (OPENMP_ENABLE_OMPT_TOOLS)
 add_subdirectory(tools)
 endif()
-# Now that we have seen all testuites, create the check-openmp target.
+# Now that we have seen all testsuites, create the check-openmp target.
 construct_check_openmp_target()

@@ -200,7 +200,7 @@ NOINLINE static void check(bool cond) { assert(cond); }
 #endif
-// for printing without worring about precision, pointers...
+// for printing without worrying about precision, pointers...
 #define P64(_x) ((unsigned long long)(_x))
 ////////////////////////////////////////////////////////////////////////////////

@@ -206,8 +206,8 @@ public:
 INLINE void InitTeamDescr();
 INLINE __kmpc_data_sharing_slot *RootS(int wid, bool IsMasterThread) {
-// If this is invoked by the master thread of the master warp then intialize
-// it with a smaller slot.
+// If this is invoked by the master thread of the master warp then
+// initialize it with a smaller slot.
 if (IsMasterThread) {
 // Do not initialize this slot again if it has already been initalized.
 if (master_rootS[0].DataEnd == &master_rootS[0].Data[0] + DS_Slot_Size)

@@ -42,7 +42,7 @@ omptarget_nvptx_TaskDescr::InitLevelZeroTaskDescr() {
 items.flags = 0;
 items.threadId = 0; // is master
-items.runtimeChunkSize = 1; // prefered chunking statik with chunk 1
+items.runtimeChunkSize = 1; // preferred chunking statik with chunk 1
 }
 // This is called when all threads are started together in SPMD mode.
@@ -59,7 +59,7 @@ INLINE void omptarget_nvptx_TaskDescr::InitLevelOneTaskDescr(
 TaskDescr_InPar | TaskDescr_IsParConstr; // set flag to parallel
 items.threadId =
 GetThreadIdInBlock(); // get ids from cuda (only called for 1st level)
-items.runtimeChunkSize = 1; // prefered chunking statik with chunk 1
+items.runtimeChunkSize = 1; // preferred chunking statik with chunk 1
 prev = parentTaskDescr;
 }
@@ -90,7 +90,7 @@ INLINE void omptarget_nvptx_TaskDescr::CopyForExplicitTask(
 INLINE void omptarget_nvptx_TaskDescr::CopyToWorkDescr(
 omptarget_nvptx_TaskDescr *masterTaskDescr) {
 CopyParent(masterTaskDescr);
-// overrwrite specific items;
+// overwrite specific items;
 items.flags |=
 TaskDescr_InPar | TaskDescr_IsParConstr; // set flag to parallel
 }
@@ -99,7 +99,7 @@ INLINE void omptarget_nvptx_TaskDescr::CopyFromWorkDescr(
 omptarget_nvptx_TaskDescr *workTaskDescr) {
 Copy(workTaskDescr);
 //
-// overrwrite specific items;
+// overwrite specific items;
 //
 // The threadID should be GetThreadIdInBlock() % GetMasterThreadID().
 // This is so that the serial master (first lane in the master warp)

@@ -90,7 +90,7 @@ EXTERN int omp_in_parallel(void) {
 EXTERN int omp_in_final(void) {
 // treat all tasks as final... Specs may expect runtime to keep
 // track more precisely if a task was actively set by users... This
-// is not explicitely specified; will treat as if runtime can
+// is not explicitly specified; will treat as if runtime can
 // actively decide to put a non-final task into a final one.
 int rc = 1;
 PRINT(LD_IO, "call omp_in_final() returns %d\n", rc);

@@ -221,7 +221,7 @@ public:
 * When it is we'll want to look at them somewhere here and use that
 * information to add to our schedule choice. We shouldn't need to pass
 * them on, they merely affect which schedule we can legally choose for
-* various dynamic cases. (In paritcular, whether or not a stealing scheme
+* various dynamic cases. (In particular, whether or not a stealing scheme
 * is legal).
 */
 schedule = SCHEDULE_WITHOUT_MODIFIERS(schedule);

@@ -124,7 +124,7 @@ EXTERN void __kmpc_end_single(kmp_Ident *loc, int32_t global_tid) {
 PRINT0(LD_IO, "call kmpc_end_single\n");
 // decide to implement single with master: master get the single
 ASSERT0(LT_FUSSY, IsTeamMaster(global_tid), "expected only master here");
-// sync barrier is explicitely called... so that is not a problem
+// sync barrier is explicitly called... so that is not a problem
 }
 ////////////////////////////////////////////////////////////////////////////////

@@ -98,7 +98,7 @@ EXTERN int omp_get_max_task_priority(void);
 ////////////////////////////////////////////////////////////////////////////////
 ////////////////////////////////////////////////////////////////////////////////
-// kmp specifc types
+// kmp specific types
 ////////////////////////////////////////////////////////////////////////////////
 typedef enum kmp_sched_t {

@@ -63,7 +63,7 @@ The (simplified) pseudo code generated by LLVM is as follows:
 b. its lane_id
 c. the offset of the lane_id which hosts a remote ReduceData
 relative to the current one
-d. an algorithm version paramter determining which reduction
+d. an algorithm version parameter determining which reduction
 algorithm to use.
 This shuffleReduceFn retrieves the remote ReduceData through shuffle
 intrinsics and reduces, using the algorithm specified by the 4th
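
The hunk above lands in a comment describing the shuffle-based tree reduction that Clang generates for GPU reductions. For orientation, a minimal sketch of the underlying idea for the full-warp case — a hypothetical helper, not the runtime's actual shuffleReduceFn:

// Hypothetical CUDA sketch of a shuffle-based warp reduction (full-warp case).
// Each of the 32 lanes holds one value; after log2(32) = 5 halving steps,
// lane 0 holds the reduction of the whole warp.
__device__ int warpReduceSum(int val) {
  for (int offset = 16; offset > 0; offset /= 2)
    val += __shfl_down_sync(0xffffffff, val, offset); // read val from lane id+offset
  return val;
}

The compiler-generated shuffleReduceFn additionally handles partial-warp cases, which is what the "algorithm version" parameter mentioned above selects between.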

@@ -70,7 +70,7 @@ int main(int argc, char *argv[]) {
 check4Inc += (omp_get_ancestor_thread_num(1) - id);
 // Expected to return the number of threads in the active parallel region.
 check4Inc += 3 * omp_get_team_size(1);
-// Exptected to return 0 and 1.
+// Expected to return 0 and 1.
 check4Inc += omp_get_ancestor_thread_num(2) + 3 * omp_get_team_size(2);
 // Expected to return -1, see above.
 check4Inc += omp_get_ancestor_thread_num(3) + omp_get_team_size(3);

@@ -26,7 +26,7 @@ kmp_target_offload_kind_t TargetOffloadPolicy = tgt_default;
 std::mutex TargetOffloadMtx;
 ////////////////////////////////////////////////////////////////////////////////
-/// manage the success or failure of a target constuct
+/// manage the success or failure of a target construct
 static void HandleDefaultTargetOffload() {
 TargetOffloadMtx.lock();

@@ -25,7 +25,7 @@
 # - Fails if stack is executable. Should only be readable and writable. Not executable.
 # - Program dependencies: perl, readelf
 # - Available for Unix dynamic library builds. Not available otherwise.
-# (4) test-instr (Intel(R) MIC Architecutre only)
+# (4) test-instr (Intel(R) MIC Architecture only)
 # - Tests Intel(R) MIC Architecture libraries for valid instruction set
 # - Fails if finds invalid instruction for Intel(R) MIC Architecture (wasn't compiled with correct flags)
 # - Program dependencies: perl, objdump

@@ -183,7 +183,7 @@ if(WIN32)
 add_custom_target(libomp-needed-windows-files DEPENDS ${LIBOMP_LIB_NAME}.def)
 add_dependencies(omp libomp-needed-windows-files)
 # z_Windows_NT-586_asm.asm requires definitions to be sent via command line
-# It only needs the architecutre macro and OMPT_SUPPORT=0|1
+# It only needs the architecture macro and OMPT_SUPPORT=0|1
 libomp_append(LIBOMP_MASM_DEFINITIONS "-D_M_IA32" IF_TRUE IA32)
 libomp_append(LIBOMP_MASM_DEFINITIONS "-D_M_AMD64" IF_TRUE INTEL64)
 libomp_append(LIBOMP_MASM_DEFINITIONS "-DOMPT_SUPPORT" IF_TRUE_1_0 LIBOMP_OMPT_SUPPORT)

@@ -324,7 +324,7 @@ WrongMessageCatalog "Incompatible message catalog \"%1$s\": Version \"%
 StgIgnored "%1$s: ignored because %2$s has been defined"
 # %1, -- name of ignored variable, %2 -- name of variable with higher priority.
 OBSOLETE "%1$s: overrides %3$s specified before"
-# %1, %2 -- name and value of the overriding variable, %3 -- name of overriden variable.
+# %1, %2 -- name and value of the overriding variable, %3 -- name of overridden variable.
 AffTilesNoHWLOC "%1$s: Tiles are only supported if KMP_TOPOLOGY_METHOD=hwloc, using granularity=package instead"
 AffTilesNoTiles "%1$s: Tiles requested but were not detected on this HW, using granularity=package instead"
 TopologyExtraTile "%1$s: %2$d packages x %3$d tiles/pkg x %4$d cores/tile x %5$d threads/core (%6$d total cores)"

@@ -2435,7 +2435,7 @@ typedef struct KMP_ALIGN_CACHE kmp_base_info {
 int th_teams_level; /* save initial level of teams construct */
 /* it is 0 on device but may be any on host */
-/* The blocktime info is copied from the team struct to the thread sruct */
+/* The blocktime info is copied from the team struct to the thread struct */
 /* at the start of a barrier, and the values stored in the team are used */
 /* at points in the code where the team struct is no longer guaranteed */
 /* to exist (from the POV of worker threads). */

@@ -601,7 +601,7 @@ static int __kmp_affinity_create_hwloc_map(AddrUnsPair **address2os,
 int depth = 3;
 int levels[5] = {0, 1, 2, 3, 4}; // package, [node,] [tile,] core, thread
-int labels[3] = {0}; // package [,node] [,tile] - head of lables array
+int labels[3] = {0}; // package [,node] [,tile] - head of labels array
 if (__kmp_numa_detected)
 ++depth;
 if (__kmp_tile_depth)
@@ -828,7 +828,7 @@ static int __kmp_affinity_create_hwloc_map(AddrUnsPair **address2os,
 }
 int depth_full = depth; // number of levels before compressing
-// Find any levels with radiix 1, and remove them from the map
+// Find any levels with radix 1, and remove them from the map
 // (except for the package level).
 depth = __kmp_affinity_remove_radix_one_levels(retval, nActiveThreads, depth,
 levels);
@@ -918,7 +918,7 @@ static int __kmp_affinity_create_flat_map(AddrUnsPair **address2os,
 return 0;
 }
-// Contruct the data structure to be returned.
+// Construct the data structure to be returned.
 *address2os =
 (AddrUnsPair *)__kmp_allocate(sizeof(**address2os) * __kmp_avail_proc);
 int avail_ct = 0;
@@ -967,7 +967,7 @@ static int __kmp_affinity_create_proc_group_map(AddrUnsPair **address2os,
 return -1;
 }
-// Contruct the data structure to be returned.
+// Construct the data structure to be returned.
 *address2os =
 (AddrUnsPair *)__kmp_allocate(sizeof(**address2os) * __kmp_avail_proc);
 KMP_DEBUG_ASSERT(__kmp_pu_os_idx == NULL);
@@ -1849,7 +1849,7 @@ static int __kmp_affinity_create_x2apicid_map(AddrUnsPair **address2os,
 return 0;
 }
-// Find any levels with radiix 1, and remove them from the map
+// Find any levels with radix 1, and remove them from the map
 // (except for the package level).
 int new_depth = 0;
 for (level = 0; level < depth; level++) {
@@ -4328,7 +4328,7 @@ static void __kmp_aux_affinity_initialize(void) {
 }
 #endif // KMP_USE_HWLOC
-// If the user has specified that a paricular topology discovery method is to be
+// If the user has specified that a particular topology discovery method is to be
 // used, then we abort if that method fails. The exception is group affinity,
 // which might have been implicitly set.
@@ -4647,7 +4647,7 @@ static void __kmp_aux_affinity_initialize(void) {
 #undef KMP_EXIT_AFF_NONE
 void __kmp_affinity_initialize(void) {
-// Much of the code above was written assumming that if a machine was not
+// Much of the code above was written assuming that if a machine was not
 // affinity capable, then __kmp_affinity_type == affinity_none. We now
 // explicitly represent this as __kmp_affinity_type == affinity_disabled.
 // There are too many checks for __kmp_affinity_type == affinity_none
@@ -4713,7 +4713,7 @@ void __kmp_affinity_set_init_mask(int gtid, int isa_root) {
 KMP_CPU_ZERO(th->th.th_affin_mask);
 }
-// Copy the thread mask to the kmp_info_t strucuture. If
+// Copy the thread mask to the kmp_info_t structure. If
 // __kmp_affinity_type == affinity_none, copy the "full" mask, i.e. one that
 // has all of the OS proc ids set, or if __kmp_affinity_respect_mask is set,
 // then the full mask is the same as the mask of the initialization thread.
@@ -4823,7 +4823,7 @@ void __kmp_affinity_set_place(int gtid) {
 (th->th.th_new_place >= th->th.th_last_place));
 }
-// Copy the thread mask to the kmp_info_t strucuture,
+// Copy the thread mask to the kmp_info_t structure,
 // and set this thread's affinity.
 kmp_affin_mask_t *mask =
 KMP_CPU_INDEX(__kmp_affinity_masks, th->th.th_new_place);
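
Two of the hunks above mention compressing the detected topology by removing "radix 1" levels: a level at which every object has exactly one child adds nothing to the mapping, so it is dropped (except for the package level). A minimal sketch of that filtering, with hypothetical names rather than the runtime's:

#include <vector>

// Hypothetical sketch: keep only levels whose radix (children per object)
// exceeds 1, always keeping level 0 (the package level), as the comments
// in the hunks above describe.
std::vector<int> remove_radix_one_levels(const std::vector<int> &radix) {
  std::vector<int> kept;
  for (int level = 0; level < (int)radix.size(); ++level)
    if (level == 0 || radix[level] > 1)
      kept.push_back(radix[level]);
  return kept;
}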

@@ -186,7 +186,7 @@ typedef struct thr_data {
 -1: not all pool blocks are the same size
 >0: (common) block size for all bpool calls made so far
 */
-bfhead_t *last_pool; /* Last pool owned by this thread (delay dealocation) */
+bfhead_t *last_pool; /* Last pool owned by this thread (delay deallocation) */
 } thr_data_t;
 /* Minimum allocation quantum: */
@@ -195,7 +195,7 @@ typedef struct thr_data {
 #define MaxSize \
 (bufsize)( \
 ~(((bufsize)(1) << (sizeof(bufsize) * CHAR_BIT - 1)) | (SizeQuant - 1)))
-// Maximun for the requested size.
+// Maximum for the requested size.
 /* End sentinel: value placed in bsize field of dummy block delimiting
 end of pool block. The most negative number which will fit in a
@@ -577,7 +577,7 @@ static void *bget(kmp_info_t *th, bufsize requested_size) {
 if (thr->acqfcn != 0) {
 if (size > (bufsize)(thr->exp_incr - sizeof(bhead_t))) {
 /* Request is too large to fit in a single expansion block.
-Try to satisy it by a direct buffer acquisition. */
+Try to satisfy it by a direct buffer acquisition. */
 bdhead_t *bdh;
 size += sizeof(bdhead_t) - sizeof(bhead_t);
@@ -1961,7 +1961,7 @@ void ___kmp_fast_free(kmp_info_t *this_thr, void *ptr KMP_SRC_LOC_DECL) {
 this_thr->th.th_free_lists[index].th_free_list_other = ptr;
 } else {
 // either queue blocks owner is changing or size limit exceeded
-// return old queue to allocating thread (q_th) synchroneously,
+// return old queue to allocating thread (q_th) synchronously,
 // and start new list for alloc_thr's tasks
 void *old_ptr;
 void *tail = head;

@@ -92,7 +92,7 @@ construct, since the master thread is necessarily thread zero).
 If multiple non-OpenMP threads all enter an OpenMP construct then this
 will be a unique thread identifier among all the threads created by
-the OpenMP runtime (but the value cannote be defined in terms of
+the OpenMP runtime (but the value cannot be defined in terms of
 OpenMP thread ids returned by omp_get_thread_num()).
 */
 kmp_int32 __kmpc_global_thread_num(ident_t *loc) {

@@ -1250,7 +1250,7 @@ int __kmp_dispatch_next_algorithm(int gtid,
 pr->u.p.parm4 = (victimIdx + 1) % nproc; // next victim
 continue; // not enough chunks to steal
 }
-// stealing succeded, reduce victim's ub by 1/4 of undone chunks or
+// stealing succeeded, reduce victim's ub by 1/4 of undone chunks or
 // by 1
 if (remaining > 3) {
 // steal 1/4 of remaining
@@ -1357,7 +1357,7 @@ int __kmp_dispatch_next_algorithm(int gtid,
 (volatile kmp_int64 *)&victim->u.p.count,
 *VOLATILE_CAST(kmp_int64 *) & vold.b,
 *VOLATILE_CAST(kmp_int64 *) & vnew.b)) {
-// stealing succedded
+// stealing succeeded
 KMP_COUNT_DEVELOPER_VALUE(FOR_static_steal_stolen,
 vold.p.ub - vnew.p.ub);
 status = 1;
@@ -1372,7 +1372,7 @@ int __kmp_dispatch_next_algorithm(int gtid,
 #endif
 break;
 } // if (check CAS result)
-KMP_CPU_PAUSE(); // CAS failed, repeate attempt
+KMP_CPU_PAUSE(); // CAS failed, repeatedly attempt
 } // while (try to steal from particular victim)
 } // while (search for victim)
 } // if (try to find victim and steal)
@@ -1532,7 +1532,7 @@ int __kmp_dispatch_next_algorithm(int gtid,
 }
 if ((T)remaining <
 pr->u.p.parm2) { // compare with K*nproc*(chunk+1), K=2 by default
-// use dynamic-style shcedule
+// use dynamic-style schedule
 // atomically increment iterations, get old value
 init = test_then_add<ST>(RCAST(volatile ST *, &sh->u.s.iteration),
 (ST)chunkspec);
@@ -1601,7 +1601,7 @@ int __kmp_dispatch_next_algorithm(int gtid,
 KMP_DEBUG_ASSERT(init % chunk == 0);
 // compare with K*nproc*(chunk+1), K=2 by default
 if ((T)remaining < pr->u.p.parm2) {
-// use dynamic-style shcedule
+// use dynamic-style schedule
 // atomically increment iterations, get old value
 init = test_then_add<ST>(RCAST(volatile ST *, &sh->u.s.iteration),
 (ST)chunk);
@@ -1892,7 +1892,7 @@ static int __kmp_dispatch_next(ident_t *loc, int gtid, kmp_int32 *p_last,
 typedef typename traits_t<T>::unsigned_t UT;
 typedef typename traits_t<T>::signed_t ST;
 // This is potentially slightly misleading, schedule(runtime) will appear here
-// even if the actual runtme schedule is static. (Which points out a
+// even if the actual runtime schedule is static. (Which points out a
 // disadvantage of schedule(runtime): even when static scheduling is used it
 // costs more than a compile time choice to use static scheduling would.)
 KMP_TIME_PARTITIONED_BLOCK(OMP_loop_dynamic_scheduling);
@@ -1909,7 +1909,7 @@ static int __kmp_dispatch_next(ident_t *loc, int gtid, kmp_int32 *p_last,
 gtid, p_lb, p_ub, p_st, p_last));
 if (team->t.t_serialized) {
-/* NOTE: serialize this dispatch becase we are not at the active level */
+/* NOTE: serialize this dispatch because we are not at the active level */
 pr = reinterpret_cast<dispatch_private_info_template<T> *>(
 th->th.th_dispatch->th_disp_buffer); /* top of the stack */
 KMP_DEBUG_ASSERT(pr);
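
The stealing hunks above belong to the static_steal path, where a thief shrinks a victim's remaining [lb, ub] chunk range with a single 64-bit CAS so that both bounds move atomically, retrying with KMP_CPU_PAUSE() when the CAS fails. A minimal standalone sketch of that pattern, with hypothetical names rather than the runtime's actual types:

#include <atomic>
#include <cstdint>
#include <cstring>

// Hypothetical sketch of CAS-based chunk stealing. lb/ub are packed into one
// 64-bit word so a thief can shrink the victim's range atomically.
struct Range { int32_t lb, ub; }; // inclusive chunk indices

bool try_steal(std::atomic<uint64_t> &victim, Range &stolen) {
  uint64_t old_bits = victim.load(std::memory_order_relaxed);
  for (;;) {
    Range r;
    std::memcpy(&r, &old_bits, sizeof r);
    int32_t remaining = r.ub - r.lb + 1;
    if (remaining < 2)
      return false; // not enough chunks to steal
    // steal 1/4 of the remaining chunks, or 1 when few are left
    int32_t take = remaining > 3 ? remaining / 4 : 1;
    Range shrunk = {r.lb, r.ub - take};
    uint64_t new_bits;
    std::memcpy(&new_bits, &shrunk, sizeof new_bits);
    if (victim.compare_exchange_weak(old_bits, new_bits)) {
      stolen = {shrunk.ub + 1, r.ub}; // the thief owns the stolen tail
      return true; // stealing succeeded
    }
    // CAS failed: old_bits now holds the refreshed value, try again
  }
}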

@@ -993,7 +993,7 @@ void __kmp_dispatch_init_hierarchy(ident_t *loc, int n,
 th->th.th_hier_bar_data = (kmp_hier_private_bdata_t *)__kmp_allocate(
 sizeof(kmp_hier_private_bdata_t) * kmp_hier_layer_e::LAYER_LAST);
 }
-// Have threads "register" themselves by modifiying the active count for each
+// Have threads "register" themselves by modifying the active count for each
 // level they are involved in. The active count will act as nthreads for that
 // level regarding the scheduling algorithms
 for (int i = 0; i < n; ++i) {

@@ -1,5 +1,5 @@
 /*
-* kmp_environment.h -- Handle environment varoiables OS-independently.
+* kmp_environment.h -- Handle environment variables OS-independently.
 */
 //===----------------------------------------------------------------------===//

@@ -275,7 +275,7 @@ void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ORDERED_END)(void) {
 #define KMP_DISPATCH_FINI_CHUNK_ULL __kmp_aux_dispatch_fini_chunk_8u
 #define KMP_DISPATCH_NEXT_ULL __kmpc_dispatch_next_8u
-// The parallel contruct
+// The parallel construct
 #ifndef KMP_DEBUG
 static
@@ -325,7 +325,7 @@ static
 enum sched_type schedule, long start,
 long end, long incr,
 long chunk_size) {
-// Intialize the loop worksharing construct.
+// Initialize the loop worksharing construct.
 KMP_DISPATCH_INIT(loc, *gtid, schedule, start, end, incr, chunk_size,
 schedule != kmp_sch_static);

@@ -639,7 +639,7 @@ kmp_msg_t __kmp_msg_format(unsigned id_arg, ...) {
 // numbers, for example: "%2$s %1$s".
 __kmp_str_buf_vprint(&buffer, __kmp_i18n_catgets(id), args);
 #elif KMP_OS_WINDOWS
-// On Winodws, printf() family functions does not recognize GNU style
+// On Windows, printf() family functions does not recognize GNU style
 // parameter numbers, so we have to use FormatMessage() instead. It recognizes
 // parameter numbers, e. g.: "%2!s! "%1!s!".
 {

@@ -32,7 +32,7 @@ extern "C" {
 __kmp_i18n_catgets() returns read-only string. It should not be freed.
-KMP_I18N_STR macro simplifies acces to strings in message catalog a bit.
+KMP_I18N_STR macro simplifies access to strings in message catalog a bit.
 Following two lines are equivalent:
 __kmp_i18n_catgets( kmp_i18n_str_Warning )

@@ -3021,7 +3021,7 @@ kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])(
 static kmp_indirect_lock_t *__kmp_indirect_lock_pool[KMP_NUM_I_LOCKS] = {0};
 // User lock allocator for dynamically dispatched indirect locks. Every entry of
-// the indirect lock table holds the address and type of the allocated indrect
+// the indirect lock table holds the address and type of the allocated indirect
 // lock (kmp_indirect_lock_t), and the size of the table doubles when it is
 // full. A destroyed indirect lock object is returned to the reusable pool of
 // locks, unique to each lock type.

@@ -42,7 +42,7 @@ typedef struct ident ident_t;
 // ----------------------------------------------------------------------------
 // We need to know the size of the area we can assume that the compiler(s)
-// allocated for obects of type omp_lock_t and omp_nest_lock_t. The Intel
+// allocated for objects of type omp_lock_t and omp_nest_lock_t. The Intel
 // compiler always allocates a pointer-sized area, as does visual studio.
 //
 // gcc however, only allocates 4 bytes for regular locks, even on 64-bit
@@ -861,11 +861,11 @@ __kmp_destroy_nested_user_lock_with_checks(kmp_user_lock_p lck) {
 //
 // In other cases, the calling code really should differentiate between an
 // unimplemented function and one that is implemented but returning NULL /
-// invalied value. If this is the case, no get function wrapper exists.
+// invalid value. If this is the case, no get function wrapper exists.
 extern int (*__kmp_is_user_lock_initialized_)(kmp_user_lock_p lck);
-// no set function; fields set durining local allocation
+// no set function; fields set during local allocation
 extern const ident_t *(*__kmp_get_user_lock_location_)(kmp_user_lock_p lck);
@@ -899,7 +899,7 @@ static inline void __kmp_set_user_lock_flags(kmp_user_lock_p lck,
 }
 }
-// The fuction which sets up all of the vtbl pointers for kmp_user_lock_t.
+// The function which sets up all of the vtbl pointers for kmp_user_lock_t.
 extern void __kmp_set_user_lock_vptrs(kmp_lock_kind_t user_lock_kind);
 // Macros for binding user lock functions.
@@ -1128,7 +1128,7 @@ extern int (**__kmp_direct_unset)(kmp_dyna_lock_t *, kmp_int32);
 extern int (**__kmp_direct_test)(kmp_dyna_lock_t *, kmp_int32);
 // Function tables for indirect locks. Set/unset/test differentiate functions
-// with/withuot consistency checking.
+// with/without consistency checking.
 extern void (*__kmp_indirect_init[])(kmp_user_lock_p);
 extern void (**__kmp_indirect_destroy)(kmp_user_lock_p);
 extern int (**__kmp_indirect_set)(kmp_user_lock_p, kmp_int32);

@@ -47,7 +47,7 @@ typedef struct {
 } kmp_omp_nthr_item_t;
 typedef struct {
-kmp_int32 num; // Number of items in the arrray.
+kmp_int32 num; // Number of items in the array.
 kmp_uint64 array; // Address of array of kmp_omp_num_threads_item_t.
 } kmp_omp_nthr_info_t;

@@ -3529,7 +3529,7 @@ static int __kmp_expand_threads(int nNeed) {
 // > __kmp_max_nth in one of two ways:
 //
 // 1) The initialization thread (gtid = 0) exits. __kmp_threads[0]
-// may not be resused by another thread, so we may need to increase
+// may not be reused by another thread, so we may need to increase
 // __kmp_threads_capacity to __kmp_max_nth + 1.
 //
 // 2) New foreign root(s) are encountered. We always register new foreign
@@ -4515,11 +4515,11 @@ __kmp_set_thread_affinity_mask_full_tmp(kmp_affin_mask_t *old_mask) {
 #if KMP_AFFINITY_SUPPORTED
 // __kmp_partition_places() is the heart of the OpenMP 4.0 affinity mechanism.
-// It calculats the worker + master thread's partition based upon the parent
+// It calculates the worker + master thread's partition based upon the parent
 // thread's partition, and binds each worker to a thread in their partition.
 // The master thread's partition should already include its current binding.
 static void __kmp_partition_places(kmp_team_t *team, int update_master_only) {
-// Copy the master thread's place partion to the team struct
+// Copy the master thread's place partition to the team struct
 kmp_info_t *master_th = team->t.t_threads[0];
 KMP_DEBUG_ASSERT(master_th != NULL);
 kmp_proc_bind_t proc_bind = team->t.t_proc_bind;
@@ -5536,7 +5536,7 @@ kmp_team_t *__kmp_reap_team(kmp_team_t *team) {
 // locality problems on programs where the size of the hot team regularly
 // grew and shrunk.
 //
-// Now, for single-level parallelism, the OMP tid is alway == gtid.
+// Now, for single-level parallelism, the OMP tid is always == gtid.
 void __kmp_free_thread(kmp_info_t *this_th) {
 int gtid;
 kmp_info_t **scan;
@@ -5609,7 +5609,7 @@ void __kmp_free_thread(kmp_info_t *this_th) {
 // scan is the address of a link in the list, possibly the address of
 // __kmp_thread_pool itself.
 //
-// In the absence of nested parallism, the for loop will have 0 iterations.
+// In the absence of nested parallelism, the for loop will have 0 iterations.
 if (__kmp_thread_pool_insert_pt != NULL) {
 scan = &(__kmp_thread_pool_insert_pt->th.th_next_pool);
 } else {
@@ -6088,7 +6088,7 @@ void __kmp_internal_end_library(int gtid_req) {
 only place to clear __kmp_serial_init */
 /* we'll check this later too, after we get the lock */
 // 2009-09-06: We do not set g_abort without setting g_done. This check looks
-// redundaant, because the next check will work in any case.
+// redundant, because the next check will work in any case.
 if (__kmp_global.g.g_abort) {
 KA_TRACE(11, ("__kmp_internal_end_library: abort, exiting\n"));
 /* TODO abort? */

@@ -667,7 +667,7 @@ static void __kmp_team_static_init(ident_t *loc, kmp_int32 gtid,
 // stride for next chunks calculation.
 // Last iteration flag set for the team that will execute
 // the last iteration of the loop.
-// The routine is called for dist_schedue(static,chunk) only.
+// The routine is called for dist_schedule(static,chunk) only.
 typedef typename traits_t<T>::unsigned_t UT;
 typedef typename traits_t<T>::signed_t ST;
 kmp_uint32 team_id;

@@ -364,7 +364,7 @@ static void __kmp_stg_parse_int(
 char const
 *name, // I: Name of environment variable (used in warning messages).
 char const *value, // I: Value of environment variable to parse.
-int min, // I: Miminal allowed value.
+int min, // I: Minimum allowed value.
 int max, // I: Maximum allowed value.
 int *out // O: Output (parsed) value.
 ) {
@@ -1305,7 +1305,7 @@ static void __kmp_stg_print_max_task_priority(kmp_str_buf_t *buffer,
 } // __kmp_stg_print_max_task_priority
 // KMP_TASKLOOP_MIN_TASKS
-// taskloop threashold to switch from recursive to linear tasks creation
+// taskloop threshold to switch from recursive to linear tasks creation
 static void __kmp_stg_parse_taskloop_min_tasks(char const *name,
 char const *value, void *data) {
 int tmp;
@@ -2041,7 +2041,7 @@ static void __kmp_parse_affinity_env(char const *name, char const *value,
 // If we see a parse error, emit a warning and scan to the next ",".
 //
 // FIXME - there's got to be a better way to print an error
-// message, hopefully without overwritting peices of buf.
+// message, hopefully without overwriting peices of buf.
 #define EMIT_WARN(skip, errlist) \
 { \
 char ch; \
@@ -4395,7 +4395,7 @@ static void __kmp_stg_print_speculative_statsfile(kmp_str_buf_t *buffer,
 // -----------------------------------------------------------------------------
 // KMP_HW_SUBSET (was KMP_PLACE_THREADS)
-// The longest observable sequense of items is
+// The longest observable sequence of items is
 // Socket-Node-Tile-Core-Thread
 // So, let's limit to 5 levels for now
 // The input string is usually short enough, let's use 512 limit for now

@@ -270,7 +270,7 @@ void explicitTimer::stop(tsc_tick_count tick,
 /* ************* partitionedTimers member functions ************* */
 partitionedTimers::partitionedTimers() { timer_stack.reserve(8); }
-// initialize the paritioned timers to an initial timer
+// initialize the partitioned timers to an initial timer
 void partitionedTimers::init(explicitTimer timer) {
 KMP_DEBUG_ASSERT(this->timer_stack.size() == 0);
 timer_stack.push_back(timer);
@@ -609,7 +609,7 @@ void kmp_stats_output_module::printTimerStats(FILE *statsOut,
 totalStats[s].format(tag, true).c_str());
 }
-// Print historgram of statistics
+// Print histogram of statistics
 if (theStats[0].haveHist()) {
 fprintf(statsOut, "\nTimer distributions\n");
 for (int s = 0; s < TIMER_LAST; s++) {

@@ -195,7 +195,7 @@ enum stats_state_e {
 // from a dynamically scheduled loop
 // OMP_critical -- Time thread spends executing critical section
 // OMP_critical_wait -- Time thread spends waiting to enter
-// a critcal seciton
+// a critical section
 // OMP_single -- Time spent executing a "single" region
 // OMP_master -- Time spent executing a "master" region
 // OMP_task_immediate -- Time spent executing non-deferred tasks
@@ -522,7 +522,7 @@ public:
 void windup();
 };
-// Special wrapper around the partioned timers to aid timing code blocks
+// Special wrapper around the partitioned timers to aid timing code blocks
 // It avoids the need to have an explicit end, leaving the scope suffices.
 class blockPartitionedTimer {
 partitionedTimers *part_timers;
@@ -920,7 +920,7 @@ extern kmp_stats_output_module __kmp_stats_output;
 #define KMP_OUTPUT_STATS(heading_string) __kmp_output_stats(heading_string)
 /*!
-* \brief Initializes the paritioned timers to begin with name.
+* \brief Initializes the partitioned timers to begin with name.
 *
 * @param name timer which you want this thread to begin with
 *

@@ -72,12 +72,12 @@ struct kmp_str_fname {
 typedef struct kmp_str_fname kmp_str_fname_t;
 void __kmp_str_fname_init(kmp_str_fname_t *fname, char const *path);
 void __kmp_str_fname_free(kmp_str_fname_t *fname);
-// Compares file name with specified patern. If pattern is NULL, any fname
+// Compares file name with specified pattern. If pattern is NULL, any fname
 // matched.
 int __kmp_str_fname_match(kmp_str_fname_t const *fname, char const *pattern);
 /* The compiler provides source locations in string form
-";file;func;line;col;;". It is not convenient for manupulation. This
+";file;func;line;col;;". It is not convenient for manipulation. This
 structure keeps source location in more convenient form.
 Usage:

@@ -147,7 +147,7 @@ void *kmp_malloc(size_t size) {
 i;
 void *res;
 #if KMP_OS_WINDOWS
-// If succesfull returns a pointer to the memory block, otherwise returns
+// If successful returns a pointer to the memory block, otherwise returns
 // NULL.
 // Sets errno to ENOMEM or EINVAL if memory allocation failed or parameter
 // validation failed.

@@ -35,7 +35,7 @@ static std::atomic<kmp_int32> kmp_node_id_seed = ATOMIC_VAR_INIT(0);
 static void __kmp_init_node(kmp_depnode_t *node) {
 node->dn.successors = NULL;
-node->dn.task = NULL; // will point to the rigth task
+node->dn.task = NULL; // will point to the right task
 // once dependences have been processed
 for (int i = 0; i < MAX_MTX_DEPS; ++i)
 node->dn.mtx_locks[i] = NULL;
@@ -473,8 +473,8 @@ static bool __kmp_check_deps(kmp_int32 gtid, kmp_depnode_t *node,
 npredecessors++;
 // Update predecessors and obtain current value to check if there are still
-// any outstandig dependences (some tasks may have finished while we processed
-// the dependences)
+// any outstanding dependences (some tasks may have finished while we
+// processed the dependences)
 npredecessors =
 node->dn.npredecessors.fetch_add(npredecessors) + npredecessors;
@@ -498,7 +498,7 @@ task''
 @param noalias_dep_list List of depend items with no aliasing
 @return Returns either TASK_CURRENT_NOT_QUEUED if the current task was not
-suspendend and queued, or TASK_CURRENT_QUEUED if it was suspended and queued
+suspended and queued, or TASK_CURRENT_QUEUED if it was suspended and queued
 Schedule a non-thread-switchable task with dependences for execution
 */

@@ -912,7 +912,7 @@ static void __kmp_task_finish(kmp_int32 gtid, kmp_task_t *task,
 /* If the tasks' destructor thunk flag has been set, we need to invoke the
 destructor thunk that has been generated by the compiler. The code is
 placed here, since at this point other tasks might have been released
-hence overlapping the destructor invokations with some other work in the
+hence overlapping the destructor invocations with some other work in the
 released tasks. The OpenMP spec is not specific on when the destructors
 are invoked, so we should be free to choose. */
 if (taskdata->td_flags.destructors_thunk) {
@@ -1411,7 +1411,7 @@ __kmpc_omp_reg_task_with_affinity(ident_t *loc_ref, kmp_int32 gtid,
 //
 // gtid: global thread ID of caller
 // task: the task to invoke
-// current_task: the task to resume after task invokation
+// current_task: the task to resume after task invocation
 static void __kmp_invoke_task(kmp_int32 gtid, kmp_task_t *task,
 kmp_taskdata_t *current_task) {
 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
@@ -2911,7 +2911,7 @@ static inline int __kmp_execute_tasks_template(
 // met, then return now, so that the barrier gather/release pattern can
 // proceed. If this thread is in the last spin loop in the barrier,
 // waiting to be released, we know that the termination condition will not
-// be satisified, so don't waste any cycles checking it.
+// be satisfied, so don't waste any cycles checking it.
 if (flag == NULL || (!final_spin && flag->done_check())) {
 KA_TRACE(
 15,
@@ -3096,7 +3096,7 @@ static void __kmp_enable_tasking(kmp_task_team_t *task_team,
 * to each thread in the team, so that it can steal work from it.
 *
 * Enter the existence of the kmp_task_team_t struct. It employs a reference
-* counting mechanims, and is allocated by the master thread before calling
+* counting mechanism, and is allocated by the master thread before calling
 * __kmp_<barrier_kind>_release, and then is release by the last thread to
 * exit __kmp_<barrier_kind>_release at the next barrier. I.e. the lifetimes
 * of the kmp_task_team_t structs for consecutive barriers can overlap
@@ -3107,7 +3107,7 @@ static void __kmp_enable_tasking(kmp_task_team_t *task_team,
 * We currently use the existence of the threads array as an indicator that
 * tasks were spawned since the last barrier. If the structure is to be
 * useful outside the context of tasking, then this will have to change, but
-* not settting the field minimizes the performance impact of tasking on
+* not setting the field minimizes the performance impact of tasking on
 * barriers, when no explicit tasks were spawned (pushed, actually).
 */
@@ -4258,7 +4258,7 @@ int __kmp_taskloop_task(int gtid, void *ptask) {
 // grainsize Number of loop iterations per task
 // extras Number of chunks with grainsize+1 iterations
 // tc Iterations count
-// num_t_min Threashold to launch tasks recursively
+// num_t_min Threshold to launch tasks recursively
 // task_dup Tasks duplication routine
 // codeptr_ra Return address for OMPT events
 void __kmp_taskloop_recur(ident_t *loc, int gtid, kmp_task_t *task,
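
The last hunk documents the parameters of __kmp_taskloop_recur, which keeps halving the iteration space while the resulting task count stays above num_t_min and falls back to linear task creation below it. A minimal sketch of that divide-and-conquer shape — hypothetical signature, not the runtime's:

#include <cstdint>

// Hypothetical sketch: split [lo, hi) into tasks of ~grainsize iterations.
// Recurse while more than num_t_min tasks remain; below the threshold,
// create the tasks linearly.
void taskloop_recur(int64_t lo, int64_t hi, int64_t grainsize,
                    int64_t num_t_min, void (*body)(int64_t, int64_t)) {
  int64_t num_tasks = (hi - lo + grainsize - 1) / grainsize;
  if (num_tasks <= num_t_min) {
    for (int64_t i = lo; i < hi; i += grainsize)
      body(i, i + grainsize < hi ? i + grainsize : hi); // one task per chunk
    return;
  }
  int64_t mid = lo + (num_tasks / 2) * grainsize; // split on a chunk boundary
  taskloop_recur(lo, mid, grainsize, num_t_min, body); // could run as a new task
  taskloop_recur(mid, hi, grainsize, num_t_min, body); // continue in place
}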

@@ -194,7 +194,7 @@ void __kmp_query_cpuid(kmp_cpuinfo_t *p) {
 KA_TRACE(trace_level, (" PSN"));
 }
 if ((buf.edx >> 19) & 1) {
-/* CLFULSH - Cache Flush Instruction Available */
+/* CLFLUSH - Cache Flush Instruction Available */
 cflush_size =
 data[1] * 8; /* Bits 15-08: CLFLUSH line size = 8 (64 bytes) */
 KA_TRACE(trace_level, (" CLFLUSH(%db)", cflush_size));

@@ -15,11 +15,11 @@
 #define KMP_WRAPPER_MALLOC_H
 /* This header serves for 3 purposes:
-1. Declaring standard memory allocation rourines in OS-independent way.
+1. Declaring standard memory allocation routines in OS-independent way.
 2. Passing source location info through memory allocation wrappers.
 3. Enabling native memory debugging capabilities.
-1. Declaring standard memory allocation rourines in OS-independent way.
+1. Declaring standard memory allocation routines in OS-independent way.
 -----------------------------------------------------------------------
 On Linux* OS, alloca() function is declared in <alloca.h> header, while on
 Windows* OS there is no <alloca.h> header, function _alloca() (note
@@ -103,9 +103,9 @@
 #error Unknown or unsupported OS.
 #endif
-/* KMP_SRC_LOC_DECL -- Declaring source location paramemters, to be used in
+/* KMP_SRC_LOC_DECL -- Declaring source location parameters, to be used in
 function declaration.
-KMP_SRC_LOC_PARM -- Source location paramemters, to be used to pass
+KMP_SRC_LOC_PARM -- Source location parameters, to be used to pass
 parameters to underlying levels.
 KMP_SRC_LOC_CURR -- Source location arguments describing current location,
 to be used at top-level.

@@ -102,7 +102,7 @@ inline void ompt_set_thread_state(kmp_info_t *thread, ompt_state_t state) {
 inline const char *ompt_get_runtime_version() {
 return &__kmp_version_lib_ver[KMP_VERSION_MAGIC_LEN];
 }
-#endif // OMPT_SUPPRORT
+#endif // OMPT_SUPPORT
 // macros providing the OMPT callbacks for reduction clause
 #if OMPT_SUPPORT && OMPT_OPTIONAL

@@ -2303,7 +2303,7 @@ ITT_STUBV(ITTAPI, void, marker, (const __itt_domain *domain, __itt_id id, __itt_
 * name of the metadata), and a value (the actual data). The encoding of
 * the value depends on the type of the metadata.
 *
-* The type of metadata is specified by an enumerated type __itt_metdata_type.
+* The type of metadata is specified by an enumerated type __itt_metadata_type.
 * @{
 */
@@ -3196,7 +3196,7 @@ ITT_STUBV(ITTAPI, void, relation_add_ex, (const __itt_domain *domain,
 #define __itt_relation_add_ex(d,x,y,z,a,b) ITTNOTIFY_VOID_D5(relation_add_ex,d,x,y,z,a,b)
 #define __itt_relation_add_ex_ptr ITTNOTIFY_NAME(relation_add_ex)
 #else /* INTEL_NO_ITTNOTIFY_API */
-#define __itt_relation_add_to_current_ex(domain,clock_domain,timestame,relation,tail)
+#define __itt_relation_add_to_current_ex(domain,clock_domain,timestamp,relation,tail)
 #define __itt_relation_add_to_current_ex_ptr 0
 #define __itt_relation_add_ex(domain,clock_domain,timestamp,head,relation,tail)
 #define __itt_relation_add_ex_ptr 0

@@ -762,7 +762,7 @@ static const char* __itt_fsplit(const char* s, const char* sep, const char** out
 /* This function return value of env variable that placed into static buffer.
 * !!! The same static buffer is used for subsequent calls. !!!
-* This was done to aviod dynamic allocation for few calls.
+* This was done to avoid dynamic allocation for few calls.
 * Actually we need this function only four times.
 */
 static const char* __itt_get_env_var(const char* name)
@@ -1012,7 +1012,7 @@ static void __itt_reinit_all_pointers(void)
 static void __itt_nullify_all_pointers(void)
 {
 int i;
-/* Nulify all pointers except domain_create, string_handle_create and counter_create */
+/* Nullify all pointers except domain_create, string_handle_create and counter_create */
 for (i = 0; _N_(_ittapi_global).api_list_ptr[i].name != NULL; i++)
 *_N_(_ittapi_global).api_list_ptr[i].func_ptr = _N_(_ittapi_global).api_list_ptr[i].null_func;
 }

@@ -2207,7 +2207,7 @@ int __kmp_get_load_balance(int max) {
 #else // Linux* OS
-// The fuction returns number of running (not sleeping) threads, or -1 in case
+// The function returns number of running (not sleeping) threads, or -1 in case
 // of error. Error could be reported if Linux* OS kernel too old (without
 // "/proc" support). Counting running threads stops if max running threads
 // encountered.

@@ -23,7 +23,7 @@ int test_omp_init_lock() {
 omp_unset_lock(&my_lcks[j % LOCKS_PER_ITER]);
 }
 }
-// Wait until all repititions are done. The test is exercising growth of
+// Wait until all repetitions are done. The test is exercising growth of
 // the global lock pool, which does not shrink when no locks are allocated.
 {
 int j;

@@ -3,7 +3,7 @@
 // This test checks that values stored in task_data in a barrier_begin event
 // are still present in the corresponding barrier_end event.
-// Therefore, callback implementations different from the ones in callback.h are neccessary.
+// Therefore, callback implementations different from the ones in callback.h are necessary.
 // This is a test for an issue reported in
 // https://github.com/OpenMPToolsInterface/LLVM-openmp/issues/39

@@ -39,7 +39,7 @@ typedef struct red_input {
 void *reduce_orig; /**< original reduction item used for initialization */
 size_t reduce_size; /**< size of data item in bytes */
 // three compiler-generated routines (init, fini are optional):
-void *reduce_init; /**< data initialization routine (single paramemter) */
+void *reduce_init; /**< data initialization routine (single parameter) */
 void *reduce_fini; /**< data finalization routine */
 void *reduce_comb; /**< data combiner routine */
 unsigned flags; /**< flags for additional info from compiler */

@@ -36,7 +36,7 @@ typedef struct red_input {
 void *reduce_shar; /**< shared between tasks item to reduce into */
 size_t reduce_size; /**< size of data item in bytes */
 // three compiler-generated routines (init, fini are optional):
-void *reduce_init; /**< data initialization routine (single paramemter) */
+void *reduce_init; /**< data initialization routine (single parameter) */
 void *reduce_fini; /**< data finalization routine */
 void *reduce_comb; /**< data combiner routine */
 unsigned flags; /**< flags for additional info from compiler */

@@ -39,7 +39,7 @@ typedef struct red_input {
 void *reduce_orig; /**< original reduction item used for initialization */
 size_t reduce_size; /**< size of data item in bytes */
 // three compiler-generated routines (init, fini are optional):
-void *reduce_init; /**< data initialization routine (single paramemter) */
+void *reduce_init; /**< data initialization routine (single parameter) */
 void *reduce_fini; /**< data finalization routine */
 void *reduce_comb; /**< data combiner routine */
 unsigned flags; /**< flags for additional info from compiler */

@@ -36,7 +36,7 @@ typedef struct red_input {
 void *reduce_shar; /**< shared between tasks item to reduce into */
 size_t reduce_size; /**< size of data item in bytes */
 // three compiler-generated routines (init, fini are optional):
-void *reduce_init; /**< data initialization routine (single paramemter) */
+void *reduce_init; /**< data initialization routine (single parameter) */
 void *reduce_fini; /**< data finalization routine */
 void *reduce_comb; /**< data combiner routine */
 unsigned flags; /**< flags for additional info from compiler */

@@ -150,7 +150,7 @@ int test_omp_for_schedule_static_3()
 * same logical assignment of chunks to threads. We use the nowait
 * clause to increase the probability to get an error. */
-/* First we allocate some more memmory */
+/* First we allocate some more memory */
 free (tids);
 tids = (int *) malloc (sizeof (int) * LOOPCOUNT);
 tids2 = (int *) malloc (sizeof (int) * LOOPCOUNT);

@@ -23,7 +23,7 @@ my $target_os;
 my $target_arch;
 # --------------------------------------------------------------------------------------------------
-# Ouput parse error.
+# Output parse error.
 # $tool -- Name of tool.
 # @bulk -- Output of the tool.
 # $n -- Number of line caused parse error.

@@ -74,7 +74,7 @@ B<check-execstack.pl> -- Check whether stack is executable, issue an error if so
 =head1 SYNOPSIS
-B<check-execstack.pl> I<optiion>... I<file>...
+B<check-execstack.pl> I<option>... I<file>...
 =head1 DESCRIPTION

@@ -124,7 +124,7 @@ sub check_file($;$$) {
 my $n = 0;
 my $errors = 0;
-my $current_func = ""; # Name of current fuction.
+my $current_func = ""; # Name of current function.
 my $reported_func = ""; # name of last reported function.
 foreach my $line ( @bulk ) {
 ++ $n;

@@ -152,7 +152,7 @@ sub generate_output(\%$) {
 print( $bulk );
 }; # if
-}; # sub generate_ouput
+}; # sub generate_output
 #
 # Parse command line.
@@ -268,7 +268,7 @@ A name of input file.
 =head1 DESCRIPTION
 The script reads input file, process conditional directives, checks content for consistency, and
-generates ouptput file suitable for linker.
+generates output file suitable for linker.
 =head2 Input File Format
@@ -287,7 +287,7 @@ Comments start with C<#> symbol and continue to the end of line.
 %endif
 A part of file surrounded by C<%ifdef I<name>> and C<%endif> directives is a conditional part -- it
-has effect only if I<name> is defined in the comman line by B<--define> option. C<%ifndef> is a
+has effect only if I<name> is defined in the command line by B<--define> option. C<%ifndef> is a
 negated version of C<%ifdef> -- conditional part has an effect only if I<name> is B<not> defined.
 Conditional parts may be nested.

@@ -187,12 +187,12 @@ sub target_options() {
 set_target_os( $_[ 1 ] ) or
 die "Bad value of --target-os option: \"$_[ 1 ]\"\n";
 },
-"target-architecture|targert-arch|architecture|arch=s" =>
+"target-architecture|target-arch|architecture|arch=s" =>
 sub {
 set_target_arch( $_[ 1 ] ) or
 die "Bad value of --target-architecture option: \"$_[ 1 ]\"\n";
 },
-"target-mic-architecture|targert-mic-arch|mic-architecture|mic-arch=s" =>
+"target-mic-architecture|target-mic-arch|mic-architecture|mic-arch=s" =>
 sub {
 set_target_mic_arch( $_[ 1 ] ) or
 die "Bad value of --target-mic-architecture option: \"$_[ 1 ]\"\n";
@@ -390,7 +390,7 @@ naming files, directories, macros, etc.
 my $os = canon_os( "Windows NT" ); # Returns "win".
 print( $host_arch, $host_os, $host_platform );
-print( $taregt_arch, $target_os, $target_platform );
+print( $target_arch, $target_os, $target_platform );
 tools::get_options(
 Platform::target_options(),

@@ -87,7 +87,7 @@ my @warning = ( sub {}, \&warning, \&runtime_error );
 sub check_opts(\%$;$) {
-my $opts = shift( @_ ); # Referense to hash containing real options and their values.
+my $opts = shift( @_ ); # Reference to hash containing real options and their values.
 my $good = shift( @_ ); # Reference to an array containing all known option names.
 my $msg = shift( @_ ); # Optional (non-mandatory) message.
@@ -237,7 +237,7 @@ B<Description:>
 It is very simple wrapper arounf Getopt::Long::GetOptions. It passes all arguments to GetOptions,
 and add definitions for standard help options: --help, --doc, --verbose, and --quiet.
-When GetOptions finihes, this subroutine checks exit code, if it is non-zero, standard error
+When GetOptions finishes, this subroutine checks exit code, if it is non-zero, standard error
 message is issued and script terminated.
 If --verbose or --quiet option is specified, C<tools.pm_verbose> environment variable is set.
@@ -333,7 +333,7 @@ B<Synopsis:>
 B<Description:>
 Package variable. It determines verbosity level, which affects C<warning()>, C<info()>, and
-C<debug()> subroutnes .
+C<debug()> subroutines .
 The variable gets initial value from C<tools.pm_verbose> environment variable if it is exists.
 If the environment variable does not exist, variable is set to 2.
@@ -357,7 +357,7 @@ B<Synopsis:>
 B<Description:>
 Package variable. It determines whether C<debug()>, C<info()>, C<warning()>, C<runtime_error()>
-subroutnes print timestamps or not.
+subroutines print timestamps or not.
 The variable gets initial value from C<tools.pm_timestamps> environment variable if it is exists.
 If the environment variable does not exist, variable is set to false.
@@ -700,7 +700,7 @@ Look for "echo" in the directories specified in PATH:
 my $echo = which( "echo" );
-Look for all occurenses of "cp" in the PATH:
+Look for all occurrences of "cp" in the PATH:
 my @cps = which( "cp", -all => 1 );
@@ -1488,7 +1488,7 @@ B<Arguments:>
 =item B<$file>
-The name or handle of file to writte to.
+The name or handle of file to write to.
 =item B<$bulk>

@@ -71,7 +71,7 @@ To compile Fortran applications, compile with gfortran, link with clang:
 ## Runtime Flags
 TSan runtime flags are passed via **TSAN&#95;OPTIONS** environment variable,
-we highly recommend the following option to aviod false alerts for the
+we highly recommend the following option to avoid false alerts for the
 OpenMP or MPI runtime implementation:
 export TSAN_OPTIONS="ignore_noninstrumented_modules=1"

@@ -258,7 +258,7 @@ template <typename T, int N> struct DataPool {
 T data;
 };
 // We alloc without initialize the memory. We cannot call constructors.
-// Therfore use malloc!
+// Therefore use malloc!
 pooldata *datas = (pooldata *)malloc(sizeof(pooldata) * N);
 memory.push_back(datas);
 for (int i = 0; i < N; i++) {
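
The final hunk is in the tool-side DataPool, whose comment explains that the memory is deliberately allocated uninitialized — constructors cannot run there — so malloc is used instead of new. A minimal sketch of that allocation pattern, simplified from (and not identical to) the real template:

#include <cstdlib>
#include <vector>

// Hypothetical sketch: one malloc grabs N raw slots; no constructors run,
// mirroring the "cannot call constructors, therefore use malloc" comment.
template <typename T, int N> struct SimplePool {
  std::vector<T *> free_slots;  // slots available to hand out
  std::vector<void *> memory;   // malloc'ed blocks, released in the destructor
  void grow() {
    T *block = (T *)std::malloc(sizeof(T) * N); // raw memory, no T() calls
    memory.push_back(block);
    for (int i = 0; i < N; i++)
      free_slots.push_back(&block[i]);
  }
  ~SimplePool() {
    for (void *p : memory)
      std::free(p);
  }
};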