diff --git a/parsec/mca/sched/pcb/sched_pcb.h b/parsec/mca/sched/pcb/sched_pcb.h
new file mode 100644
index 000000000..93ddba31b
--- /dev/null
+++ b/parsec/mca/sched/pcb/sched_pcb.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2022      The University of Tennessee and The University
+ *                         of Tennessee Research Foundation.  All rights
+ *                         reserved.
+ * $COPYRIGHT$
+ *
+ * Additional copyrights may follow
+ *
+ * $HEADER$
+ */
+
+/**
+ * @file
+ *
+ * Priority Controlled Binding scheduler
+ *
+ * This scheduler uses some bits of the priority word attached to each
+ * task to define which set of threads can execute the task.
+ *
+ * The bits used in the priority are defined by the MCA parameter
+ * sched_pcb_group_mask, which should contain enough consecutive bits
+ * to express a number between 0 and N (inclusive), where N is the
+ * number of 'thread groups'.
+ *
+ * Each computing thread of a given process belongs to a thread group.
+ * If PaRSEC is compiled with HWLOC, group membership is defined by the
+ * HWLOC tree hierarchy and the MCA parameter sched_pcb_sharing_level:
+ * a sched_pcb_sharing_level of L means that all threads bound to cores
+ * under the same node of depth L in the HWLOC tree belong to the same
+ * group. Setting sched_pcb_sharing_level to 0 puts all threads in the
+ * same group (they are all under the root of the tree), and setting it
+ * to parsec_hwloc_nb_levels()-1 puts each thread in its own group, by
+ * itself. Intermediate values produce different results depending on
+ * the machine hierarchy.
+ *
+ * If PaRSEC is compiled without HWLOC, the MCA parameter is not exposed,
+ * and there is a single behavior: each thread belongs to its own group,
+ * by itself.
+ *
+ * There is a 'special' group: group 0 (the other groups are named 1 to
+ * N). Tasks bound to that group are in fact shared between all threads
+ * (as is usual for other schedulers).
+ *
+ * Thus, tasks with priority 0 are always scheduled opportunistically on
+ * any thread. Because startup tasks initialize their priority to -1,
+ * tasks with priority -1 are also handled specially: any thread may
+ * execute them (they are assigned to group 0 despite their priority).
+ *
+ * Lastly, the scheduler uses the priority value to order all the tasks
+ * that are bound to a given group.
+ *
+ * At task selection time, a thread compares the priority of the highest
+ * priority task of group 0 with that of the highest priority task of its
+ * own group, and selects whichever is higher.
+ *
+ * Access to the task lists is protected with locks.
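+ *
+ * As a worked example (using the default sched_pcb_group_mask of
+ * 0x7f000000, i.e. a shift of 24 -- these are the defaults registered in
+ * sched_pcb_component.c, not a fixed property of the scheduler): a task
+ * that should be bound to group 3 with an in-group priority of 42 would
+ * set its priority word to (3 << 24) | 42, while a priority word of 42
+ * alone (no group bits set) leaves the task in the shared group 0.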
+ */
+
+#ifndef MCA_SCHED_PCB_H
+#define MCA_SCHED_PCB_H
+
+#include "parsec/parsec_config.h"
+#include "parsec/mca/mca.h"
+#include "parsec/mca/sched/sched.h"
+
+
+BEGIN_C_DECLS
+
+/**
+ * Globally exported variables
+ */
+PARSEC_DECLSPEC extern const parsec_sched_base_component_t parsec_sched_pcb_component;
+PARSEC_DECLSPEC extern const parsec_sched_module_t parsec_sched_pcb_module;
+/* static accessor */
+mca_base_component_t *sched_pcb_static_component(void);
+extern int sched_pcb_sharing_level;
+extern int sched_pcb_group_mask;
+extern int sched_pcb_group_shift;
+
+END_C_DECLS
+#endif /* MCA_SCHED_PCB_H */
diff --git a/parsec/mca/sched/pcb/sched_pcb_component.c b/parsec/mca/sched/pcb/sched_pcb_component.c
new file mode 100644
index 000000000..4ff8c1925
--- /dev/null
+++ b/parsec/mca/sched/pcb/sched_pcb_component.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2022      The University of Tennessee and The University
+ *                         of Tennessee Research Foundation.  All rights
+ *                         reserved.
+ * $COPYRIGHT$
+ *
+ * Additional copyrights may follow
+ *
+ * $HEADER$
+ *
+ * These symbols are in a file by themselves to provide nice linker
+ * semantics.  Since linkers generally pull in symbols by object
+ * files, keeping these symbols as the only symbols in this file
+ * prevents utility programs such as "ompi_info" from having to import
+ * entire components just to query their version and parameters.
+ */
+
+#include "parsec/parsec_config.h"
+#include "parsec/runtime.h"
+
+#include "parsec/mca/sched/sched.h"
+#include "parsec/mca/sched/pcb/sched_pcb.h"
+#include "parsec/papi_sde.h"
+#include "parsec/utils/debug.h"
+#include "parsec/utils/mca_param.h"
+
+#if defined(PARSEC_HAVE_HWLOC)
+#include "parsec/parsec_hwloc.h"
+#endif
+
+/*
+ * Local functions
+ */
+static int sched_pcb_component_query(mca_base_module_t **module, int *priority);
+static int sched_pcb_component_register(void);
+
+int sched_pcb_sharing_level = 1;
+int sched_pcb_group_mask = 0x7f000000;
+int sched_pcb_group_shift = 24;
+
+/*
+ * Instantiate the public struct with all of our public information
+ * and pointers to our public functions in it
+ */
+const parsec_sched_base_component_t parsec_sched_pcb_component = {
+
+    /* First, the mca_component_t struct containing meta information
+       about the component itself */
+
+    {
+        PARSEC_SCHED_BASE_VERSION_2_0_0,
+
+        /* Component name and version */
+        "pcb",
+        "", /* options */
+        PARSEC_VERSION_MAJOR,
+        PARSEC_VERSION_MINOR,
+
+        /* Component open and close functions */
+        NULL, /*< No open: sched_pcb is always available, no need to check at runtime */
+        NULL, /*< No close: open did not allocate any resource, no need to release it */
+        sched_pcb_component_query,
+        /*< specific query to return the module and add it to the list of available modules */
+        sched_pcb_component_register, /*< Register at least the SDE events */
+        "", /*< no reserve */
+    },
+    {
+        /* The component has no metadata */
+        MCA_BASE_METADATA_PARAM_NONE,
+        "", /*< no reserve */
+    }
+};
+
+mca_base_component_t *sched_pcb_static_component(void)
+{
+    return (mca_base_component_t *)&parsec_sched_pcb_component;
+}
+
+static int sched_pcb_component_query(mca_base_module_t **module, int *priority)
+{
+    /* module type should be: const mca_base_module_t ** */
+    void *ptr = (void*)&parsec_sched_pcb_module;
+    *priority = 2;
+    *module = (mca_base_module_t *)ptr;
+    return MCA_SUCCESS;
+}
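+
+/* Illustrative usage sketch (assuming PaRSEC's usual MCA conventions; the
+ * exact command-line and environment spellings are not verified here): once
+ * the parameters below are registered, the scheduler would typically be
+ * selected with something like
+ *     PARSEC_MCA_sched=pcb PARSEC_MCA_sched_pcb_sharing_level=2 ./app
+ * or the equivalent --mca arguments passed after the "--" separator of the
+ * test drivers. */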
+
+static int sched_pcb_component_register(void)
+{
+    PARSEC_PAPI_SDE_DESCRIBE_COUNTER("SCHEDULER::PENDING_TASKS::SCHED=PCB",
+                                     "the number of pending tasks for the PCB scheduler");
+    PARSEC_PAPI_SDE_DESCRIBE_COUNTER("SCHEDULER::PENDING_TASKS::QUEUE=<QID>::SCHED=PCB",
+                                     "the number of pending tasks in task queue <QID> of the PCB scheduler");
+    sched_pcb_sharing_level = 1;
+#if defined(PARSEC_HAVE_HWLOC)
+    sched_pcb_sharing_level = parsec_hwloc_nb_levels()-1;
+    parsec_mca_param_reg_int_name("sched_pcb", "sharing_level",
+                                  "Defines at what level threads share the same task list for the Priority Controlled Binding scheduler. "
+                                  "Level 1 means each thread has its own task list, level 2 looks one level above in the HWLOC hierarchy, etc.",
+                                  false, false, parsec_hwloc_nb_levels()-1, &sched_pcb_sharing_level);
+    if(sched_pcb_sharing_level <= 0)
+        sched_pcb_sharing_level = 1;
+    if(sched_pcb_sharing_level >= parsec_hwloc_nb_levels())
+        sched_pcb_sharing_level = parsec_hwloc_nb_levels()-1;
+#endif
+    parsec_mca_param_reg_int_name("sched_pcb", "group_mask",
+                                  "Defines which bits of the priority are used to designate a thread group. The other bits of the priority value "
+                                  "are used to define the priority of the task within that group.",
+                                  false, false, 0x7f000000, &sched_pcb_group_mask);
+    if(sched_pcb_group_mask != 0x7f000000) {
+        sched_pcb_group_shift = 0;
+        while( (unsigned int)sched_pcb_group_shift < 8*sizeof(int) &&
+               (((sched_pcb_group_mask >> sched_pcb_group_shift) & 1) == 0) )
+            sched_pcb_group_shift++;
+        if(sched_pcb_group_shift == 8*sizeof(int)) {
+            parsec_warning("Priority Controlled Binding Scheduler (sched_pcb): sched_pcb_group_mask is set to 0. Scheduler might not work as intended.");
+        }
+    }
+
+    return MCA_SUCCESS;
+}
diff --git a/parsec/mca/sched/pcb/sched_pcb_module.c b/parsec/mca/sched/pcb/sched_pcb_module.c
new file mode 100644
index 000000000..53d73ac21
--- /dev/null
+++ b/parsec/mca/sched/pcb/sched_pcb_module.c
@@ -0,0 +1,418 @@
+/**
+ * Copyright (c) 2022      The University of Tennessee and The University
+ *                         of Tennessee Research Foundation.  All rights
+ *                         reserved.
+ * $COPYRIGHT$
+ *
+ * Additional copyrights may follow
+ *
+ * $HEADER$
+ *
+ */
+
+#include "parsec/parsec_config.h"
+#include "parsec/parsec_internal.h"
+#include "parsec/utils/debug.h"
+#include "parsec/class/lifo.h"
+
+#include "parsec/mca/sched/sched.h"
+#include "parsec/mca/sched/pcb/sched_pcb.h"
+#include "parsec/mca/pins/pins.h"
+#include "parsec/parsec_hwloc.h"
+#include "parsec/papi_sde.h"
+
+/**
+ * Module functions
+ */
+static int sched_pcb_install(parsec_context_t* master);
+static int sched_pcb_schedule(parsec_execution_stream_t* es,
+                              parsec_task_t* new_context,
+                              int32_t distance);
+static parsec_task_t*
+sched_pcb_select(parsec_execution_stream_t *es,
+                 int32_t* distance);
+static void sched_pcb_remove(parsec_context_t* master);
+static int sched_pcb_init(parsec_execution_stream_t* es, struct parsec_barrier_t* barrier);
+static int sched_pcb_warning_issued = 0;
+
+const parsec_sched_module_t parsec_sched_pcb_module = {
+    &parsec_sched_pcb_component,
+    {
+        sched_pcb_install,
+        sched_pcb_init,
+        sched_pcb_schedule,
+        sched_pcb_select,
+        NULL,
+        sched_pcb_remove
+    }
+};
+
+/**
+ * @brief scheduler structure: each subset of threads holds a single (locked) list sorted by priority.
+ * This structure is locked for any access. Any thread can push into this
+ * structure, but only threads that belong to the subset can pop from it.
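+ *
+ * Memory layout note (this mirrors the allocation actually performed in
+ * sched_pcb_init below): the structure ends with a one-element array and
+ * is over-allocated as
+ *   malloc(sizeof(sched_pcb_scheduler_object_t)
+ *          + (nb_groups-1) * sizeof(sched_pcb_scheduler_object_t*))
+ * so that groups[0 .. nb_groups-1] is a valid index range despite the
+ * declared size of 1.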
+ */
+typedef struct sched_pcb_scheduler_object_s {
+    parsec_list_t group_tasks;   /**< List of tasks bound to the group */
+    parsec_list_t *shared_tasks; /**< List of tasks shared between all threads */
+    int group_id;                /**< Group identifier for this group. NB: group identifiers start at 1 -- 0 identifies the group of shared tasks */
+    int nb_groups;               /**< Number of groups found for this process */
+    struct sched_pcb_scheduler_object_s *groups[1]; /**< Array of nb_groups entries, used to quickly find any other group's lists from any scheduler object */
+} sched_pcb_scheduler_object_t;
+#define SCHED_PCB_SO(es) ((sched_pcb_scheduler_object_t*) (es)->scheduler_object)
+
+/**
+ * @brief
+ *   Installs the scheduler on a parsec context
+ *
+ * @details
+ *   This function has nothing to do, as all operations are done in
+ *   init.
+ *
+ *  @param[INOUT] master the parsec_context_t on which this scheduler should be installed
+ *  @return PARSEC_SUCCESS iff this scheduler has been installed
+ */
+static int sched_pcb_install( parsec_context_t *master )
+{
+    sched_pcb_warning_issued = 0;
+    (void)master;
+    return PARSEC_SUCCESS;
+}
+
+#if defined(PARSEC_PAPI_SDE)
+/**
+ * @brief counts the number of items in the group tasks list
+ *
+ * @details only used if PAPI_SDE is enabled; this is called by PAPI
+ * when the corresponding counter is read.
+ */
+static long long int sched_pcb_group_tasks_length( void *_es )
+{
+    parsec_execution_stream_t *es = (parsec_execution_stream_t *)_es;
+    sched_pcb_scheduler_object_t *so = SCHED_PCB_SO(es);
+    long long int len = 0;
+    PARSEC_LIST_ITERATOR(&so->group_tasks, item, {len++;});
+    return len;
+}
+
+/**
+ * @brief counts the number of items in the shared tasks list
+ *
+ * @details only used if PAPI_SDE is enabled; this is called by PAPI
+ * when the corresponding counter is read.
+ */
+static long long int sched_pcb_shared_tasks_length( void *_es )
+{
+    parsec_execution_stream_t *es = (parsec_execution_stream_t *)_es;
+    sched_pcb_scheduler_object_t *so = SCHED_PCB_SO(es);
+    long long int len = 0;
+    PARSEC_LIST_ITERATOR(so->shared_tasks, item, {len++;});
+    return len;
+}
+#endif
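+
+/* Worked example for the group computation below (default mask 0x7f000000,
+ * shift 24, and assuming nb_groups == 4 on this process):
+ *   p = -1           -> group 0 (startup tasks are shared)
+ *   p = 42           -> group 0 (no group bits set)
+ *   p = (3<<24)|42   -> group 3
+ *   p = (6<<24)|42   -> ((6-1) % 4) + 1 == 2, i.e. requested groups wrap
+ *                       around onto the existing ones. */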
+
+// Map a priority word onto a group, making sure the group requested by the
+// priority fits in the existing set of groups
+static int sched_pcb_group(int p, sched_pcb_scheduler_object_t *so) {
+    int group = (p & sched_pcb_group_mask);
+    if(-1 == p)
+        return 0;
+    if(0 == group)
+        return 0;
+    group = group >> sched_pcb_group_shift;
+    group = ((group - 1) % so->nb_groups) + 1;
+    return group;
+}
+
+/**
+ * @brief
+ *   Initializes the scheduler on the calling execution stream
+ *
+ * @details
+ *   Creates a list per group if this es is the master of the group, stores
+ *   it into es->scheduler_object, and synchronizes with the other execution
+ *   streams using the barrier
+ *
+ *  @param[INOUT] es the calling execution stream
+ *  @param[INOUT] barrier the barrier used to synchronize all the es
+ *  @return PARSEC_SUCCESS in case of success, a negative number otherwise
+ */
+static int sched_pcb_init(parsec_execution_stream_t* es, struct parsec_barrier_t* barrier)
+{
+    sched_pcb_scheduler_object_t *so;
+    // Without HWLOC, there is always one group per thread
+    int master_id = es->th_id;
+    int nb_groups = es->virtual_process->nb_cores;
+
+#if defined(PARSEC_HAVE_HWLOC)
+    master_id = parsec_hwloc_master_id(sched_pcb_sharing_level, es->th_id);
+
+    nb_groups = 0;
+    for(int t = 0; t < es->virtual_process->nb_cores; t++) {
+        if(t == parsec_hwloc_master_id(sched_pcb_sharing_level, t))
+            nb_groups++;
+    }
+#endif
+
+    if(master_id == es->th_id) {
+        so = (sched_pcb_scheduler_object_t*)malloc(sizeof(sched_pcb_scheduler_object_t) + (nb_groups-1)*sizeof(sched_pcb_scheduler_object_t*));
+        so->group_id = -1;
+        so->nb_groups = nb_groups;
+        PARSEC_OBJ_CONSTRUCT(&so->group_tasks, parsec_list_t);
+        es->scheduler_object = so;
+
+        if(0 == es->th_id) {
+            so->shared_tasks = PARSEC_OBJ_NEW(parsec_list_t);
+        }
+
+        parsec_barrier_wait(barrier);
+
+        if(0 != es->th_id) {
+            so->shared_tasks = SCHED_PCB_SO(es->virtual_process->execution_streams[0])->shared_tasks;
+        }
+
+#if defined(PARSEC_PAPI_SDE)
+        char event_name[PARSEC_PAPI_SDE_MAX_COUNTER_NAME_LEN];
+        snprintf(event_name, PARSEC_PAPI_SDE_MAX_COUNTER_NAME_LEN, "SCHEDULER::PENDING_TASKS::QUEUE=%d::SCHED=PCB", es->th_id);
+        parsec_papi_sde_register_fp_counter(event_name, PAPI_SDE_RO|PAPI_SDE_INSTANT, PAPI_SDE_int, (papi_sde_fptr_t)sched_pcb_group_tasks_length, es);
+        parsec_papi_sde_add_counter_to_group(event_name, "SCHEDULER::PENDING_TASKS", PAPI_SDE_SUM);
+        parsec_papi_sde_add_counter_to_group(event_name, "SCHEDULER::PENDING_TASKS::SCHED=PCB", PAPI_SDE_SUM);
+
+        if(0 == es->th_id) {
+            snprintf(event_name, PARSEC_PAPI_SDE_MAX_COUNTER_NAME_LEN, "SCHEDULER::PENDING_TASKS::QUEUE=SHARED::SCHED=PCB");
+            parsec_papi_sde_register_fp_counter(event_name, PAPI_SDE_RO|PAPI_SDE_INSTANT, PAPI_SDE_int, (papi_sde_fptr_t)sched_pcb_shared_tasks_length, es);
+            parsec_papi_sde_add_counter_to_group(event_name, "SCHEDULER::PENDING_TASKS", PAPI_SDE_SUM);
+            parsec_papi_sde_add_counter_to_group(event_name, "SCHEDULER::PENDING_TASKS::SCHED=PCB", PAPI_SDE_SUM);
+        }
+#endif
+    } else {
+        assert(0 != es->th_id);
+        parsec_barrier_wait(barrier);
+        es->scheduler_object = es->virtual_process->execution_streams[master_id]->scheduler_object;
+    }
+
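+    /* Synchronization sketch (summarizing the four barrier phases of this
+     * function): after barrier 1 the shared_tasks list created by thread 0
+     * is visible to all group masters; after barrier 2 every thread has set
+     * es->scheduler_object; after barrier 3 thread 0 has numbered the groups
+     * and filled its groups[] array; after barrier 4 every group master has
+     * copied that array, so all threads can route tasks to any group. */
+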
+    // Thread 0 needs to wait until all the others have set their scheduler object
+    parsec_barrier_wait(barrier);
+
+    if(0 == es->th_id) {
+        // Core 0 names all the groups in sequence
+        int group_id = 1; // NB: group IDs start at 1, because 0 is reserved to designate the shared list of tasks
+        sched_pcb_scheduler_object_t *tso;
+        for(int t = 0; t < es->virtual_process->nb_cores; t++) {
+            tso = SCHED_PCB_SO(es->virtual_process->execution_streams[t]);
+            if(tso->group_id == -1) {
+                so->groups[group_id-1] = tso;
+                tso->group_id = group_id++;
+            }
+        }
+    }
+
+    // Make sure the group assignments and the array of groups are visible to all
+    // before copying group 1's array
+    parsec_barrier_wait(barrier);
+
+    if(so->group_id > 1 && es->th_id == master_id) {
+        sched_pcb_scheduler_object_t *so0 = SCHED_PCB_SO(es->virtual_process->execution_streams[0]);
+        memcpy(so->groups, so0->groups, sizeof(sched_pcb_scheduler_object_t*)*nb_groups);
+    }
+
+    // ... and make sure the groups array is visible to every other thread in this group
+    parsec_barrier_wait(barrier);
+
+    return PARSEC_SUCCESS;
+}
+
+/**
+ * @brief
+ *   Selects a task to run
+ *
+ * @details
+ *   Takes the highest priority task between the head of the calling execution
+ *   stream's group list and the head of the shared list of tasks; returns NULL
+ *   if both are empty.
+ *
+ *  @param[INOUT] es the calling execution stream
+ *  @param[OUT] distance the distance of the selected task. This scheduler
+ *              always returns 0
+ *  @return the selected task
+ */
+static parsec_task_t* sched_pcb_select(parsec_execution_stream_t *es,
+                                       int32_t* distance)
+{
+    parsec_task_t *group_task = NULL, *shared_task = NULL, *pop_task = NULL, *candidate_task = NULL;
+    parsec_list_t *candidate_list = NULL;
+    sched_pcb_scheduler_object_t *so = SCHED_PCB_SO(es);
+    int group_task_priority, shared_task_priority;
+
+    *distance = 0;
+
+    for(;;) {
+        // Peek at the head of both lists to find which task has the highest priority
+        parsec_list_lock(&so->group_tasks);
+        group_task = (parsec_task_t*)PARSEC_LIST_ITERATOR_FIRST(&so->group_tasks);
+        if((parsec_list_item_t*)group_task != PARSEC_LIST_ITERATOR_END(&so->group_tasks)) {
+            group_task_priority = group_task->priority;
+        } else {
+            group_task = NULL;
+        }
+        parsec_list_unlock(&so->group_tasks);
+        parsec_list_lock(so->shared_tasks);
+        shared_task = (parsec_task_t*)PARSEC_LIST_ITERATOR_FIRST(so->shared_tasks);
+        if((parsec_list_item_t*)shared_task != PARSEC_LIST_ITERATOR_END(so->shared_tasks)) {
+            shared_task_priority = shared_task->priority;
+        } else {
+            shared_task = NULL;
+        }
+        parsec_list_unlock(so->shared_tasks);
+
+        // If one of the lists is empty, return the head of the other without caring for priority
+        if( NULL == shared_task ) {
+            pop_task = (parsec_task_t*)parsec_list_pop_front(&so->group_tasks);
+            if(NULL != pop_task) {
+                PARSEC_LIST_ITEM_SINGLETON(pop_task);
+            }
+            return pop_task;
+        }
+        if( NULL == group_task ) {
+            pop_task = (parsec_task_t*)parsec_list_pop_front(so->shared_tasks);
+            if(NULL != pop_task) {
+                PARSEC_LIST_ITEM_SINGLETON(pop_task);
+            }
+            return pop_task;
+        }
+
+        // Determine which list has the highest priority task
+        if( shared_task_priority > group_task_priority ) {
+            candidate_task = shared_task;
+            candidate_list = so->shared_tasks;
+        } else {
+            candidate_task = group_task;
+            candidate_list = &so->group_tasks;
+        }
+
+        // Try to pop it... But if the list head has changed, do not pop a
+        // random task: push it back and start again.
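+        // (Another thread sharing the same group list -- or any thread, for
+        // the shared list -- may have raced us between the unlocked peek
+        // above and the pop below, which is why the result is re-checked.)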
+        pop_task = (parsec_task_t*)parsec_list_pop_front(candidate_list);
+        PARSEC_LIST_ITEM_SINGLETON(pop_task);
+        if(pop_task == candidate_task) {
+            return pop_task;
+        }
+        parsec_list_push_sorted(candidate_list, &pop_task->super, parsec_execution_context_priority_comparator);
+    }
+}
+
+/**
+ * @brief
+ *  Schedules a set of ready tasks on the calling execution stream
+ *
+ * @details
+ *  Splits the chain of tasks based on their priority and the
+ *  group this priority points to.
+ *
+ *  @param[INOUT] es the calling execution stream
+ *  @param[INOUT] new_context the ring of ready tasks to schedule
+ *  @param[IN] distance the distance hint (ignored by this scheduler)
+ *  @return PARSEC_SUCCESS in case of success, a negative number
+ *          otherwise.
+ */
+static int sched_pcb_schedule(parsec_execution_stream_t* es,
+                              parsec_task_t* new_context,
+                              int32_t distance)
+{
+    sched_pcb_scheduler_object_t *so;
+    parsec_list_t *target;
+    parsec_task_t *grp_tasks, *other_tasks;
+    int group;
+
+    (void)distance;
+
+    so = SCHED_PCB_SO(es);
+
+    other_tasks = new_context;
+    while(other_tasks != NULL) {
+        grp_tasks = other_tasks;
+        other_tasks = (parsec_task_t*)parsec_list_item_ring_chop(&grp_tasks->super);
+        PARSEC_LIST_ITEM_SINGLETON(&grp_tasks->super);
+
+        group = sched_pcb_group(grp_tasks->priority, so);
+
+        // Chain all the immediately following tasks that belong to the same group into grp_tasks
+        while(NULL != other_tasks && sched_pcb_group(other_tasks->priority, so) == group) {
+            parsec_task_t *tmp = other_tasks;
+            other_tasks = (parsec_task_t*)parsec_list_item_ring_chop(&tmp->super);
+            PARSEC_LIST_ITEM_SINGLETON(&tmp->super);
+            parsec_list_item_ring_push(&grp_tasks->super, &tmp->super);
+        }
+
+        // We found at least one task left in other_tasks that belongs to a different group;
+        // we continue iterating on other_tasks, but now we have a sentinel to stop the iteration
+        if(NULL != other_tasks) {
+            parsec_task_t *t = (parsec_task_t*)PARSEC_LIST_ITEM_NEXT(&other_tasks->super);
+            do {
+                if(group == sched_pcb_group(t->priority, so)) {
+                    parsec_task_t *tmp = t;
+                    t = (parsec_task_t*)parsec_list_item_ring_chop(&tmp->super);
+                    PARSEC_LIST_ITEM_SINGLETON(&tmp->super);
+                    assert(NULL != t /* other_tasks should at least belong to this ring */);
+
+                    parsec_list_item_ring_push(&grp_tasks->super, &tmp->super);
+                } else {
+                    t = (parsec_task_t*)PARSEC_LIST_ITEM_NEXT(&t->super);
+                }
+            } while(t != other_tasks);
+        }
+
+        // Chain the tasks belonging to the group into the appropriate target list
+        if(group > 0)
+            target = &so->groups[group-1]->group_tasks;
+        else
+            target = so->shared_tasks;
+        parsec_list_chain_sorted(target, &grp_tasks->super, parsec_execution_context_priority_comparator);
+    }
+
+    return PARSEC_SUCCESS;
+}
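+
+/* Example trace of the grouping pass above (default mask, a process with 3
+ * groups): a ring of tasks with priorities
+ *   [ (1<<24)|5, 7, (1<<24)|2, (2<<24)|9 ]
+ * results in three sorted pushes: {(1<<24)|5, (1<<24)|2} onto group 1's
+ * list, {7} onto the shared list (group 0), and {(2<<24)|9} onto group 2's
+ * list. */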
+
+/**
+ * @brief
+ *  Removes the scheduler from the parsec_context_t
+ *
+ * @details
+ *  Destroys the per-group and shared task lists of the execution streams
+ *
+ *  @param[INOUT] master the parsec_context_t from which the scheduler should
+ *                be removed
+ */
+static void sched_pcb_remove( parsec_context_t *master )
+{
+    int p, t;
+    parsec_execution_stream_t *es;
+    sched_pcb_scheduler_object_t *so;
+    parsec_vp_t *vp;
+
+    for(p = 0; p < master->nb_vp; p++) {
+        vp = master->virtual_processes[p];
+        for(t = 0; t < vp->nb_cores; t++) {
+            int master_id = t;
+#if defined(PARSEC_HAVE_HWLOC)
+            master_id = parsec_hwloc_master_id(sched_pcb_sharing_level, t);
+#endif
+            es = vp->execution_streams[t];
+            if(t == master_id) {
+                so = SCHED_PCB_SO(es);
+                assert(so->group_tasks.super.obj_reference_count == 1);
+                PARSEC_OBJ_DESTRUCT(&so->group_tasks);
+
+                if(0 == t) {
+                    PARSEC_OBJ_RELEASE(so->shared_tasks);
+                    assert(NULL == so->shared_tasks);
+                }
+
+                free(so);
+                PARSEC_PAPI_SDE_UNREGISTER_COUNTER("SCHEDULER::PENDING_TASKS::QUEUE=%d::SCHED=PCB", t);
+            }
+
+            es->scheduler_object = NULL;
+        }
+    }
+    PARSEC_PAPI_SDE_UNREGISTER_COUNTER("SCHEDULER::PENDING_TASKS::SCHED=PCB");
+    PARSEC_PAPI_SDE_UNREGISTER_COUNTER("SCHEDULER::PENDING_TASKS");
+}
diff --git a/tests/runtime/scheduling/CMakeLists.txt b/tests/runtime/scheduling/CMakeLists.txt
index 81dcb381c..5729dec14 100644
--- a/tests/runtime/scheduling/CMakeLists.txt
+++ b/tests/runtime/scheduling/CMakeLists.txt
@@ -1,6 +1,9 @@
 include(ParsecCompilePTG)
 
-parsec_addtest_executable(C schedmicro SOURCES main.c ep_wrapper.c schedmicro_data.c)
+parsec_addtest_executable(C schedmicro SOURCES schedmicro.c ep_wrapper.c rt_data.c)
 target_ptg_sources(schedmicro PRIVATE "ep.jdf")
 target_link_libraries(schedmicro PRIVATE m)
+
+parsec_addtest_executable(C pcb SOURCES pcb-main.c pcb_wrapper.c rt_data.c)
+target_ptg_sources(pcb PRIVATE "pcb.jdf")
+target_link_libraries(pcb PRIVATE m)
diff --git a/tests/runtime/scheduling/pcb-main.c b/tests/runtime/scheduling/pcb-main.c
new file mode 100644
index 000000000..e996b6f20
--- /dev/null
+++ b/tests/runtime/scheduling/pcb-main.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2013-2020 The University of Tennessee and The University
+ *                         of Tennessee Research Foundation.  All rights
+ *                         reserved.
+ */
+
+#include <stdio.h>
+#include "parsec/runtime.h"
+#include "parsec/utils/debug.h"
+#include "pcb_wrapper.h"
+#include "rt_data.h"
+#include "parsec/os-spec-timing.h"
+#if defined(PARSEC_HAVE_STRING_H)
+#include <string.h>
+#endif /* defined(PARSEC_HAVE_STRING_H) */
+#include <stdlib.h>
+#if defined(PARSEC_HAVE_MPI)
+#include <mpi.h>
+#endif /* defined(PARSEC_HAVE_MPI) */
+
+int main(int argc, char *argv[])
+{
+    parsec_context_t* parsec;
+    int rank, world, rc;
+    int nt = 64, level = 8;
+    parsec_data_collection_t *dcA;
+    parsec_taskpool_t *pcb;
+    int parsec_argc = 0;
+    char **parsec_argv = NULL;
+
+#if defined(PARSEC_HAVE_MPI)
+    {
+        int provided;
+        MPI_Init_thread(&argc, &argv, MPI_THREAD_SERIALIZED, &provided);
+    }
+    MPI_Comm_size(MPI_COMM_WORLD, &world);
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+#else
+    world = 1;
+    rank = 0;
+#endif
+    for(int a = 1; a < argc; a++) {
+        if(strcmp(argv[a], "--") == 0) {
+            parsec_argc = argc - a;
+            parsec_argv = &argv[a];
+            break;
+        }
+        if(strcmp(argv[a], "-t") == 0) {
+            a++;
+            nt = atoi(argv[a]);
+            continue;
+        }
+        if(strcmp(argv[a], "-l") == 0) {
+            a++;
+            level = atoi(argv[a]);
+            continue;
+        }
+        fprintf(stderr, "Usage: %s [-t NT] [-l LEVEL] [-- <parsec options>]\n", argv[0]);
diff --git a/tests/runtime/scheduling/pcb.jdf b/tests/runtime/scheduling/pcb.jdf
new file mode 100644
--- /dev/null
+++ b/tests/runtime/scheduling/pcb.jdf
+INIT(t)
+  t = 0..0
+
+  :A(0)
+
+  CTL S -> (DEPTH >= 1) ? S TASK(1..NT, 1)
+
+BODY
+    /* Nothing to do in the INIT task.
+     * It is here to prevent all the tasks from being created at
+     * DAG-creation time.
+     * It has the default priority, so it can run on any thread. */
+END
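+
+/* The priority expression below encodes the chain index i both as the PCB
+ * group id (i << 24, i.e. the bits selected by the default value of the
+ * sched_pcb_group_mask MCA parameter) and as the in-group priority (low
+ * bits), so each chain of tasks is pinned to one thread group. This
+ * assumes the default 0x7f000000 mask; with a different mask the shift
+ * would differ. */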
+TASK(i, l)
+  i = 1..NT
+  l = 1..DEPTH
+  p = %{ return (i << 24) + i; %}
+
+  :A(i-1)
+
+  CTL S <- (l == 1) ? S INIT(0)
+                    : S TASK(i, l-1)
+        -> (l < DEPTH) ? S TASK(i, l+1)
+
+  ;p
+
+BODY
+    printf("TASK(%3d, %3d) with priority %08x on thread %2d of process %2d\n",
+           i, l, this_task->priority, es->th_id, es->virtual_process->parsec_context->my_rank);
+END
diff --git a/tests/runtime/scheduling/pcb_wrapper.c b/tests/runtime/scheduling/pcb_wrapper.c
new file mode 100644
index 000000000..d27c99016
--- /dev/null
+++ b/tests/runtime/scheduling/pcb_wrapper.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014-2020 The University of Tennessee and The University
+ *                         of Tennessee Research Foundation.  All rights
+ *                         reserved.
+ */
+
+#include "parsec/runtime.h"
+#include <stdio.h>
+
+#include "parsec/data_distribution.h"
+#include "parsec/arena.h"
+
+#include "pcb.h"
+#include "pcb_wrapper.h"
+
+/**
+ * @param [IN] A      the data, already distributed and allocated
+ * @param [IN] nt     number of tasks at a given level
+ * @param [IN] level  number of levels
+ *
+ * @return the parsec taskpool to schedule.
+ */
+parsec_taskpool_t *pcb_new(parsec_data_collection_t *A, int nt, int level)
+{
+    parsec_pcb_taskpool_t *tp = NULL;
+
+    if( nt <= 0 || level <= 0 ) {
+        fprintf(stderr, "To work, PCB needs at least one task to run per level\n");
+        return (parsec_taskpool_t*)tp;
+    }
+
+    tp = parsec_pcb_new(nt, level, A);
+
+#if defined(PARSEC_HAVE_MPI)
+    {
+        MPI_Aint extent;
+#if defined(PARSEC_HAVE_MPI_20)
+        MPI_Aint lb = 0;
+        MPI_Type_get_extent(MPI_BYTE, &lb, &extent);
+#else
+        MPI_Type_extent(MPI_BYTE, &extent);
+#endif  /* defined(PARSEC_HAVE_MPI_20) */
+        /* The datatype is irrelevant as the example does not perform communications between nodes */
+        parsec_arena_datatype_construct( &tp->arenas_datatypes[PARSEC_pcb_DEFAULT_ADT_IDX],
+                                         extent, PARSEC_ARENA_ALIGNMENT_SSE,
+                                         MPI_BYTE );
+    }
+#endif
+
+    return (parsec_taskpool_t*)tp;
+}
diff --git a/tests/runtime/scheduling/pcb_wrapper.h b/tests/runtime/scheduling/pcb_wrapper.h
new file mode 100644
index 000000000..16366f024
--- /dev/null
+++ b/tests/runtime/scheduling/pcb_wrapper.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2014-2020 The University of Tennessee and The University
+ *                         of Tennessee Research Foundation.  All rights
+ *                         reserved.
+ */
+
+#ifndef _pcb_wrapper_h
+#define _pcb_wrapper_h
+
+#include "parsec/runtime.h"
+#include "parsec/data_distribution.h"
+
+/**
+ * @param [IN] A      the data, already distributed and allocated
+ * @param [IN] nt     number of tasks at a given level
+ * @param [IN] level  number of levels
+ *
+ * @return the parsec taskpool to schedule.
+ */
+parsec_taskpool_t *pcb_new(parsec_data_collection_t *A, int nt, int level);
+
+#endif
diff --git a/tests/runtime/scheduling/schedmicro_data.c b/tests/runtime/scheduling/rt_data.c
similarity index 99%
rename from tests/runtime/scheduling/schedmicro_data.c
rename to tests/runtime/scheduling/rt_data.c
index 101050527..def660a04 100644
--- a/tests/runtime/scheduling/schedmicro_data.c
+++ b/tests/runtime/scheduling/rt_data.c
@@ -6,7 +6,7 @@
 
 #include "parsec/runtime.h"
-#include "schedmicro_data.h"
+#include "rt_data.h"
 #include <stdarg.h>
 #include "parsec/data_distribution.h"
 #include "parsec/data_internal.h"
diff --git a/tests/runtime/scheduling/schedmicro_data.h b/tests/runtime/scheduling/rt_data.h
similarity index 100%
rename from tests/runtime/scheduling/schedmicro_data.h
rename to tests/runtime/scheduling/rt_data.h
diff --git a/tests/runtime/scheduling/main.c b/tests/runtime/scheduling/schedmicro.c
similarity index 99%
rename from tests/runtime/scheduling/main.c
rename to tests/runtime/scheduling/schedmicro.c
index 39b7e7cc9..ecb70e227 100644
--- a/tests/runtime/scheduling/main.c
+++ b/tests/runtime/scheduling/schedmicro.c
@@ -8,7 +8,7 @@
 #include "parsec/runtime.h"
 #include "parsec/utils/debug.h"
 #include "ep_wrapper.h"
-#include "schedmicro_data.h"
+#include "rt_data.h"
 #include "parsec/os-spec-timing.h"
 #if defined(PARSEC_HAVE_STRING_H)
 #include <string.h>