Mercurial > cgi-bin > hgwebdir.cgi > VMS > VMS_Implementations > VSs_impls > VSs__MC_shared_impl
changeset 47:a7c77d69d127 dev_expl_VP_and_DKU
update for api v. nanox-0.7
author | Nina Engelhardt <nengel@mailbox.tu-berlin.de> |
---|---|
date | Fri, 05 Jul 2013 15:36:42 +0200 |
parents | 6be6b0394537 |
children | b8bb94b990c4 |
files | nanos-vss.c |
diffstat | 1 files changed, 38 insertions(+), 38 deletions(-) [+] |
line diff
1.1 --- a/nanos-vss.c	Fri Jul 05 14:41:45 2013 +0200
1.2 +++ b/nanos-vss.c	Fri Jul 05 15:36:42 2013 +0200
1.3 @@ -23,11 +23,11 @@
1.4  }
1.5
1.6  nanos_err_t nanos_create_wd_compact(nanos_wd_t *wd, nanos_const_wd_definition_t *const_data, nanos_wd_dyn_props_t *dyn_props,
1.7 -        size_t data_size, void ** data, nanos_wg_t wg, nanos_copy_data_t **copies) {
1.8 -
1.9 +        size_t data_size, void ** data, nanos_wg_t wg, nanos_copy_data_t **copies, nanos_region_dimension_internal_t **dimensions) {
1.10 +
1.11      *wd = NULL;
1.12      return NANOS_OK;
1.13 -
1.14 +
1.15      VSsTaskStub* ts = (VSsTaskStub*) malloc(sizeof (VSsTaskStub));
1.16      void* alloc_data = malloc(data_size);
1.17      *data = alloc_data;
1.18 @@ -35,7 +35,7 @@
1.19
1.20      VSsTaskType* taskType = (VSsTaskType*) malloc(sizeof (VSsTaskType));
1.21
1.22 -    taskType->fn = *((VSsTaskFnPtr*)((nanos_device_t*) &const_data[1])->arg);
1.23 +    taskType->fn = *((VSsTaskFnPtr*) ((nanos_device_t*) & const_data[1])->arg);
1.24      taskType->sizeOfArgs = data_size;
1.25
1.26      ts->taskType = taskType;
1.27 @@ -45,19 +45,19 @@
1.28      return NANOS_OK;
1.29  }
1.30
1.31 -nanos_err_t nanos_submit(nanos_wd_t wd, size_t num_deps, nanos_dependence_t *deps, nanos_team_t team) {
1.32 +nanos_err_t nanos_submit(nanos_wd_t wd, size_t num_data_accesses, nanos_data_access_t *data_accesses, nanos_team_t team) {
1.33
1.34      return NANOS_UNIMPLEMENTED;
1.35 -
1.36 -    int32* depsTypes = malloc(sizeof (int32) * num_deps);
1.37 -    size_t* depsSizes = malloc(sizeof (size_t) * num_deps);
1.38 -    void** depsAddrs = malloc(sizeof (void*)*num_deps);
1.39 +
1.40 +    int32* depsTypes = malloc(sizeof (int32) * num_data_accesses);
1.41 +    size_t* depsSizes = malloc(sizeof (size_t) * num_data_accesses);
1.42 +    void** depsAddrs = malloc(sizeof (void*)*num_data_accesses);
1.43
1.44      int i;
1.45 -    for (i = 0; i < num_deps; i++) {
1.46 -        depsAddrs[i] = (void *) ( (char *) (*deps[i].address) + deps[i].offset );
1.47 -        depsTypes[i] = (deps[i].flags.output) ? WRITER : READER;
1.48 -        depsSizes[i] = deps[i].size;
1.49 +    for (i = 0; i < num_data_accesses; i++) {
1.50 +        depsAddrs[i] = (void*)((uintptr_t)data_accesses[i].address + data_accesses[i].offset );
1.51 +        depsTypes[i] = (data_accesses[i].flags.output) ? WRITER : READER;
1.52 +        depsSizes[i] = data_accesses[i].dimensions[0].size;
1.53      }
1.54
1.55      VSsTaskStub* ts = (VSsTaskStub*) wd;
1.56 @@ -65,7 +65,7 @@
1.57
1.58
1.59      VSsTaskType* taskType = ts->taskType;
1.60 -    taskType->numDeps = num_deps;
1.61 +    taskType->numDeps = num_data_accesses;
1.62      taskType->depsTypes = depsTypes;
1.63      taskType->depsSizes = depsSizes;
1.64
1.65 @@ -82,7 +82,7 @@
1.66      int32* taskID;
1.67      taskID = VSs__create_taskID_of_size(1);
1.68      taskID[1] = tasks_created++;
1.69 -
1.70 +
1.71      reqData.taskID = taskID;
1.72
1.73      free(ts);
1.74 @@ -92,20 +92,20 @@
1.75      return NANOS_OK;
1.76  }
1.77
1.78 -nanos_err_t nanos_create_wd_and_run_compact(nanos_const_wd_definition_t *const_data, nanos_wd_dyn_props_t *dyn_props,
1.79 -        size_t data_size, void * data, size_t num_deps, nanos_dependence_t *deps,
1.80 -        nanos_copy_data_t *copies, nanos_translate_args_t translate_args) {
1.81 +nanos_err_t nanos_create_wd_and_run_compact( nanos_const_wd_definition_t *const_data, nanos_wd_dyn_props_t *dyn_props,
1.82 +        size_t data_size, void * data, size_t num_data_accesses, nanos_data_access_t *data_accesses,
1.83 +        nanos_copy_data_t *copies, nanos_region_dimension_internal_t *dimensions, nanos_translate_args_t translate_args ) {
1.84
1.85
1.86 -    int32* depsTypes = malloc(sizeof (int32) * num_deps);
1.87 -    size_t* depsSizes = malloc(sizeof (size_t) * num_deps);
1.88 -    void** depsAddrs = malloc(sizeof (void*) * num_deps);
1.89 +    int32* depsTypes = malloc(sizeof (int32) * num_data_accesses);
1.90 +    size_t* depsSizes = malloc(sizeof (size_t) * num_data_accesses);
1.91 +    void** depsAddrs = malloc(sizeof (void*) * num_data_accesses);
1.92
1.93      int i;
1.94 -    for (i = 0; i < num_deps; i++) {
1.95 -        depsAddrs[i] = (void *) ( (char *) (*deps[i].address) + deps[i].offset );
1.96 -        depsTypes[i] = (deps[i].flags.output) ? WRITER : READER;
1.97 -        depsSizes[i] = deps[i].size;
1.98 +    for (i = 0; i < num_data_accesses; i++) {
1.99 +        depsAddrs[i] = (void*)((uintptr_t)data_accesses[i].address + data_accesses[i].offset );
1.100 +        depsTypes[i] = (data_accesses[i].flags.output) ? WRITER : READER;
1.101 +        depsSizes[i] = data_accesses[i].dimensions[0].size;
1.102      }
1.103
1.104      /* const_data is declared as:
1.105 @@ -121,8 +121,8 @@
1.106
1.107      VSsTaskType* taskType = (VSsTaskType*) malloc(sizeof (VSsTaskType));
1.108
1.109 -    taskType->fn = *((VSsTaskFnPtr*)((nanos_device_t*) &const_data[1])->arg);
1.110 -    taskType->numDeps = num_deps;
1.111 +    taskType->fn = *((VSsTaskFnPtr*) ((nanos_device_t*) & const_data[1])->arg);
1.112 +    taskType->numDeps = num_data_accesses;
1.113      taskType->depsTypes = depsTypes;
1.114      taskType->depsSizes = depsSizes;
1.115      taskType->sizeOfArgs = data_size;
1.116 @@ -139,7 +139,7 @@
1.117      int32* taskID;
1.118      taskID = VSs__create_taskID_of_size(1);
1.119      taskID[1] = tasks_created++;
1.120 -
1.121 +
1.122      reqData.taskID = taskID;
1.123
1.124      VMS_WL__send_sem_request(&reqData, currVP);
1.125 @@ -162,15 +162,15 @@
1.126      return NANOS_OK;
1.127  }
1.128
1.129 -nanos_err_t nanos_wait_on(size_t num_deps, nanos_dependence_t *deps) {
1.130 +nanos_err_t nanos_wait_on( size_t num_data_accesses, nanos_data_access_t *data_accesses ) {
1.131      VSsSemReq reqData;
1.132
1.133      reqData.reqType = taskwait_on;
1.134      reqData.callingSlv = currVP;
1.135
1.136      int i;
1.137 -    for (i = 0; i < num_deps; i++) {
1.138 -        reqData.args = deps[i].address;
1.139 +    for (i = 0; i < num_data_accesses; i++) {
1.140 +        reqData.args = data_accesses[i].address;
1.141          VMS_WL__send_sem_request(&reqData, currVP);
1.142      }
1.143
1.144 @@ -186,7 +186,7 @@
1.145      reqData.criticalID = lock;
1.146
1.147      VMS_WL__send_sem_request(&reqData, currVP);
1.148 -
1.149 +
1.150      return NANOS_OK;
1.151  }
1.152
1.153 @@ -199,30 +199,30 @@
1.154      reqData.criticalID = lock;
1.155
1.156      VMS_WL__send_sem_request(&reqData, currVP);
1.157 -
1.158 +
1.159      return NANOS_OK;
1.160  }
1.161
1.162 -nanos_err_t nanos_omp_barrier ( void ){
1.163 +nanos_err_t nanos_omp_barrier(void) {
1.164      VSsSemReq reqData;
1.165
1.166      reqData.reqType = barrier;
1.167      reqData.callingSlv = currVP;
1.168
1.169      VMS_WL__send_sem_request(&reqData, currVP);
1.170 -
1.171 +
1.172      return NANOS_OK;
1.173  }
1.174
1.175 -nanos_err_t nanos_in_final(bool *result){
1.176 +nanos_err_t nanos_in_final(bool *result) {
1.177      *result = false;
1.178      return NANOS_OK;
1.179  }
1.180
1.181 -void * nanos_smp_factory( void *args){
1.182 +void * nanos_smp_factory(void *args) {
1.183      return NULL;
1.184  }
1.185
1.186 -void nanos_omp_set_interface ( void * arg){
1.187 +void nanos_omp_set_interface(void * arg) {
1.188      return;
1.189  }