changeset 11:bef7a9083bd4 version_that_goes_into_paper

Timestamp counters removed
author Merten Sach <msach@mailbox.tu-berlin.de>
date Fri, 16 Dec 2011 13:49:04 +0100
parents 662089f010bb
children 1320dd56673a
files src/Application/main.c
diffstat 1 files changed, 16 insertions(+), 20 deletions(-) [+]
line diff
     1.1 --- a/src/Application/main.c	Fri Dec 09 15:28:12 2011 +0100
     1.2 +++ b/src/Application/main.c	Fri Dec 16 13:49:04 2011 +0100
     1.3 @@ -179,50 +179,46 @@
     1.4     
     1.5     int cpuid = sched_getcpu();
     1.6     
     1.7 -   measurement_t startWorkload, endWorkload;
     1.8 +   measurement_t startWorkload, endWorkload, startWorkload2, endWorkload2;
     1.9     uint64 numCycles;
    1.10 -   TSCountLowHigh startTask, endTask, endSync1, endSync2;
    1.11     for(o=0; o < outer_iters; o++)
    1.12      {
    1.13         
    1.14            saveCyclesAndInstrs(cpuid,startWorkload.cycles);
    1.15 -//          saveTSCLowHigh(startTask);
    1.16         
    1.17 -      //workload
     1.18 +      //work task
    1.19        for(i=0; i < inner_iters; i++)
    1.20         {
    1.21           workspace1 += (workspace1 + 32)/2;
    1.22           workspace2 += (workspace2 + 23.2)/1.4;
    1.23         }
    1.24        
    1.25 -//          saveTSCLowHigh(endTask);
    1.26 -//          numCycles = endTask.longVal - startTask.longVal;
    1.27            saveCyclesAndInstrs(cpuid,endWorkload.cycles);
    1.28            numCycles = endWorkload.cycles - startWorkload.cycles;
    1.29 -
    1.30            //sanity check (400K is about 20K iters)
    1.31            if( numCycles < 400000 ) {totalWorkCycles += numCycles; numGoodTasks++;}
    1.32            else                     {totalBadCycles  += numCycles; }
    1.33  
    1.34        //mutex access often causes switch to different Slave VP
    1.35        VPThread__mutex_lock(privateMutex, animatingPr);
    1.36 +      
    1.37  /*
    1.38 -          saveTSCLowHigh(endSync1);
    1.39 -          numCycles = endSync1.longVal - endTask.longVal;
    1.40 +          saveCyclesAndInstrs(cpuid,startWorkload2.cycles);
    1.41 +      //Task
    1.42 +      for(i=0; i < inner_iters; i++)
    1.43 +       {
    1.44 +         workspace1 += (workspace1 + 32)/2;
    1.45 +         workspace2 += (workspace2 + 23.2)/1.4;
    1.46 +       }
    1.47 +      
    1.48 +          saveCyclesAndInstrs(cpuid,endWorkload2.cycles);
    1.49 +          numCycles = endWorkload2.cycles - startWorkload2.cycles;
    1.50            //sanity check (400K is about 20K iters)
    1.51 -          if( numCycles < 400000 ) {totalSyncCycles += numCycles; numGoodSyncs++;}
    1.52 -          else                     totalBadSyncCycles  += numCycles;
    1.53 +          if( numCycles < 400000 ) {totalWorkCycles += numCycles; numGoodTasks++;}
    1.54 +          else                     {totalBadCycles  += numCycles; }
    1.55 +      
    1.56  */
    1.57 -      
    1.58        VPThread__mutex_unlock(privateMutex, animatingPr);
    1.59 -/*
    1.60 -          saveTSCLowHigh(endSync2);
    1.61 -          numCycles = endSync2.longVal - endSync1.longVal;
    1.62 -          //sanity check (400K is about 20K iters)
    1.63 -          if( numCycles < 400000 ) {totalSyncCycles += numCycles; numGoodSyncs++;}
    1.64 -          else                     totalBadSyncCycles  += numCycles;
    1.65 -*/
    1.66 -
    1.67      }
    1.68  
    1.69     params->totalWorkCycles = totalWorkCycles;