# HG changeset patch
# User Nina Engelhardt
# Date 1324372048 -3600
# Node ID adac95f01c6f01acfcf6b219d92bbb6fd8c76dd5
# Parent 0fb514d583dea191237f69a60177f080a403ac31
move scripts for counter treatment here

diff -r 0fb514d583de -r adac95f01c6f scripts/ucc_and_loop_graph_treatment/parse_ucc.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/ucc_and_loop_graph_treatment/parse_ucc.py Tue Dec 20 10:07:28 2011 +0100
@@ -0,0 +1,34 @@
+#!/usr/bin/python
+# Read a UCC counter CSV file into a dictionary of units and dependencies.
+
+import csv
+
+
+def read_from_file(filename):
+    d = {}
+    # csv.reader expects a file object (or other iterable of lines), not a filename
+    reader = csv.reader(open(filename))
+    for row in reader:
+        try:
+            if row[0] == "unit":
+                if "unit" not in d:
+                    d["unit"] = []
+                d["unit"].append((int(row[1]), int(row[2])))
+            if row[0] == "ctlDep":
+                if "ctlDep" not in d:
+                    d["ctlDep"] = []
+                d["ctlDep"].append(((int(row[1]), int(row[2])), (int(row[3]), int(row[4]))))
+            if row[0] == "commDep":
+                if "commDep" not in d:
+                    d["commDep"] = []
+                d["commDep"].append(((int(row[1]), int(row[2])), (int(row[3]), int(row[4]))))
+            if row[0] == "NtoN":
+                if "NtoN" not in d:
+                    d["NtoN"] = []
+                # row[1] holds the element count as a string; convert it before slicing
+                n = int(row[1])
+                d["NtoN"].append((row[2:2 + n], row[2 + n:2 + 2 * n]))
+        except Exception:
+            # skip malformed or incomplete rows
+            continue
+    return d
diff -r 0fb514d583de -r adac95f01c6f scripts/ucc_and_loop_graph_treatment/run_graph.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/ucc_and_loop_graph_treatment/run_graph.py Tue Dec 20 10:07:28 2011 +0100
@@ -0,0 +1,81 @@
+#!/usr/bin/python
+# Annotate a dependency graph (dot format) with per-node cycle counters from a
+# CSV file, then find and highlight the critical path between two nodes.
+
+import sys
+import csv
+
+from pygraph.readwrite.dot import read, write
+
+if len(sys.argv) < 3:
+    print "Usage:", sys.argv[0], "dependencyfile.dot counterfile.csv"
+    sys.exit()
+
+try:
+    dependencyfile = open(sys.argv[1])
+    dependencystring = dependencyfile.read()
+except IOError as (errno, strerror):
+    print "Error {0}: {1}".format(errno, strerror)
+    sys.exit()
+
+try:
+    counterfile = open(sys.argv[2])
+except IOError as (errno, strerror):
+    print "Error {0}: {1}".format(errno, strerror)
+    sys.exit()
+
+dependencygraph = read(dependencystring)
+
+counterreader = csv.reader(counterfile)
+
+# Attach the cycle counters of each 'VP_<x>_<y>' node as node attributes;
+# the column indices refer to fields of the counter CSV.
+for row in counterreader:
+    node = 'VP_{0}_{1}'.format(row[0], row[1])
+    dependencygraph.add_node_attribute(node, ('suspend_point', row[2]))
+    dependencygraph.add_node_attribute(node, ('schedule_check_cycles', int(row[4]) - int(row[19])))
+    dependencygraph.add_node_attribute(node, ('sync_cycles', int(row[25]) - int(row[24])))
+    dependencygraph.add_node_attribute(node, ('assign_cycles', int(row[10]) - int(row[7])))
+    dependencygraph.add_node_attribute(node, ('work_comm_cycles', int(row[16]) - int(row[13])))
+    dependencygraph.add_node_attribute(node, ('status_cycles', int(row[22]) - int(row[16])))
+
+
+def path_length(path):
+    # Sum the cycle counters attached above over all nodes of the path.
+    length = 0
+    for node in path:
+        attrd = dict(dependencygraph.node_attributes(node))
+        length += attrd['schedule_check_cycles']
+        length += attrd['sync_cycles']
+        length += attrd['assign_cycles']
+        length += attrd['work_comm_cycles']
+        length += attrd['status_cycles']
+    return length
+
+
+def find_critical_path(graph, start, end, path=[]):
+    # Walk backwards from 'end' over incoming edges and keep the path to
+    # 'start' with the largest total cycle count.
+    path = path + [end]
+    if start == end:
+        return path
+    if not graph.has_node(end):
+        return None
+    longest = None
+    for node in graph.incidents(end):
+        if node not in path:
+            newpath = find_critical_path(graph, start, node, path)
+            if newpath:
+                if not longest or path_length(newpath) > path_length(longest):
+                    longest = newpath
+    return longest
+
+
+cr = find_critical_path(dependencygraph, 'VP_2_0', 'VP_2_97')
+
+# Highlight the critical path in the output graph.
+for node in cr:
+    dependencygraph.add_node_attribute(node, ('color', 'red'))
+    dependencygraph.add_node_attribute(node, ('style', 'bold'))
+
+print write(dependencygraph)