Skip to content

Commit 170aac8

Browse files
Label perturbation (#110)
* Adapt print_graph to labels
* Add labels for perturbation_resistant
* Add PR labels in tests
* Add PR labels in test inputs
* Solve Codacy issues
1 parent c72e470 commit 170aac8

9 files changed

Lines changed: 247 additions & 227 deletions

grape/fault_diagnosis.py

Lines changed: 56 additions & 55 deletions
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,7 @@ def check_input_with_gephi(self):
7373
self.edges_df.to_csv('check_import_edges.csv', index=False)
7474

7575
def fitness_iteration_parallel(self, out_queue, ichunk, chunk_length,
76-
individuals, perturbed_nodes, initial_condition):
76+
individuals, perturbed_nodes, initial_condition, origin):
7777
"""
7878
7979
Parallel iteration for fitness evaluation. We append to the
@@ -96,11 +96,11 @@ def fitness_iteration_parallel(self, out_queue, ichunk, chunk_length,
9696
for iter_ind in range(len(individuals)):
9797
ind_fit = (ichunk*chunk_length + iter_ind, individuals[iter_ind],
9898
self.fitness_evaluation(individuals[iter_ind], perturbed_nodes,
99-
initial_condition))
99+
initial_condition, origin))
100100
out_queue.put(ind_fit)
101101

102102
def fitness_evaluation_parallel(self, pop, perturbed_nodes,
103-
initial_condition):
103+
initial_condition, origin):
104104
"""
105105
106106
Wrapper for fitness evaluation. This methods spawns the processes for
@@ -125,7 +125,7 @@ def fitness_evaluation_parallel(self, pop, perturbed_nodes,
125125
processes = [
126126
mp.Process( target=self.fitness_iteration_parallel,
127127
args=( out_queue, p, len(ind_chunks[0]), ind_chunks[p],
128-
perturbed_nodes, initial_condition ))
128+
perturbed_nodes, initial_condition, origin ))
129129
for p in range(n_procs) ]
130130

131131
for proc in processes:
@@ -143,7 +143,7 @@ def fitness_evaluation_parallel(self, pop, perturbed_nodes,
143143
return fitnesses_tuples
144144

145145
def fitness_evaluation(self, individual, perturbed_nodes,
146-
initial_condition):
146+
initial_condition, origin):
147147
"""
148148
149149
Evaluation of fitness on individual.
@@ -181,7 +181,7 @@ def fitness_evaluation(self, individual, perturbed_nodes,
181181
for node in perturbed_nodes:
182182
if node in T.nodes():
183183

184-
_, broken_nodes = self.rm_nodes(node, T)
184+
_, broken_nodes = self.rm_nodes(node, T, origin)
185185
broken_nodes = list(set(broken_nodes))
186186

187187
for n in broken_nodes: T.remove_node(n)
@@ -200,7 +200,7 @@ def fitness_evaluation(self, individual, perturbed_nodes,
200200
len(usr_with_serv.keys()), sum(dist_from_avg.values()))
201201

202202
def optimizer(self, perturbed_nodes, initial_condition, params, weights,
203-
parallel):
203+
parallel, origin):
204204
"""
205205
206206
Genetic algorithm to optimize switches conditions, using DEAP.
@@ -253,10 +253,10 @@ def optimizer(self, perturbed_nodes, initial_condition, params, weights,
253253
# Evaluate the entire population
254254
if (not parallel) or (len(pop) < mp.cpu_count()):
255255
fitnesses = [toolbox.evaluate(ind, perturbed_nodes,
256-
initial_condition) for ind in pop]
256+
initial_condition, origin) for ind in pop]
257257
else:
258258
res_par = self.fitness_evaluation_parallel(pop, perturbed_nodes,
259-
initial_condition)
259+
initial_condition, origin)
260260
res_par.sort(key=lambda x:x[0])
261261
fitnesses = [x[2] for x in res_par]
262262

@@ -305,10 +305,10 @@ def optimizer(self, perturbed_nodes, initial_condition, params, weights,
305305
# Evaluate the individuals with an invalid fitness
306306
if (not parallel) or (len(invalid_ind) < mp.cpu_count()):
307307
fitnesses = [toolbox.evaluate(ind, perturbed_nodes,
308-
initial_condition) for ind in invalid_ind]
308+
initial_condition, origin) for ind in invalid_ind]
309309
else:
310310
res_par = self.fitness_evaluation_parallel(invalid_ind,
311-
perturbed_nodes, initial_condition)
311+
perturbed_nodes, initial_condition, origin)
312312
res_par.sort(key=lambda x:x[0])
313313
fitnesses = [x[2] for x in res_par]
314314

@@ -389,7 +389,7 @@ def check_paths_and_measures(self, prefix=None):
389389
self.paths_df = pd.merge(self.paths_df, df,
390390
on=['from', 'to', 'ids', 'area'], how='outer')
391391

392-
def rm_nodes(self, node, graph, visited=None, broken_nodes=None):
392+
def rm_nodes(self, node, graph, origin, visited=None, broken_nodes=None):
393393
"""
394394
395395
Remove nodes from the graph in a depth first search way to
@@ -416,51 +416,51 @@ def rm_nodes(self, node, graph, visited=None, broken_nodes=None):
416416
logging.debug(f'Node: {node}')
417417

418418

419-
if bool(graph.perturbation_resistant[node]):
419+
if origin in graph.perturbation_resistant[node]:
420420
logging.debug(f'Node {node} visited, fault resistant node')
421+
logging.debug('Perturbation origin: %s', origin)
421422
return visited, broken_nodes
422423

423-
else:
424-
fathers = {'AND': set(), 'OR': set(), 'SINGLE': set() }
425-
predecessors = list(graph.predecessors(node))
426-
logging.debug(f'Predecessors: {predecessors}')
424+
fathers = {'AND': set(), 'OR': set(), 'SINGLE': set() }
425+
predecessors = list(graph.predecessors(node))
426+
logging.debug(f'Predecessors: {predecessors}')
427427

428-
if len(visited) == 1:
429-
broken_nodes.append(node)
430-
logging.debug(f'Broken: {broken_nodes}')
428+
if len(visited) == 1:
429+
broken_nodes.append(node)
430+
logging.debug(f'Broken: {broken_nodes}')
431431

432-
elif predecessors:
433-
for p in predecessors:
434-
fathers[graph.father_condition[(p, node)]].add(p)
432+
elif predecessors:
433+
for p in predecessors:
434+
fathers[graph.father_condition[(p, node)]].add(p)
435435

436-
if fathers['AND'] & set(broken_nodes):
437-
broken_nodes.append(node)
438-
logging.debug(f'Broken {node}, AND predecessor broken.')
439-
logging.debug(f'Nodes broken so far: {broken_nodes}')
436+
if fathers['AND'] & set(broken_nodes):
437+
broken_nodes.append(node)
438+
logging.debug(f'Broken {node}, AND predecessor broken.')
439+
logging.debug(f'Nodes broken so far: {broken_nodes}')
440+
441+
#'SINGLE' treated as 'AND'
442+
elif fathers['SINGLE'] & set(broken_nodes):
443+
broken_nodes.append(node)
444+
logging.debug(f'Broken {node}, SINGLE predecessor broken.')
445+
logging.debug(f'Nodes broken so far: {broken_nodes}')
440446

441-
#'SINGLE' treated as 'AND'
442-
elif fathers['SINGLE'] & set(broken_nodes):
447+
else:
448+
if (fathers['OR'] & set(broken_nodes)) == set(predecessors):
449+
#all my 'OR' predecessors are dead
443450
broken_nodes.append(node)
444-
logging.debug(f'Broken {node}, SINGLE predecessor broken.')
451+
logging.debug(f'Broken {node}, no more fathers')
445452
logging.debug(f'Nodes broken so far: {broken_nodes}')
446-
447453
else:
448-
if (fathers['OR'] & set(broken_nodes)) == set(predecessors):
449-
#all my 'OR' predecessors are dead
450-
broken_nodes.append(node)
451-
logging.debug(f'Broken {node}, no more fathers')
452-
logging.debug(f'Nodes broken so far: {broken_nodes}')
453-
else:
454-
logging.debug(f'Surviving fathers: {fathers}')
455-
logging.debug(f'Nodes broken so far: {broken_nodes}')
456-
return 0
457-
else:
458-
broken_nodes.append(node)
459-
logging.debug(f'Node: {node} has no more predecessors')
460-
logging.debug(f'Nodes broken so far: {broken_nodes}')
454+
logging.debug(f'Surviving fathers: {fathers}')
455+
logging.debug(f'Nodes broken so far: {broken_nodes}')
456+
return 0
457+
else:
458+
broken_nodes.append(node)
459+
logging.debug(f'Node: {node} has no more predecessors')
460+
logging.debug(f'Nodes broken so far: {broken_nodes}')
461461

462462
for next_node in set(graph[node]) - visited:
463-
self.rm_nodes(next_node, graph, visited, broken_nodes)
463+
self.rm_nodes(next_node, graph, origin, visited, broken_nodes)
464464

465465
return visited, broken_nodes
466466

@@ -502,7 +502,7 @@ def update_status_areas(self, damaged_areas):
502502
for area in damaged_areas:
503503
self.df.loc[self.df.area == area, 'status_area'] = 'DAMAGED'
504504

505-
def delete_a_node(self, node):
505+
def delete_a_node(self, node, origin):
506506
"""
507507
508508
Delete a node in the graph.
@@ -513,15 +513,15 @@ def delete_a_node(self, node):
513513
No check is done within this function.
514514
"""
515515

516-
_ , broken_nodes = self.rm_nodes(node, self.G)
516+
_ , broken_nodes = self.rm_nodes(node, self.G, origin)
517517
broken_nodes = list(set(broken_nodes))
518518

519519
for n in broken_nodes:
520520
self.damaged_areas.add(self.G.area[n])
521521
self.G.remove_node(n)
522522

523523
def apply_perturbation(self, perturbed_nodes, params, weights, parallel,
524-
verbose, kind='element'):
524+
verbose, origin, kind='element'):
525525
"""
526526
527527
Perturbation simulator, actually applying the perturbation
@@ -562,7 +562,7 @@ def apply_perturbation(self, perturbed_nodes, params, weights, parallel,
562562
if self.G.switches:
563563

564564
res = self.optimizer(perturbed_nodes, self.G.init_status, params,
565-
weights, parallel)
565+
weights, parallel, origin)
566566
w = np.asarray(list(weights.values()))
567567

568568
if verbose:
@@ -623,7 +623,8 @@ def apply_perturbation(self, perturbed_nodes, params, weights, parallel,
623623
self.G.final_status = best
624624

625625
for node in perturbed_nodes:
626-
if node in self.G.nodes(): self.delete_a_node(node)
626+
if node in self.G.nodes():
627+
self.delete_a_node(node, origin)
627628

628629
self.check_paths_and_measures(prefix='final_')
629630
self.paths_df.to_csv('service_paths_' + str(kind)+ '_perturbation.csv',
@@ -638,7 +639,7 @@ def apply_perturbation(self, perturbed_nodes, params, weights, parallel,
638639
def simulate_element_perturbation(self, perturbed_nodes,
639640
params={'npop': 300, 'ngen': 100, 'indpb': 0.6, 'tresh': 0.5,
640641
'nsel': 5}, weights={'w1': 1.0, 'w2': -1.0, 'w3': -1.0, 'w4': -1.0,
641-
'w5': 2.0}, parallel=False, verbose=True):
642+
'w5': 2.0}, parallel=False, verbose=True, origin=None):
642643
"""
643644
644645
Simulate a perturbation of one or multiple nodes.
@@ -666,7 +667,7 @@ def simulate_element_perturbation(self, perturbed_nodes,
666667
:param parallel: flag for parallel fitness evaluation of
667668
initial population, default to False
668669
:type parallel: bool, optional
669-
:param verbose: flag for verbose outputi, default to True
670+
:param verbose: flag for verbose output, default to True
670671
:type verbose: bool, optional
671672
672673
.. note:: A perturbation, depending on the considered system,
@@ -685,12 +686,12 @@ def simulate_element_perturbation(self, perturbed_nodes,
685686
sys.exit()
686687

687688
self.apply_perturbation(perturbed_nodes, params, weights, parallel,
688-
verbose, kind='element')
689+
verbose, origin, kind='element')
689690

690691
def simulate_area_perturbation(self, perturbed_areas, params={'npop': 300,
691692
'ngen': 100, 'indpb': 0.6, 'tresh': 0.5, 'nsel': 5},
692693
weights={'w1': 1.0, 'w2': -1.0, 'w3': -1.0, 'w4': -1.0, 'w5': 2.0},
693-
parallel=False, verbose=True):
694+
parallel=False, verbose=True, origin=None):
694695
"""
695696
696697
Simulate a perturbation in one or multiple areas.
@@ -740,7 +741,7 @@ def simulate_area_perturbation(self, perturbed_areas, params={'npop': 300,
740741
if idx_area == area: nodes_in_area.append(idx)
741742

742743
self.apply_perturbation(nodes_in_area, params, weights, parallel,
743-
verbose, kind='area')
744+
verbose, origin, kind='area')
744745

745746
def graph_characterization_to_file(self, filename):
746747
"""

grape/general_graph.py

Lines changed: 13 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -47,13 +47,12 @@ def load(self, filename):
4747
:rtype: pandas.DataFrame, pandas.DataFrame
4848
"""
4949

50-
conv = {'mark' : str, 'father_mark' : str, }
50+
conv = {'mark' : str, 'father_mark' : str,
51+
'perturbation_resistant' : str}
5152
graph_df = pd.read_csv(filename, converters=conv, keep_default_na=False)
52-
cols_to_num = ['perturbation_resistant', 'init_status']
53+
cols_to_num = ['init_status']
5354
graph_df[cols_to_num] = graph_df[cols_to_num].apply(pd.to_numeric,
5455
errors='coerce', axis=1)
55-
cols_to_int = ['perturbation_resistant']
56-
graph_df[cols_to_int] = graph_df[cols_to_int].astype('int64')
5756

5857
for index, row in graph_df.iterrows():
5958

@@ -77,8 +76,15 @@ def load(self, filename):
7776
graph_df.set_index('mark', inplace=True)
7877

7978
self._final_status = {}
80-
nx.set_node_attributes(self, 'AVAILABLE', 'status_area')
81-
nx.set_node_attributes(self, 'ACTIVE', 'mark_status')
79+
nx.set_node_attributes(self, 'AVAILABLE', name='status_area')
80+
nx.set_node_attributes(self, 'ACTIVE', name='mark_status')
81+
82+
perts = nx.get_node_attributes(self, 'perturbation_resistant')
83+
split_perts = {node: perts[node].split("/")
84+
for node in self.nodes}
85+
strip_perts = {node: [x.strip() for x in split_perts[node]]
86+
for node in self.nodes}
87+
nx.set_node_attributes(self, strip_perts, name='perturbation_resistant')
8288

8389
return graph_df, graph_edges_df
8490

@@ -1342,7 +1348,7 @@ def print_graph(self, radius=None, initial_pos=None, fixed_nodes=None,
13421348
edgecolors=border, alpha=0.5)
13431349

13441350
pert_resistant = [node for node in self.perturbation_resistant.keys()
1345-
if self.perturbation_resistant[node] == 1]
1351+
if self.perturbation_resistant[node][0]]
13461352

13471353
for node in pert_resistant:
13481354
col = mymap(area_indices[self.area[node]])

tests/TOY_graph.csv

Lines changed: 29 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -1,30 +1,30 @@
11
mark,father_condition,father_mark,area,perturbation_resistant,init_status,description,type,weight,initial_service
2-
"1","ORPHAN","NULL","area1",0,,,"SOURCE",1.0,1.0
3-
"2","SINGLE","1","area1",1,1,,"SWITCH",1.0,0.0
4-
"3","SINGLE","1","area1",1,1,,"SWITCH",1.0,0.0
5-
"4","SINGLE","2","area1",1,,,"HUB",1.0,0.0
6-
"5","SINGLE","3","area1",1,,,"HUB",1.0,0.0
7-
"6","SINGLE","4","area4",0,,,"HUB",1.0,0.0
8-
"6","SINGLE","7","area4",0,,,"HUB",1.0,0.0
9-
"7","SINGLE","6","area4",0,,,"HUB",1.0,0.0
10-
"8","SINGLE","6","area4",0,,,"HUB",1.0,0.0
11-
"6","SINGLE","8","area4",0,,,"HUB",1.0,0.0
12-
"9","OR","8","area3",0,,,"HUB",1.0,0.0
13-
"9","OR","15","area3",0,,,"HUB",1.0,0.0
14-
"15","ORPHAN","NULL","area3",0,,,"SOURCE",1.0,2.0
15-
"16","SINGLE","9","area3",0,,,"HUB",1.0,0.0
16-
"16","SINGLE","17","area3",0,,,"HUB",1.0,0.0
17-
"17","SINGLE","16","area3",0,,,"HUB",1.0,0.0
18-
"10","SINGLE","17","area3",0,,,"HUB",1.0,0.0
19-
"11","AND","10","area2",0,,,"HUB",1.0,0.0
20-
"11","AND","5","area2",0,,,"HUB",1.0,0.0
21-
"19","SINGLE","11","area2",0,,,"HUB",1.0,0.0
22-
"19","SINGLE","12","area2",0,,,"HUB",1.0,0.0
23-
"19","SINGLE","14","area2",0,,,"HUB",1.0,0.0
24-
"12","SINGLE","19","area2",0,,,"HUB",1.0,0.0
25-
"12","SINGLE","13","area2",0,,,"HUB",1.0,0.0
26-
"13","SINGLE","14","area2",0,,,"HUB",1.0,0.0
27-
"13","SINGLE","12","area2",0,,,"HUB",1.0,0.0
28-
"14","SINGLE","19","area2",0,,,"HUB",1.0,0.0
29-
"14","SINGLE","13","area2",0,,,"HUB",1.0,0.0
30-
"18","SINGLE","14","area2",0,,,"USER",1.0,0.0
2+
"1","ORPHAN","NULL","area1",,,,"SOURCE",1.0,1.0
3+
"2","SINGLE","1","area1",perturbation1,1,,"SWITCH",1.0,0.0
4+
"3","SINGLE","1","area1",perturbation1,1,,"SWITCH",1.0,0.0
5+
"4","SINGLE","2","area1",perturbation1,,,"HUB",1.0,0.0
6+
"5","SINGLE","3","area1",perturbation1,,,"HUB",1.0,0.0
7+
"6","SINGLE","4","area4",,,,"HUB",1.0,0.0
8+
"6","SINGLE","7","area4",,,,"HUB",1.0,0.0
9+
"7","SINGLE","6","area4",,,,"HUB",1.0,0.0
10+
"8","SINGLE","6","area4",,,,"HUB",1.0,0.0
11+
"6","SINGLE","8","area4",,,,"HUB",1.0,0.0
12+
"9","OR","8","area3",,,,"HUB",1.0,0.0
13+
"9","OR","15","area3",,,,"HUB",1.0,0.0
14+
"15","ORPHAN","NULL","area3",,,,"SOURCE",1.0,2.0
15+
"16","SINGLE","9","area3",,,,"HUB",1.0,0.0
16+
"16","SINGLE","17","area3",,,,"HUB",1.0,0.0
17+
"17","SINGLE","16","area3",,,,"HUB",1.0,0.0
18+
"10","SINGLE","17","area3",,,,"HUB",1.0,0.0
19+
"11","AND","10","area2",,,,"HUB",1.0,0.0
20+
"11","AND","5","area2",,,,"HUB",1.0,0.0
21+
"19","SINGLE","11","area2",,,,"HUB",1.0,0.0
22+
"19","SINGLE","12","area2",,,,"HUB",1.0,0.0
23+
"19","SINGLE","14","area2",,,,"HUB",1.0,0.0
24+
"12","SINGLE","19","area2",,,,"HUB",1.0,0.0
25+
"12","SINGLE","13","area2",,,,"HUB",1.0,0.0
26+
"13","SINGLE","14","area2",,,,"HUB",1.0,0.0
27+
"13","SINGLE","12","area2",,,,"HUB",1.0,0.0
28+
"14","SINGLE","19","area2",,,,"HUB",1.0,0.0
29+
"14","SINGLE","13","area2",,,,"HUB",1.0,0.0
30+
"18","SINGLE","14","area2",,,,"USER",1.0,0.0

0 commit comments

Comments (0)