-
Notifications
You must be signed in to change notification settings - Fork 67
Expand file tree
/
Copy pathsanitize.py
More file actions
304 lines (246 loc) · 9.17 KB
/
sanitize.py
File metadata and controls
304 lines (246 loc) · 9.17 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
"""Post-processing LLM-generated Python code implemented using tree-sitter."""
import os
import pathlib
from collections import deque
from typing import Dict, Generator, List, Optional, Set, Tuple

import tree_sitter_python
from pqdm.processes import pqdm
from tqdm import tqdm
from tree_sitter import Language, Node, Parser

from bigcodebench.data import (
    get_bigcodebench,
    load_solutions,
    write_directory,
    write_jsonl,
)
from bigcodebench.syncheck import syntax_check
# tree-sitter node-type names for the Python grammar, used when walking
# the parsed syntax tree in extract_target_code_or_empty().
CLASS_TYPE = "class_definition"
FUNCTION_TYPE = "function_definition"
# Both flavours of import: "import x" and "from x import y".
IMPORT_TYPE = ["import_statement", "import_from_statement"]
IDENTIFIER_TYPE = "identifier"
# NOTE(review): ATTRIBUTE_TYPE is not referenced anywhere in this file's
# visible code — possibly kept for callers elsewhere; confirm before removing.
ATTRIBUTE_TYPE = "attribute"
RETURN_TYPE = "return_statement"
EXPRESSION_TYPE = "expression_statement"
ASSIGNMENT_TYPE = "assignment"
def code_extract(text: str) -> str:
    """Return the largest syntactically valid slice of *text*.

    Every contiguous multi-line window of *text* is tried; the window that
    passes ``syntax_check`` with the most non-blank lines wins, and is
    returned re-joined with newlines.  Windows span at least two lines
    (the inner range starts past the outer index), so a lone valid line is
    never selected on its own; when nothing parses, the first line is
    returned by the (0, 0) default span.
    """
    all_lines = text.split("\n")
    best_span = (0, 0)
    best_score = 0
    for start in range(len(all_lines)):
        for stop in range(start + 1, len(all_lines)):
            window = "\n".join(all_lines[start : stop + 1])
            if not syntax_check(window):
                continue
            # Score by non-blank line count so whitespace padding doesn't win.
            score = sum(1 for ln in all_lines[start : stop + 1] if ln.strip())
            if score > best_score:
                best_score = score
                best_span = (start, stop)
    return "\n".join(all_lines[best_span[0] : best_span[1] + 1])
def get_deps(nodes: List[Tuple[str, Node]]) -> Dict[str, Set[str]]:
    """Map each definition name to the identifiers its subtree references.

    For every (name, node) pair, walks the node's subtree and collects the
    decoded text of every identifier token found below it.
    """

    def collect_identifiers(root: Node) -> Set[str]:
        # Iterative DFS; identifier nodes are leaves for our purposes,
        # everything else is descended into.
        found: Set[str] = set()
        stack = list(root.children)
        while stack:
            current = stack.pop()
            if current.type == IDENTIFIER_TYPE:
                found.add(current.text.decode("utf8"))
            else:
                stack.extend(current.children)
        return found

    return {name: collect_identifiers(node) for name, node in nodes}
def get_function_dependency(entrypoint: str, call_graph: Dict[str, Set[str]]) -> Set[str]:
    """Return all names transitively reachable from *entrypoint*.

    Breadth-first search over *call_graph* (name -> set of referenced
    names, as produced by ``get_deps``).  The result always contains
    *entrypoint* itself; names absent from the graph are kept but not
    expanded.

    Fixes vs. the original: the ``call_graph`` annotation claimed
    ``Dict[str, str]`` although the values are sets of names, and the
    queue used ``list.pop(0)`` which is O(n) per dequeue — ``deque``
    gives O(1).
    """
    queue = deque([entrypoint])
    visited = {entrypoint}
    while queue:
        current = queue.popleft()
        # Names with no entry in the graph (e.g. builtins) are leaves.
        for neighbour in call_graph.get(current, ()):
            if neighbour not in visited:
                visited.add(neighbour)
                queue.append(neighbour)
    return visited
def get_definition_name(node: Node) -> Optional[str]:
    """Return the name of a definition node, or None if it has none.

    Scans the node's direct children for the first identifier token and
    returns its decoded text.  The original annotation claimed ``str``,
    but the fall-through path (no identifier child, e.g. an assignment to
    a subscript or attribute) yields None — made explicit here.
    """
    for child in node.children:
        if child.type == IDENTIFIER_TYPE:
            return child.text.decode("utf8")
    return None
def traverse_tree(node: Node) -> Generator[Node, None, None]:
    """Yield *node* and every descendant in pre-order.

    This is the canonical tree-sitter cursor walk: descend to the first
    child when possible, otherwise advance to the next sibling, otherwise
    ascend.  A cursor obtained from ``node.walk()`` cannot ascend above
    the node it was created from, so a failing ``goto_parent()`` marks the
    end of the subtree.

    BUGFIX: the previous version kept a ``depth`` counter that was
    incremented when ``goto_first_child()`` *failed* (i.e. once per leaf)
    instead of once per successful descent, and broke out when a
    successful ``goto_parent()`` coincided with ``depth == 0``.  For
    chains of single children (root -> X -> Y -> leaf, with X having a
    later sibling) the counter hit zero early and the sibling subtree was
    silently skipped, so e.g. ``has_return_statement`` could miss a
    ``return``.  The counter is unnecessary with a bounded cursor and has
    been removed.
    """
    cursor = node.walk()
    visited_children = False
    while True:
        if not visited_children:
            yield cursor.node
            if not cursor.goto_first_child():
                visited_children = True
        elif cursor.goto_next_sibling():
            visited_children = False
        elif not cursor.goto_parent():
            # Back at the subtree root with nothing left to visit.
            break
def has_return_statement(node: Node) -> bool:
    """True if *node* or any descendant is a ``return_statement``."""
    return any(n.type == RETURN_TYPE for n in traverse_tree(node))
def extract_target_code_or_empty(code: str, entrypoint: Optional[str] = None) -> str:
    """Parse generated code and keep only imports plus needed definitions.

    The input is first narrowed to its longest syntactically valid slice
    (``code_extract``) and parsed with tree-sitter.  Top-level imports,
    classes, functions and simple assignments are collected, first
    definition of each name winning.  When *entrypoint* is given, only
    definitions transitively referenced from it (per ``get_deps`` /
    ``get_function_dependency``) are emitted, and trailing top-level lines
    mentioning the entrypoint (stray example calls) are dropped.  Returns
    "" when nothing survives.
    """
    code = code_extract(code.strip())
    code_bytes = bytes(code, "utf8")
    parser = Parser(Language(tree_sitter_python.language()))
    tree = parser.parse(code_bytes)
    class_names = set()
    function_names = set()
    variable_names = set()
    root_node = tree.root_node
    import_nodes = []
    # (name, node) pairs in source order; later duplicates by name are skipped.
    definition_nodes = []
    for child in root_node.children:
        if child.type in IMPORT_TYPE:
            import_nodes.append(child)
        elif child.type == CLASS_TYPE:
            name = get_definition_name(child)
            if not (
                name in class_names or name in variable_names or name in function_names
            ):
                definition_nodes.append((name, child))
                class_names.add(name)
        elif child.type == FUNCTION_TYPE:
            name = get_definition_name(child)
            if not (
                name in function_names or name in variable_names or name in class_names
            ):
                definition_nodes.append((name, child))
                # (original recomputed get_definition_name here; same value)
                function_names.add(name)
        elif (
            child.type == EXPRESSION_TYPE and child.children[0].type == ASSIGNMENT_TYPE
        ):
            subchild = child.children[0]
            name = get_definition_name(subchild)
            if not (
                name in variable_names or name in function_names or name in class_names
            ):
                definition_nodes.append((name, subchild))
                variable_names.add(name)
    if entrypoint:
        name2deps = get_deps(definition_nodes)
        reachable = get_function_dependency(entrypoint, name2deps)
    sanitized_output = b""
    for node in import_nodes:
        sanitized_output += code_bytes[node.start_byte : node.end_byte] + b"\n"
    for name, node in definition_nodes:
        # With an entrypoint, drop definitions it never (transitively) uses.
        if entrypoint and name not in reachable:
            continue
        sanitized_output += code_bytes[node.start_byte : node.end_byte] + b"\n"
    # Strip the final newline appended above.
    sanitized_output = sanitized_output[:-1].decode("utf8")
    # Ad-hoc cleanup: walking up from the bottom, drop trailing top-level
    # lines that mention the entrypoint (e.g. example calls appended after
    # the function).  Stop at the first indented line, which belongs to a
    # definition body.
    # BUGFIX: guarded on entrypoint — the original ran this loop even when
    # entrypoint was None, so `entrypoint in lines[i]` raised TypeError
    # whenever the last emitted line was unindented.
    if entrypoint is not None:
        lines = sanitized_output.splitlines()
        outer_lines = []
        for i in range(len(lines) - 1, -1, -1):
            if lines[i].startswith(" "):
                break
            if entrypoint in lines[i]:
                outer_lines.append(i)
        if outer_lines:
            sanitized_output = "\n".join(lines[: outer_lines[-1]])
    return sanitized_output
def sanitize(code: str, entrypoint: Optional[str] = None) -> str:
    """Sanitize generated *code*, falling back to raw extraction.

    A None input yields "".  Otherwise the tree-sitter based extraction is
    attempted first; when it produces nothing, the longest syntactically
    valid slice of the original code is returned instead.
    """
    if code is None:
        return ""
    extracted = extract_target_code_or_empty(code, entrypoint).strip()
    return extracted or code_extract(code)
def process_solution(
    sample_solution: Dict,
    dataset: Dict,
    entry_point: Dict,
    debug_task: Optional[str] = None,
    calibrate: bool = False,
    is_folder: bool = False,
    target_path: Optional[str] = None,
):
    """Sanitize one solution record.

    sample_solution: record with "task_id", "_identifier" and either
        "solution" (full program) or "completion" (generated suffix).
    dataset:     task_id -> task dict (provides "complete_prompt").
    entry_point: task_id -> entry-point function name.
    debug_task:  when set, all other task_ids are skipped.
    calibrate:   re-insert the task's complete prompt into the fenced code
                 block before sanitizing.

    Returns ``{"task_id": ..., "solution": <sanitized code>}`` or None for
    unknown/filtered records.
    """
    task_id = sample_solution.get("task_id")
    if not task_id or task_id not in dataset:
        return None
    dbg_identifier = sample_solution["_identifier"]
    if debug_task is not None and task_id != debug_task:
        # Debug mode: only the requested task is processed.
        return None
    function_name = entry_point.get(task_id)
    old_code = sample_solution.get("solution")
    if old_code is None:
        # "completion"-style records carry only the generated suffix;
        # prepend the task prompt to obtain a full program.
        assert "completion" in sample_solution, sample_solution
        old_code = dataset[task_id]["complete_prompt"] + "\n" + sample_solution.get("completion")
    elif calibrate:
        old_code = old_code.replace("```python\n ", "```python\n"+dataset[task_id]["complete_prompt"]+" ")
    new_code = sanitize(code=old_code, entrypoint=function_name)
    # Report records whose code actually changed.
    if new_code != old_code:
        msg = "Sanitized: " + dbg_identifier
        if is_folder and target_path is not None:
            # BUGFIX: the original built this suffix with
            # `dbg_identifier.replace(samples, target_path)`, but `samples`
            # is a parameter of `script`, not of this function — every
            # sanitized record in folder mode raised NameError.  Show the
            # destination folder instead.
            msg += " -> " + target_path
        print(msg)
    return {"task_id": task_id, "solution": new_code}
def script(
    samples: str, inplace: bool = False, debug_task: Optional[str] = None, calibrate: bool = False, parallel: int=32
) -> None:
    """Sanitize every solution found at *samples* (a .jsonl file or folder).

    samples:    path to a .jsonl file or a directory of solution files.
    inplace:    overwrite the input instead of writing a suffixed copy.
    debug_task: when set, only the matching task_id is processed.
    calibrate:  re-insert each task's complete prompt before sanitizing;
                also reflected in the output name.
    parallel:   upper bound on worker processes (capped at os.cpu_count()).
    """
    # task_id -> entry_point
    entry_point = {}
    # merge two datasets
    dataset = {**get_bigcodebench()}
    for task_id, problem in dataset.items():
        entry_point[task_id] = problem["entry_point"]
    # make a new folder with "-sanitized" suffix
    is_folder = os.path.isdir(samples)
    target_path = pathlib.Path(samples)
    if not inplace:
        if is_folder:
            if calibrate:
                new_name = target_path.name + "-sanitized-calibrated"
            else:
                new_name = target_path.name + "-sanitized"
        else:
            if calibrate:
                new_name = target_path.name.replace(".jsonl", "-sanitized-calibrated.jsonl")
            else:
                new_name = target_path.name.replace(".jsonl", "-sanitized.jsonl")
        target_path = target_path.parent / new_name
    target_path = str(target_path)
    nsan = 0
    ntotal = 0
    new_solutions = []
    # One kwargs dict per solution record, fanned out to process_solution
    # by pqdm below.
    parallel_arg_list = [
        {
            "sample_solution": sample_solution,
            "dataset": dataset,
            "entry_point": entry_point,
            "debug_task": debug_task,
            "calibrate": calibrate,
            "is_folder": is_folder,
            "target_path": target_path
        }
        for sample_solution in load_solutions(samples)
    ]
    results = pqdm(parallel_arg_list, process_solution, n_jobs=min(parallel, os.cpu_count()), argument_type="kwargs")
    # NOTE(review): pqdm places exception objects in `results` when a worker
    # fails (unless exception_behaviour is changed); such entries are not
    # None and would be collected as "solutions" here — confirm intended.
    for result in results:
        if result is not None:
            new_solutions.append(result)
            # NOTE(review): process_solution returns a record for every
            # valid task whether or not its code changed, so `nsan` counts
            # processed records rather than actually-sanitized ones.
            nsan += 1
        ntotal += 1
    if is_folder:
        write_directory(target_path, new_solutions)
    else:
        write_jsonl(target_path, new_solutions)
    if nsan > 0:
        print(f"Sanitized {nsan} out of {ntotal} files.")
    else:
        print(f"All files seems valid -- no files are sanitized.")
    print(f"Check the sanitized files at {target_path}")
def main():
    """Command-line entry point: expose ``script`` through a python-fire CLI."""
    # Imported lazily so importing this module doesn't require fire.
    import fire

    fire.Fire(script)


if __name__ == "__main__":
    main()