Compare commits: 8e9c7e31c4...9379fc80d6

2 commits:
  9379fc80d6
  61233e920a

exe2json.py (new file, 196 lines)

@@ -0,0 +1,196 @@
import os
import r2pipe
import re
import hashlib
import log_utils
import json


def calc_sha256(file_path):
    with open(file_path, 'rb') as f:
        data = f.read()
    sha256obj = hashlib.sha256(data)
    sha256 = sha256obj.hexdigest()
    return sha256


def extract_opcode(disasm_text):
    """
    Extract the opcode (and optionally the operands) from a line of disassembly text.
    The regex matches the opcode and the operands; the operands may themselves
    contain spaces and commas.
    """
    match = re.search(r"^\s*(\S+)(?:\s+(.*))?$", disasm_text)
    if match:
        opcode = match.group(1)
        # operands_str = match.group(2) if match.group(2) is not None else ""
        # split_pattern = re.compile(r",(?![^\[]*\])")  # regex used to split the operands
        # operands = split_pattern.split(operands_str)
        # return opcode, [op.strip() for op in operands if op.strip()]
        return opcode
    return ""


def get_graph_cfg_r2pipe(r2pipe_open):
    # CFG (control-flow graph) extraction
    acfg_item = []
    try:
        # get the list of functions
        function_list = r2pipe_open.cmdj("aflj")

        for function in function_list:
            # per-function feature extraction
            node_list = []
            edge_list = []
            temp_edge_list = []
            block_list = r2pipe_open.cmdj("afbj @" + str(function['offset']))
            block_number = len(block_list)
            block_feature_list = []
            for block in block_list:
                node_list.append(block["addr"])

                # fetch the disassembled instructions of the basic block
                disasm = r2pipe_open.cmdj("pdj " + str(block["ninstr"]) + " @" + str(block["addr"]))
                if disasm:
                    for op in disasm:
                        if op["type"] == "invalid":
                            continue
                        # TODO: per-instruction feature extraction goes here
                        block_feature = ''
                        block_feature_list.append(block_feature)

                        # record jump targets as candidate edges
                        if "jump" in op and op["jump"] != 0:
                            temp_edge_list.append([block["addr"], op["jump"]])
            # keep only edges whose target block actually exists
            for temp_edge in temp_edge_list:
                if temp_edge[1] in node_list:
                    edge_list.append(temp_edge)
            acfg = {
                'block_number': block_number,
                'block_edges': [[d[0] for d in edge_list], [d[1] for d in edge_list]],
                'block_features': block_feature_list
            }
            acfg_item.append(acfg)
        return True, "binary executable parsed successfully", acfg_item
    except Exception as e:
        return False, e, None
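
Reviewer note: block_edges stores edges in a two-row [sources, targets] layout rather
than as a list of pairs. A minimal sketch of turning one acfg entry back into pairs
(the addresses are illustrative):

acfg = {'block_number': 3,
        'block_edges': [[4096, 4096], [4112, 4128]],
        'block_features': ['', '', '']}
edge_pairs = list(zip(*acfg['block_edges']))  # [(4096, 4112), (4096, 4128)]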

# An earlier, commented-out version that also collected per-instruction node info:
# for block in block_list:
#     node_list.append(block["addr"])
#
#     # fetch the disassembled instructions of the basic block
#     disasm = r2pipe_open.cmdj("pdj " + str(block["ninstr"]) + " @" + str(block["addr"]))
#     node_info = []
#     if disasm:
#         for op in disasm:
#             if op["type"] == "invalid":
#                 continue
#             opcode, operands = extract_opcode(op["disasm"])
#             # handle jump instructions
#             if "jump" in op and op["jump"] != 0:
#                 temp_edge_list.append([block["addr"], op["jump"]])
#                 node_info.append([op["offset"], op["bytes"], opcode, op["jump"]])
#             else:
#                 node_info.append([op["offset"], op["bytes"], opcode, None])
#     node_info_list.append(node_info)

# after the CFG is built, check for and drop outgoing edges whose target does not exist

# get the original indices of the sorted elements
# sorted_indices = [i for i, v in sorted(enumerate(node_list), key=lambda x: x[1])]
# # reorder according to those indices
# node_list = [node_list[i] for i in sorted_indices]
# node_info_list = [node_info_list[i] for i in sorted_indices]
#
# return True, "binary executable parsed successfully", node_list, edge_list, node_info_list
# except Exception as e:
#     return False, e, None, None, None


def get_graph_fcg_r2pipe(r2pipe_open):
    # FCG (function call graph) extraction
    try:
        function_list = r2pipe_open.cmdj("aflj")
        node_list = []
        func_name_list = []
        edge_list = []
        temp_edge_list = []
        function_num = len(function_list)

        for function in function_list:
            func_name_list.append(function["name"])
            r2pipe_open.cmd('s ' + str(function["offset"]))
            pdf = r2pipe_open.cmdj('pdfj')
            if pdf is None:
                continue

            node_bytes = ""
            node_opcode = ""
            for op in pdf["ops"]:
                if op["type"] == "invalid":
                    continue
                node_bytes += op["bytes"]
                opcode = extract_opcode(op["disasm"])
                node_opcode += opcode + " "
                if "jump" in op and op["jump"] != 0:
                    temp_edge_list.append([function["offset"], op["jump"]])

            node_list.append(function["offset"])

        # once the FCG is built, keep only edges whose target function exists
        for temp_edge in temp_edge_list:
            if temp_edge[1] in node_list:
                edge_list.append(temp_edge)
        sub_function_name_list = ('fcn.', 'loc.', 'main', 'entry')
        func_name_list = [func_name for func_name in func_name_list
                          if not func_name.startswith(sub_function_name_list)]
        return True, "binary executable parsed successfully", function_num, edge_list, func_name_list
    except Exception as e:
        return False, e, None, None, None
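
Reviewer note: the final filter works because str.startswith accepts a tuple of
prefixes, so a single call drops radare2's auto-generated names (the names below
are illustrative):

names = ['fcn.00401000', 'sym.imp.kernel32.dll_ExitProcess', 'entry0', 'main']
kept = [n for n in names if not n.startswith(('fcn.', 'loc.', 'main', 'entry'))]
# kept == ['sym.imp.kernel32.dll_ExitProcess']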


def get_r2pipe(file_path):
    try:
        r2 = r2pipe.open(file_path, flags=['-2'])
        r2.cmd("aaa")
        r2.cmd('e arch=x86')
        return r2
    except Exception:
        return None


def init_logging():
    log_file = "./out/exe2json.log"
    logging = log_utils.setup_logger('exe2json', log_file)
    return logging


def exe_to_json(file_path, output_path):
    logging = init_logging()
    r2 = get_r2pipe(file_path)
    if r2 is None:  # radare2 failed to open the binary
        logging.error(f"failed to open binary with radare2, file path {file_path}")
        return False
    fcg_Operation_flag, fcg_Operation_message, function_num, function_fcg_edge_list, function_names = get_graph_fcg_r2pipe(r2)
    cfg_Operation_flag, cfg_Operation_message, cfg_item = get_graph_cfg_r2pipe(r2)
    r2.quit()
    file_fingerprint = calc_sha256(file_path)
    if fcg_Operation_flag and cfg_Operation_flag:
        json_obj = {
            'hash': file_fingerprint,
            'function_number': function_num,
            'function_edges': [[int(d[0]) for d in function_fcg_edge_list],
                               [int(d[1]) for d in function_fcg_edge_list]],
            'acfg_list': cfg_item,
            'function_names': function_names
        }
    else:
        logging.error(f"failed to parse binary executable, file path {file_path}")
        if not fcg_Operation_flag:
            logging.error(f"FCG error: {fcg_Operation_message}")
        if not cfg_Operation_flag:
            logging.error(f"CFG error: {cfg_Operation_message}")
        return False
    result = json.dumps(json_obj, ensure_ascii=False)
    with open(os.path.join(output_path, file_fingerprint + '.jsonl'), 'w') as out:
        out.write(result)
    return True


if __name__ == '__main__':
    test_file_path = '/mnt/d/bishe/exe2json/sample/VirusShare_0a3b625380161cf92c4bb10135326bb5'
    exe_to_json(test_file_path, './out/json')
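
Reviewer note: for reference, the shape of one .jsonl record written by exe_to_json,
with illustrative values (the field names come from the code above):

example_record = {
    'hash': 'e3b0c44298fc1c14...',            # sha256 of the input binary
    'function_number': 2,
    'function_edges': [[4096], [4256]],       # [sources, targets], as in block_edges
    'acfg_list': [{'block_number': 1,
                   'block_edges': [[], []],
                   'block_features': ['']}],
    'function_names': ['sym.imp.msvcrt.dll_printf']
}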

(modified file; file name not shown in this view)

@@ -2,35 +2,55 @@ import concurrent.futures
 import os
 import r2pipe
 from tqdm import tqdm
+import pandas as pd
 
 
 def get_fun_name_list(file_path):
     # collect the function name list from the binary via radare2
-    r2 = r2pipe.open(os.path.join(file_path), flags=['-2'])
-    r2.cmd('aaa')
-    r2.cmd('e arch=x86')
-    function_list = r2.cmdj("aflj")
     fun_name_list = []
-    for function in function_list:
-        fun_name_list.append(function['name'])
+    try:
+        r2 = r2pipe.open(os.path.join(file_path), flags=['-2'])
+        r2.cmd('aaa')
+        r2.cmd('e arch=x86')
+        function_list = r2.cmdj("aflj")
+
+        for function in function_list:
+            fun_name_list.append(function['name'])
+    except Exception as err:
+        print(f'error at {file_path} , {err}')
     r2.quit()
     return fun_name_list
 
 
-if __name__ == '__main__':
+def fun_name_count():
     file_path = os.path.join('/mnt/d/bishe/dataset/sample_20230130_458')
-    file_list = os.listdir(file_path)
+    benign_file_path = os.path.join('/mnt/d/bishe/dataset/train_benign')
+    file_list = [os.path.join(file_path, file_name) for file_name in os.listdir(file_path)]
+    file_list.extend([os.path.join(benign_file_path, file_name) for file_name in os.listdir(benign_file_path)])
     fun_name_set = {}
-    with concurrent.futures.ThreadPoolExecutor(max_workers=6) as executor:
+    with concurrent.futures.ThreadPoolExecutor(max_workers=12) as executor:
         future_to_args = {
-            executor.submit(get_fun_name_list, os.path.join(file_path, file_name)): file_name
+            executor.submit(get_fun_name_list, file_name): file_name
             for file_name in file_list
         }
         for future in tqdm(concurrent.futures.as_completed(future_to_args), total=len(future_to_args)):
             fun_name_list = future.result()
-            for fun_name in fun_name_list:
-                if fun_name not in fun_name_set:
-                    fun_name_set[fun_name] = 1
-                else:
-                    fun_name_set[fun_name] += 1
-    print(fun_name_set)
+            if fun_name_list:
+                for fun_name in fun_name_list:
+                    if fun_name not in fun_name_set:
+                        fun_name_set[fun_name] = 1
+                    else:
+                        fun_name_set[fun_name] += 1
+    pd.DataFrame(fun_name_set.items(), columns=['fun_name', 'count']).to_csv('./out/fun_name.csv', index=False, mode='a')
+
+
+def fun_name_sort():
+    fun_name_df = pd.read_csv('./out/fun_name.csv')
+    # drop radare2's local / auto-generated function names from the fun_name column
+    for item in ['fcn.', 'loc.', 'main', 'entr']:
+        fun_name_df = fun_name_df[fun_name_df['fun_name'].apply(lambda x: item not in x)]
+    fun_name_df = fun_name_df.sort_values(by='count', ascending=False)[:10000]
+    fun_name_df.to_csv('fun_name_sort.csv', index=False)
+
+
+if __name__ == '__main__':
+    fun_name_count()
+    fun_name_sort()
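
Reviewer note: a minimal, self-contained sketch of the submit/as_completed pattern
this script relies on (the worker below is a stand-in for get_fun_name_list, not
part of the commit):

import concurrent.futures

def work(path):
    return [path.upper()]  # stand-in worker that returns a list, like get_fun_name_list

paths = ['a.exe', 'b.exe', 'c.exe']
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
    future_to_args = {executor.submit(work, p): p for p in paths}
    for future in concurrent.futures.as_completed(future_to_args):
        print(future_to_args[future], future.result())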

ngram.py (10 lines changed)

@@ -102,9 +102,8 @@ def process_csv_file(csvfile, ngram_type, file_percent_filter, frequency_filter)
             idx + 1, file_percent_filter, frequency_filter): start for start in
             range(0, len(dataframe['corpus'].values), 10000)
         }
-        for future in tqdm(concurrent.futures.as_completed(future_to_args), total=len(future_to_args),
-                           desc=f'Computing {ngram_type}-gram on files'):
+        for future in concurrent.futures.as_completed(future_to_args):
             try:
                 sub_ngram_list, sub_filtered_ngram_list = future.result()
                 for i in [sub_ngram_list, ngram_list]:
@@ -113,10 +112,11 @@ def process_csv_file(csvfile, ngram_type, file_percent_filter, frequency_filter)
             for i in [sub_filtered_ngram_list, filtered_ngram_list]:
                 for key, value in i.items():
                     filtered_ngram_list[key] += value
-            process_bar.update(10000)  # advance the progress bar manually
         except Exception as exc:
             logging.error(f"Error processing {idx + 1}-gram: {exc}")
     return ngram_list, filtered_ngram_list
 
+
+
 # --------------------------------------------------------------------------------------------------
 # Execution starts here
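
Reviewer note: the `+=` merge in the second hunk assumes ngram_list and
filtered_ngram_list tolerate missing keys (e.g. collections.Counter or
defaultdict(int)); their construction is outside this hunk, so that is an
assumption. A minimal sketch of the merge pattern:

from collections import Counter

ngram_list = Counter()
sub_ngram_list = {'mov push': 3, 'push call': 1}  # illustrative worker output
for key, value in sub_ngram_list.items():
    ngram_list[key] += value  # Counter treats missing keys as 0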

ngramSort.py (new file, 21 lines)

@@ -0,0 +1,21 @@
import pandas as pd


def extract_features(file_path):
    # read the csv file
    df = pd.read_csv(file_path, delimiter=',')

    # coerce the count column to numeric and sort by it in ascending order
    df["count"] = pd.to_numeric(df["count"], errors='coerce')
    df_sorted = df.sort_values(by='count', ascending=True)

    # keep only the rows whose count is negative
    features = df_sorted[df_sorted['count'] < 0]

    return features


if __name__ == '__main__':
    # call the function with the path of the csv file
    features = extract_features('./out/3gram.csv')
    print(features)
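
Reviewer note: extract_features only assumes a numeric `count` column; the other
column names in ./out/3gram.csv are not visible in this commit. An illustrative
input file (the `ngram` column name is hypothetical):

import pandas as pd

pd.DataFrame({'ngram': ['mov push call', 'push call ret'],
              'count': [1203, 57]}).to_csv('./out/3gram.csv', index=False)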