Thread pool version
parent 52b0cf6db3
commit 3f4bde2989
OpcodeGet.py (18 changes)
@@ -6,12 +6,14 @@ from tqdm import tqdm
 import r2pipe
 import pandas as pd
 
+
 def Opcode_to_csv(opcode_list, file_type):
     logger.info("*======================start write==================================*")
     csv_write(f'output_{file_type}.csv', opcode_list)
     logger.info(f"done {done_file_num} files")
     logger.info("*=================write to csv success==============================*")
 
+
 def csv_write(file_name, data: list):
     """write data to csv"""
     df = pd.DataFrame(data)
@@ -19,6 +21,8 @@ def csv_write(file_name, data: list):
     for i in range(0, len(df), chunksize):
         df.iloc[i:i + chunksize].to_csv(f'./out/{file_name}', mode='a', header=False, index=False)
     return True
+
+
 def extract_opcode(disasm_text):
     """
     Extract the opcode and operands from the disassembly text
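
Note on the chunked write in csv_write above: appending slice by slice with mode='a' and header=False keeps memory flat for large opcode lists and avoids repeated header rows. A self-contained sketch of the same pattern (chunked_append and the demo data are illustrative, not repo code):

import os
import pandas as pd

# Append a large DataFrame to CSV in fixed-size slices so only one
# slice is materialized as text at a time (same pattern as csv_write).
def chunked_append(df: pd.DataFrame, path: str, chunksize: int = 10000) -> None:
    os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
    for i in range(0, len(df), chunksize):
        df.iloc[i:i + chunksize].to_csv(path, mode='a', header=False, index=False)

demo = pd.DataFrame([["benign", "benign", 3, "mov push call"]] * 25000)
chunked_append(demo, "./out/demo.csv", chunksize=10000)  # writes 3 slices
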
@@ -34,6 +38,7 @@ def extract_opcode(disasm_text):
         return opcode
     return ""
 
+
 def get_graph_r2pipe(r2pipe_open, file_type):
     # Get the opcode sequence inside each basic block
     opcode_Sequence = []
@@ -66,16 +71,15 @@ def get_graph_r2pipe(r2pipe_open, file_type):
                 if op["type"] == "invalid":
                     continue
                 block_opcode_Sequence.append(extract_opcode(op["opcode"]))
-            opcode_Sequence.append([file_type, file_type, len(block_opcode_Sequence), ' '.join(block_opcode_Sequence)])
+            opcode_Sequence.append(
+                [file_type, file_type, len(block_opcode_Sequence), ' '.join(block_opcode_Sequence)])
     except:
         print("Error: get function list failed")
     return opcode_Sequence
 
 
-
-
 if __name__ == '__main__':
-    logger = setup_logger('logger', 'log/opcode_benign.log')
+    logger = setup_logger('logger', './log/opcode_benign.log')
     file_type = 'benign'
     file_path = os.path.join('/mnt/d/bishe/dataset/train_benign')
     file_list = os.listdir(file_path)[:10000]
@@ -94,12 +98,6 @@ if __name__ == '__main__':
     else:
         csv_write(f'output_{file_type}.csv', done_list)
-
-
-
-
-
-
 
     # node_list = []
     # edge_list = []
     # temp_edge_list = []
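
The hunks above only touch fragments of get_graph_r2pipe; for orientation, this is roughly how a per-basic-block opcode walk looks with r2pipe. The aaa/aflj/afbj/pdbj commands are standard radare2; the helper name and the exact flow are assumptions, not the repo's implementation:

import r2pipe

def walk_basic_blocks(binary_path: str):
    """Rough sketch: yield the opcode strings of each basic block
    (assumed flow, not the repo's exact implementation)."""
    r2 = r2pipe.open(binary_path)
    r2.cmd("aaa")                       # analyze the binary
    for func in r2.cmdj("aflj") or []:  # function list as JSON
        for block in r2.cmdj(f"afbj @ {func['offset']}") or []:  # basic blocks
            ops = r2.cmdj(f"pdbj @ {block['addr']}") or []       # disasm one block
            yield [op["opcode"] for op in ops if op.get("type") != "invalid"]
    r2.quit()
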
ngram.py (110 changes)
@@ -6,6 +6,11 @@ import csv
 import argparse
 import statistics
 import plotly.express as px
+import concurrent.futures
+from functools import partial
+import logging
+import contextlib
+
 
 ###################################################################################################
 ## Program shall take two csv files of different classes - benign and malware
@@ -16,12 +21,12 @@ import plotly.express as px
 # --------------------------------------------------------------------------------------------------
 ## Generate ngrams given the corpus and factor n
 def generate_N_grams(corpus, n=1):
 
     words = [word for word in corpus.split(" ")]
     temp = zip(*[words[i:] for i in range(0, n)])
     ngram = [' '.join(n) for n in temp]
     return ngram
 
 
 # --------------------------------------------------------------------------------------------------
 ## Creates ngrams for the corpus list for given N and filters it based on the following criteria
 # file count >= percent of total corpus len (percent in [1..100])
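
generate_N_grams above slides an n-token window over the space-separated corpus by zipping shifted copies of the word list; a quick usage check (the function is restated so the snippet runs standalone):

def generate_N_grams(corpus, n=1):
    words = corpus.split(" ")
    # zip(*[words[i:] for i in range(n)]) pairs each word with its n-1 successors
    temp = zip(*[words[i:] for i in range(0, n)])
    return [' '.join(gram) for gram in temp]

print(generate_N_grams("mov push call ret", 2))  # ['mov push', 'push call', 'call ret']
print(generate_N_grams("mov push call ret", 3))  # ['mov push call', 'push call ret']
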
@@ -31,8 +36,10 @@ def filter_N_grams (corpusList, N, percent, filterFreq=0):
     total = len(corpusList)
     ngramDictionary = defaultdict(int)
     ngramFileCount = defaultdict(int)
-    for idx in tqdm(range(0, total), ncols=100, desc="Computing ngrams"):
+    for idx in range(0, total):
         opcodes = corpusList[idx]
+        if type(opcodes) is not str:
+            continue
         for item in generate_N_grams(opcodes, N):
             # compute frequency of all unique ngrams
             if len(opcodes) == 0:
@@ -58,7 +65,8 @@ def filter_N_grams (corpusList, N, percent, filterFreq=0):
             filteredNgramDictionary.pop(item)
 
     # print(f"Total ngrams:{len(ngramDictionary.items())} => filtered: {len(filteredNgramDictionary.items())}\n")
-    return [ngramDictionary, filteredNgramDictionary]
+    return ngramDictionary, filteredNgramDictionary
+
 
 # --------------------------------------------------------------------------------------------------
 # Calculate a normalization factor for frequency values of class1 and class2
@@ -70,6 +78,7 @@ def normalization_factor(class1, class2):
     mean2 = statistics.mean(class2)
     return mean1 / mean2
 
+
 # --------------------------------------------------------------------------------------------------
 # Write the data into the given csv file handle
 def WriteCSV(file, csvFields, dataDictionary):
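
normalization_factor above is simply the ratio of the two class means, used to bring malware and benign frequency counts onto a comparable scale. A worked example (values invented for illustration):

import statistics

class1 = [10, 20, 30]   # e.g. malware ngram frequencies
class2 = [1, 2, 3]      # e.g. benign ngram frequencies
factor = statistics.mean(class1) / statistics.mean(class2)
print(factor)  # 20 / 2 = 10.0 -> scale class2 counts by 10 to compare
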
@@ -77,18 +86,43 @@ def WriteCSV (file, csvFields, dataDictionary):
     writer.writeheader()
     writer.writerows(dataDictionary)
 
 
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+def process_csv_file(csvfile, ngram_type, file_percent_filter, frequency_filter):
+    """Process the csv file and compute the n-grams in parallel"""
+    print(f"start load csv file:{os.path.basename(csvfile)}")
+    dataframe = pd.read_csv(csvfile, encoding="utf8")
+    print(f"end load")
+    ngram_list = defaultdict(int)
+    filtered_ngram_list = defaultdict(int)
+    process_bar = tqdm(total=len(dataframe['corpus'].values), desc=f'Computing {ngram_type}-gram on files')
+    with concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:  # adjust the thread-pool size here
+        future_to_args = {
+            executor.submit(filter_N_grams, dataframe['corpus'].values[start: start + 10000],
+                            idx + 1, file_percent_filter, frequency_filter): start for start in
+            range(0, len(dataframe['corpus'].values), 10000)
+        }
+        for future in concurrent.futures.as_completed(future_to_args):
+            try:
+                sub_ngram_list, sub_filtered_ngram_list = future.result()
+                for i in [sub_ngram_list, ngram_list]:
+                    for key, value in i.items():
+                        ngram_list[key] += value
+                for i in [sub_filtered_ngram_list, filtered_ngram_list]:
+                    for key, value in i.items():
+                        filtered_ngram_list[key] += value
+                process_bar.update(10000)  # advance the progress bar manually
+            except Exception as exc:
+                logging.error(f"Error processing {idx + 1}-gram: {exc}")
+    return ngram_list, filtered_ngram_list
+
 # --------------------------------------------------------------------------------------------------
 # Execution starts here
 # Add command line arguments
 # CSV header: class, sub-class, size, corpus
-parser = argparse.ArgumentParser(description="ngram analysis on a given corpus csv file.")
-parser.add_argument('malware_csvfile', help='path to the malware corpus csv file')
-parser.add_argument('benign_csvfile', help='path to the benign corpus csv file')
-parser.add_argument('ngram', help='ngram to compute, higher value will be compute intensive')
 
 # Execute the parse_args() method
 
+if __name__ == '__main__':
 # Get user arguments
 malware_csvfile = os.path.join('./out/output_malware.csv')
 benign_csvfile = os.path.join('./out/output_benign.csv')
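
Two details in the new process_csv_file are worth flagging. The merge loop iterates over [sub_ngram_list, ngram_list], which folds the accumulator into itself every time a future completes and inflates earlier counts; merging only the sub-result is sufficient. The n-gram size also reaches the workers through the global idx rather than a parameter, and since filter_N_grams is CPU-bound pure Python, the GIL largely serializes a ThreadPoolExecutor; ProcessPoolExecutor is the usual alternative for this workload. A minimal corrected sketch (fan_out_merge is a hypothetical name; the 10000-row chunking and worker count mirror the commit, the rest is an assumption, not a drop-in patch):

import concurrent.futures
import os
from collections import defaultdict

def fan_out_merge(corpus, n, percent, freq):
    """Sketch: split the corpus into 10000-row chunks, count ngrams per
    chunk in parallel, then fold each partial dict into the accumulators once."""
    ngrams, filtered = defaultdict(int), defaultdict(int)
    # ProcessPoolExecutor sidesteps the GIL for CPU-bound counting;
    # a ThreadPoolExecutor only helps if the work releases the GIL.
    with concurrent.futures.ProcessPoolExecutor(max_workers=os.cpu_count()) as ex:
        futures = [ex.submit(filter_N_grams, corpus[s:s + 10000], n, percent, freq)
                   for s in range(0, len(corpus), 10000)]
        for future in concurrent.futures.as_completed(futures):
            sub_ngrams, sub_filtered = future.result()
            for key, value in sub_ngrams.items():   # merge the sub-result only,
                ngrams[key] += value                # never the accumulator itself
            for key, value in sub_filtered.items():
                filtered[key] += value
    return ngrams, filtered
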
@@ -100,11 +134,6 @@ if not (os.path.isfile(malware_csvfile) and os.path.isfile(benign_csvfile)):
     exit(1)
 
 # Read the csv file using pandas into data frame
-try:
-    malwareDF = pd.read_csv(malware_csvfile, encoding = "utf8")
-    benignDF = pd.read_csv(benign_csvfile, encoding="utf8")
-except Exception as error:
-    print(error)
 
 # Build a frequency list for ngrams
 filePercentFilter = 80  ## select ngrams present in x% of files
@@ -118,10 +147,14 @@ filteredBenignNgram = defaultdict(int)  ## filtered list of ngrams from benign
 ## common list ngrams from both malware and benign corpus with relative frequency (benignFreq - malwareFreq)
 filteredMergedNgram = defaultdict(int)
 
-
 # run for only the maxgram provided, change lower value to 0 to run for all values [1..N]
 for idx in range(maxgrams - 1, maxgrams):
     print(f"Computing {idx + 1}gram on files ...")
+    print(f"CPU core {os.cpu_count()} on use")
+    malwareNgram = []
+    filteredMalwareNgram = []
+    benignNgram = []
+    filteredBenignNgram = []
     malwareNgram.clear()
     filteredMalwareNgram.clear()
     benignNgram.clear()
@@ -129,19 +162,13 @@ for idx in range(maxgrams-1, maxgrams):
     filteredMergedNgram.clear()
 
     # opcodes decoded from pe file in sequence is stored as corpus in the csv
-    [malwareNgram, filteredMalwareNgram] = filter_N_grams(malwareDF['corpus'].values, idx+1,
-                                                          filePercentFilter, frequencyFilter)
+    malwareNgram, filteredMalwareNgram = process_csv_file(malware_csvfile, 'malware', filePercentFilter, frequencyFilter)
 
-    [benignNgram, filteredBenignNgram] = filter_N_grams(benignDF['corpus'].values, idx+1,
-                                                        filePercentFilter, frequencyFilter)
+    benignNgram, filteredBenignNgram = process_csv_file(benign_csvfile, 'benign', filePercentFilter, frequencyFilter)
 
     # creates a sorted list of ngram tuples with their frequency for 1 .. maxgram
-    print(f"Malware: {idx+1}gramCnt={len(malwareNgram.items())}, filterenCnt={len(filteredMalwareNgram.items())}")
-    print(f"Benign: {idx+1}gramCnt={len(benignNgram.items())}, filterenCnt={len(filteredBenignNgram.items())}")
-
-    ## Make a intersection of filtered list between malware and benign ngrams
     mergedList = list(set().union(filteredMalwareNgram.keys(), filteredBenignNgram.keys()))
 
     ## Now find the relative frequency b/w benign and malware files. = benign - malware
     ## write this for cases where ngrams are only present in one of the classes, malware or benign,
     ## for reusability in case a union of classes is taken.
@@ -170,24 +197,23 @@ for idx in range(maxgrams-1, maxgrams):
     # color labels as 'a' + frequency % 26
     # size as frequency/max * 100
     # hover name is ngram name
-    titlestr = str(idx+1) + "gram: Total samples(" + str(len(sortedMergedNgramList)) + ")"
-    htmlfile = str(idx+1) + "gram.html"
-    hovername = [item[0] for item in sortedMergedNgramList]
-    yval = [item[1] for item in sortedMergedNgramList]
-    xval = []
-    for key in hovername:
-        xval.append(max(filteredMalwareNgram[key], filteredBenignNgram[key]))
-    colors = [chr(ord('a') + (value % 26)) for value in xval]
-    maxval = max(xval)
-    sizeval = [(int((val/maxval)*100)+1) for val in xval]
-
-    fig = px.scatter(title=titlestr, y=yval, x=xval, color=colors,
-                     size=sizeval, hover_name=hovername, log_x=True,
-                     labels={
-                         "x": "Absolute Frequency",
-                         "y": "Relative Frequency"})
-    fig.show()
-    fig.write_html(htmlfile)
+    # titlestr = str(idx + 1) + "gram: Total samples(" + str(len(sortedMergedNgramList)) + ")"
+    # htmlfile = str(idx + 1) + "gram.html"
+    # hovername = [item[0] for item in sortedMergedNgramList]
+    # yval = [item[1]/1e10 for item in sortedMergedNgramList]
+    # xval = []
+    # for key in hovername:
+    #     xval.append(max(filteredMalwareNgram[key], filteredBenignNgram[key]))
+    # colors = [chr(ord('a') + (value % 26)) for value in xval]
+    # maxval = max(xval)
+    # sizeval = [(int((val / maxval) * 100) + 1) for val in xval]
+    #
+    # fig = px.scatter(title=titlestr, y=yval, x=xval, color=colors,
+    #                  size=sizeval, hover_name=hovername, log_x=True,
+    #                  labels={
+    #                      "x": "Absolute Frequency",
+    #                      "y": "Relative Frequency"})
+    # fig.write_html(htmlfile)
 
     # write the final ngrams into a file for feature selection
     ngramDictList = []
|
|||||||
ngramDictList.append(dictItem)
|
ngramDictList.append(dictItem)
|
||||||
|
|
||||||
csvfields = ['ngram', 'count']
|
csvfields = ['ngram', 'count']
|
||||||
csvname = str(idx+1) + "gram.csv"
|
csvname = "./out/"+str(idx + 1) + "gram.csv"
|
||||||
|
print("*======================start write csv=======================================*")
|
||||||
try:
|
try:
|
||||||
csvfile = open(csvname, 'w')
|
csvfile = open(csvname, 'w')
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
print(f"Error: writing csvfile {err}")
|
print(f"Error: writing csvfile {err}")
|
||||||
WriteCSV(csvfile, csvfields, ngramDictList)
|
WriteCSV(csvfile, csvfields, ngramDictList)
|
||||||
csvfile.close()
|
csvfile.close()
|
||||||
|
|
||||||
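
One last note on the write-out above: if open() fails, the except branch only prints, and WriteCSV is then called on an unbound csvfile. A with-block keeps the handle management safe and closes the file even on error; a sketch (write_ngram_csv is a hypothetical wrapper, not repo code):

import csv
import os

def write_ngram_csv(csvname, csvfields, rows):
    """Sketch: the with-block closes the handle even if writing fails;
    newline='' follows the csv-module recommendation."""
    os.makedirs(os.path.dirname(csvname) or ".", exist_ok=True)
    try:
        with open(csvname, 'w', newline='') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=csvfields)
            writer.writeheader()
            writer.writerows(rows)
    except OSError as err:
        print(f"Error: writing csvfile {err}")

write_ngram_csv('./out/2gram.csv', ['ngram', 'count'],
                [{'ngram': 'mov push', 'count': 42}])
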