#!/usr/bin/env python
# coding: utf-8

import ast
import json

import numpy as np
import pandas as pd
from mlxtend.frequent_patterns import association_rules, fpgrowth
from mlxtend.preprocessing import TransactionEncoder
from pandas import json_normalize
from prefixspan import PrefixSpan

# Load the 2020 accident sample and keep only the columns used downstream.
df = pd.read_csv("ts_data_accident-2020_sample.csv", low_memory=False, encoding='ISO-8859-1')
pd.set_option('display.max_columns', None)
df = df[['RISK_V2', 'INST_NM', 'DRULE_ATT_TYPE_CODE1', 'TW_ATT_IP', 'TW_ATT_PORT',
         'TW_DMG_IP', 'TW_DMG_PORT', 'ACCD_DMG_PROTO_NM', 'TW_ATT_CT_NM',
         'ACCD_FIND_MTD_CODE', 'DRULE_NM']].dropna()

##################### NTM section #####################
# ACCD_FIND_MTD_CODE == 1 selects the NTM-detected incidents.
NTM_df = df[df['ACCD_FIND_MTD_CODE'] == 1]

# RISK_V2 is a serialized dict holding asset / intent / source flags;
# pick it out to extract asset, risk intent and black-IP source items.
RISK_V2 = NTM_df['RISK_V2']
RISK_V2_FILTERED = RISK_V2.dropna()  # drop missing values

# --- lookup tables for the RISK_V2 flag descriptions ------------------------
# Asset slots 1-11 are the public ('공인-') kinds; slots 12-22 repeat the same
# kinds as private ('사설-').
_ASSET_KINDS = ['전체IP대역(유선)', '전체IP대역(무선)', 'WEB서버', '내부응용서버',
                'DB서버', '패치서버', '네트워크', '보안', '업무용PC', '비업무용PC',
                '기타']
_ASSET_DESC = {}
for _idx, _kind in enumerate(_ASSET_KINDS, start=1):
    _ASSET_DESC['ASSETS_VAL_%d' % _idx] = '공인-' + _kind
    _ASSET_DESC['ASSETS_VAL_%d' % (_idx + 11)] = '사설-' + _kind

_INTENT_DESC = {
    'INTENT_VAL_1': '파괴',
    'INTENT_VAL_2': '유출',
    'INTENT_VAL_3': '지연',
    'INTENT_VAL_4': '잠복',
    'INTENT_VAL_5': '단순침입',
    'INTENT_VAL_6': 'MD5',
    'INTENT_VAL_0': 'Default',
}

_SOURCE_DESC = {
    'SOURCE_VAL_1': '북한IP',
    'SOURCE_VAL_3': 'ECSC Black IP',
}


def get_asset_desc(asset_field):
    """Return the Korean description of an ASSETS_VAL_* key ('' if unknown)."""
    return _ASSET_DESC.get(asset_field, '')


def get_intent_desc(intent_field):
    """Return the Korean description of an INTENT_VAL_* key ('' if unknown)."""
    return _INTENT_DESC.get(intent_field, '')


def get_source_desc(source_field):
    """Return the description of a SOURCE_VAL_* key ('' if unknown)."""
    return _SOURCE_DESC.get(source_field, '')


def _parse_risk(raw):
    """Parse one serialized RISK_V2 cell into a dict.

    The cells look like Python dict reprs (single-quoted), which
    ast.literal_eval handles directly.  The original quote-swap +
    json.loads approach broke on values containing apostrophes and on
    Python True/False literals, so it is kept only as a fallback for
    genuinely JSON-shaped strings.
    """
    try:
        return ast.literal_eval(raw)
    except (ValueError, SyntaxError):
        return json.loads(raw.replace("'", '"'))


# Expand every RISK_V2 cell into a flat row.  Collect the per-row frames and
# concatenate once: the original pd.concat-inside-the-loop was quadratic.
_risk_frames = [json_normalize(_parse_risk(_raw)) for _raw in RISK_V2_FILTERED]
risk_df = pd.concat(_risk_frames, ignore_index=True) if _risk_frames else pd.DataFrame()

risk_df_column_names = risk_df.columns

# Per-row "RISK_V2.KEY=description" item lists, filled by filter_all().
assets_df = []
intents_df = []
sources_df = []


def filter_all(risk):
    """Append one asset/intent/source item list per row of `risk` to the
    module-level assets_df / intents_df / sources_df accumulators.

    Only truthy flag columns are kept; each item is rendered as
    'RISK_V2.<KEY>=<description>'.
    """
    for i in range(len(risk)):
        row = risk.iloc[i]  # hoisted: the original re-indexed risk.iloc[i] per column
        row_assets = []
        row_intents = []
        row_sources = []
        for column in risk_df_column_names:
            if not row[column]:
                continue
            if 'ASSETS_VAL_' in column:
                row_assets.append('RISK_V2.' + column + '=' + get_asset_desc(column))
            elif 'INTENT_VAL_' in column:
                row_intents.append('RISK_V2.' + column + '=' + get_intent_desc(column))
            elif 'SOURCE_VAL_' in column:
                row_sources.append('RISK_V2.' + column + '=' + get_source_desc(column))
        assets_df.append(row_assets)
        intents_df.append(row_intents)
        sources_df.append(row_sources)


filter_all(risk_df)
# Operate on a real copy, not a view of df, so the column assignments below do
# not trigger SettingWithCopyWarning / silently write into a slice.
NTM_df = NTM_df.copy()


def _strip_brackets(col):
    """str()-ify a list-valued column and remove the '[' / ']' delimiters.

    The original used .str.replace('[', '', regex=True): '[' is an invalid
    bare regex (unterminated character set) and only worked through a
    deprecated pandas single-character special case.  regex=False makes the
    literal replacement explicit.
    """
    return (col.astype(str)
               .str.replace('[', '', regex=False)
               .str.replace(']', '', regex=False))


# Attach the per-row item lists produced by filter_all() (positional
# assignment of plain lists) and flatten them to bracket-free strings.
NTM_df['ASSETS_VAL'] = assets_df
NTM_df['ASSETS_VAL'] = _strip_brackets(NTM_df['ASSETS_VAL'])
NTM_df['INTENT_VAL'] = intents_df
NTM_df['INTENT_VAL'] = _strip_brackets(NTM_df['INTENT_VAL'])
NTM_df['SOURCE_VAL'] = sources_df
NTM_df['SOURCE_VAL'] = _strip_brackets(NTM_df['SOURCE_VAL'])

# RISK_V2 has been fully expanded into the three columns above.
NTM_df.drop(columns=['RISK_V2'], inplace=True)

##################### Proceed from here. #####################
##################### Apply the algorithm to every item combination over the
##################### 12 items below (excluding 12. device ACCD_FIND_MTD_CODE)
# 13 columns in total:
#  1. institution             INST_NM
#  2. attack                  DRULE_ATT_TYPE_CODE1
#  3. assets                  ASSETS_VAL
#  4. threat attack IP        TW_ATT_IP
#  5. threat attack port      TW_ATT_PORT
#  6. threat damage IP        TW_DMG_IP
#  7. threat damage port      TW_DMG_PORT
#  8. threat damage protocol  ACCD_DMG_PROTO_NM
#  9. attack country          TW_ATT_CT_NM
# 10. intent (7 kinds)        INTENT_VAL
# 11. IP/URL weight           SOURCE_VAL
# 12. device                  ACCD_FIND_MTD_CODE
# 13. detection rule name     DRULE_NM
# Replace the remaining NaN: free-text columns get '', the rest get 0.
# (The original also re-assigned the four TW_* columns to themselves;
# those no-ops were dropped.)
_TEXT_NA_COLS = ['ACCD_DMG_PROTO_NM', 'INST_NM', 'DRULE_ATT_TYPE_CODE1',
                 'TW_ATT_CT_NM', 'DRULE_NM']
_ZERO_NA_COLS = ['TW_ATT_IP', 'TW_ATT_PORT', 'TW_DMG_IP', 'TW_DMG_PORT',
                 'ASSETS_VAL', 'INTENT_VAL', 'SOURCE_VAL']
for _col in _TEXT_NA_COLS:
    NTM_df[_col] = NTM_df[_col].replace(np.nan, '')
for _col in _ZERO_NA_COLS:
    NTM_df[_col] = NTM_df[_col].replace(np.nan, 0)

# Work on a copy so reruns do not mutate the source frame; the detection
# device column is excluded from the combination mining.
copy_df = NTM_df.copy()
copy_df.drop(columns=['ACCD_FIND_MTD_CODE'], inplace=True)

data_len = len(NTM_df)

# Combination mining
import itertools


def get_comb_df(df, n):
    """Return "COLUMN:value" itemsets for every n-column combination of `df`
    that includes DRULE_ATT_TYPE_CODE1 — one itemset per row per combination.

    Rows are accessed positionally with .iloc: NTM_df/copy_df keep the
    original (gappy) CSV index after dropna()/filtering, so the original
    df.loc[i] over range(len(df)) raised KeyError on any index gap.
    """
    combos = [c for c in itertools.combinations(df.columns.tolist(), n)
              if 'DRULE_ATT_TYPE_CODE1' in c]
    ret_list = []
    for combo in combos:
        for i in range(len(df)):
            row = df.iloc[i]
            ret_list.append(['%s:%s' % (col, row[col]) for col in combo])
    return ret_list
# Item order reference:
#   1 INST_NM, 2 DRULE_ATT_TYPE_CODE1, 3 TW_ATT_IP, 4 TW_ATT_PORT,
#   5 TW_DMG_IP, 6 TW_DMG_PORT, 7 ACCD_DMG_PROTO_NM, 8 TW_ATT_CT_NM,
#   9 DRULE_NM, 10 ASSETS_VAL, 11 INTENT_VAL, 12 SOURCE_VAL

# Build one transaction (list of "COLUMN:value" items) per NTM row, with
# empty values dropped.
nonnull_list = []
for _i in range(data_len):
    # .iloc, not .loc: the frame keeps its gappy CSV index, so .loc[_i]
    # raised KeyError.  str(...) instead of scalar .astype(str): plain
    # Python values (e.g. the 0 NaN-fill) have no .astype attribute.
    _row = NTM_df.iloc[_i]
    _candidates = [
        'INST_NM:' + _row['INST_NM'],
        'DRULE_ATT_TYPE_CODE1:' + _row['DRULE_ATT_TYPE_CODE1'],
        'TW_ATT_IP:' + str(_row['TW_ATT_IP']),
        'TW_ATT_PORT:' + str(_row['TW_ATT_PORT']),
        'TW_DMG_IP:' + str(_row['TW_DMG_IP']),
        'TW_DMG_PORT:' + str(_row['TW_DMG_PORT']),
        'ACCD_DMG_PROTO_NM:' + _row['ACCD_DMG_PROTO_NM'],
        'TW_ATT_CT_NM:' + _row['TW_ATT_CT_NM'],
        'DRULE_NM:' + _row['DRULE_NM'],
        _row['ASSETS_VAL'],
        _row['INTENT_VAL'],
        _row['SOURCE_VAL'],
    ]
    # Drop empty items ('' or a stringified empty list).
    nonnull_list.append([item for item in _candidates if item and item != '[]'])

# NOTE(review): the original called get_comb_df(copy_df, 9) here and discarded
# the result; removed as dead (and expensive) work.


def get_prefix_span(df_list, n):
    """Mine sequential patterns with PrefixSpan, keep only those longer than
    n items, and return them sorted by descending Frequency.

    Returns a DataFrame with Cause / Effect / Frequency columns; Effect is
    NaN here and is filled in later by get_Effect().
    """
    prefix_span = PrefixSpan(df_list)
    patterns = prefix_span.frequent(1, filter=lambda patt, matches: len(patt) > n)
    ps_df = pd.DataFrame(patterns)
    ps_df.rename(columns={0: 'Frequency', 1: 'Cause'}, inplace=True)
    ps_df['Effect'] = np.nan
    ps_df = ps_df[['Cause', 'Effect', 'Frequency']]
    return ps_df.sort_values(by=['Frequency'], ascending=False, ignore_index=True)


test = get_prefix_span(nonnull_list, 8)
def get_Effect(df, limit=10000):
    """Fill the Effect column: if any Cause item equals
    'DRULE_ATT_TYPE_CODE1:<rule>', that rule name becomes the row's Effect
    (later matches overwrite earlier ones, as in the original).

    The original hard-coded range(0, 10000) and raised KeyError on frames
    with fewer rows; `limit` keeps the original default but is capped at
    len(df).  Mutates `df` in place and returns it.
    """
    drules = ['Attack', 'DDOS', 'HACK', 'MAIL', 'Malwr', 'WEB']
    prefix = 'DRULE_ATT_TYPE_CODE1:'
    for i in range(min(limit, len(df))):
        for item in df.loc[i, 'Cause']:
            for drule in drules:
                if item == prefix + drule:
                    df.loc[i, 'Effect'] = drule
                    break
    return df


# Rows containing a DRULE_ATT_TYPE_CODE1 item get their Effect extracted.
# (The original called get_Effect(test) twice and kept several display-only
# head()/tail() expressions; the redundant work was dropped.)
tdf = get_Effect(test)

# Keep only rows whose Effect was resolved (drop Effect == NaN).
# .copy(): the original aliased a head() view and mutated it in place.
test_result = tdf.head(1000).copy()
test_result['Effect'] = test_result['Effect'].replace(np.nan, 0)
test_result = test_result[test_result.Effect != 0]
test_result = test_result.reset_index(drop=True)

# Flatten each Cause list into a comma-joined string for regex matching.
tdf['Cause'] = [','.join(map(str, cause)) for cause in tdf['Cause']]


def regbase(words):
    """Build a lookahead regex matching strings that contain every word.

    NOTE(review): words are interpolated unescaped, so regex metacharacters
    inside values ('.', '[', ...) act as regex syntax; apply re.escape per
    word if purely literal matching is intended — left unchanged here to
    preserve the original matching behavior.
    """
    lookahead = '(?=.*{})'
    return r'^{}'.format(''.join(lookahead.format(w) for w in words))


def result(n):
    """Print every n-item combination from copy_df together with the mined
    rules whose flattened Cause contains all of the combination's items."""
    comlist = get_comb_df(copy_df, n)
    for combo in comlist:
        print(combo)
        matched = tdf[tdf['Cause'].str.contains(regbase(combo), na=False, regex=True)]
        print(matched.reset_index(drop=True, inplace=False))