#!/usr/bin/env python
# coding: utf-8

# In[1]:

import json

import pandas as pd
import numpy as np
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import association_rules, fpgrowth
from pandas import json_normalize
from prefixspan import PrefixSpan

# Load the 2020 accident sample and keep only the columns the analysis needs.
df = pd.read_csv("ts_data_accident-2020_sample.csv", low_memory=False, encoding='ISO-8859-1')
pd.set_option('display.max_columns', None)
df = df[['RISK_V2', 'INST_NM', 'DRULE_ATT_TYPE_CODE1', 'TW_ATT_IP', 'TW_ATT_PORT',
         'TW_DMG_IP', 'TW_DMG_PORT', 'ACCD_DMG_PROTO_NM', 'TW_ATT_CT_NM',
         'ACCD_FIND_MTD_CODE', 'DRULE_NM']].dropna()
len(df)

##################### NTM section #####################
# Keep only incidents discovered by detection-method code 1 (NTM).
NTM_df = df[df['ACCD_FIND_MTD_CODE'] == 1]
NTM_df

# In[2]:

# Pull the asset / intent / black-IP information out of the RISK_V2 column.
RISK_V2 = NTM_df['RISK_V2']
RISK_V2_FILTERED = RISK_V2.dropna()  # drop missing values

# Code -> description lookup tables. These replace the original 30-branch
# if/elif chains; .get(code, '') keeps the "unknown code -> ''" behaviour.
_ASSET_DESCRIPTIONS = {
    'ASSETS_VAL_1': '공인-전체IP대역(유선)',
    'ASSETS_VAL_2': '공인-전체IP대역(무선)',
    'ASSETS_VAL_3': '공인-WEB서버',
    'ASSETS_VAL_4': '공인-내부응용서버',
    'ASSETS_VAL_5': '공인-DB서버',
    'ASSETS_VAL_6': '공인-패치서버',
    'ASSETS_VAL_7': '공인-네트워크',
    'ASSETS_VAL_8': '공인-보안',
    'ASSETS_VAL_9': '공인-업무용PC',
    'ASSETS_VAL_10': '공인-비업무용PC',
    'ASSETS_VAL_11': '공인-기타',
    'ASSETS_VAL_12': '사설-전체IP대역(유선)',
    'ASSETS_VAL_13': '사설-전체IP대역(무선)',
    'ASSETS_VAL_14': '사설-WEB서버',
    'ASSETS_VAL_15': '사설-내부응용서버',
    'ASSETS_VAL_16': '사설-DB서버',
    'ASSETS_VAL_17': '사설-패치서버',
    'ASSETS_VAL_18': '사설-네트워크',
    'ASSETS_VAL_19': '사설-보안',
    'ASSETS_VAL_20': '사설-업무용PC',
    'ASSETS_VAL_21': '사설-비업무용PC',
    'ASSETS_VAL_22': '사설-기타',
}

_INTENT_DESCRIPTIONS = {
    'INTENT_VAL_1': '파괴',
    'INTENT_VAL_2': '유출',
    'INTENT_VAL_3': '지연',
    'INTENT_VAL_4': '잠복',
    'INTENT_VAL_5': '단순침입',
    'INTENT_VAL_6': 'MD5',
    'INTENT_VAL_0': 'Default',
}

_SOURCE_DESCRIPTIONS = {
    'SOURCE_VAL_1': '북한IP',
    'SOURCE_VAL_3': 'ECSC Black IP',
}


def get_asset_desc(asset_field):
    """Return the Korean description for an ASSETS_VAL_* code, '' if unknown."""
    return _ASSET_DESCRIPTIONS.get(asset_field, '')


def get_intent_desc(intent_field):
    """Return the description for an INTENT_VAL_* code, '' if unknown."""
    return _INTENT_DESCRIPTIONS.get(intent_field, '')


def get_source_desc(source_field):
    """Return the description for a SOURCE_VAL_* code, '' if unknown."""
    return _SOURCE_DESCRIPTIONS.get(source_field, '')


# New assets column: turn every RISK_V2 value (single-quoted pseudo-JSON) into
# a row of one flat DataFrame. Frames are collected first and concatenated
# once — pd.concat inside the loop is quadratic in the number of rows.
_risk_frames = []
for risk in RISK_V2_FILTERED:
    json_string = json.loads(risk.replace("'", "\""))  # make it valid JSON
    _risk_frames.append(json_normalize(json_string))
risk_df = pd.concat(_risk_frames, ignore_index=True) if _risk_frames else pd.DataFrame()

risk_df_column_names = risk_df.columns

# Per-row description lists, filled by filter_all() below.
assets_df = []
intents_df = []
sources_df = []


def filter_all(risk):
    """For each row of *risk*, append the truthy ASSETS / INTENT / SOURCE
    descriptions (as 'RISK_V2.<code>=<desc>' strings) to the module-level
    assets_df / intents_df / sources_df lists, one sub-list per row."""
    for i in range(len(risk)):
        row = risk.iloc[i]  # fetch the row once instead of chained indexing
        risks = []
        intents = []
        sources = []
        for column in risk_df_column_names:
            if not row[column]:
                continue  # falsy cell (0 / '' / False): code not set for this row
            if 'ASSETS_VAL_' in column:
                risks.append('RISK_V2.' + column + '=' + get_asset_desc(column))
            if 'INTENT_VAL_' in column:
                intents.append('RISK_V2.' + column + '=' + get_intent_desc(column))
            if 'SOURCE_VAL_' in column:
                sources.append('RISK_V2.' + column + '=' + get_source_desc(column))
        assets_df.append(risks)
        intents_df.append(intents)
        sources_df.append(sources)


filter_all(risk_df)
# In[3]:

# Attach the parsed RISK_V2 pieces to NTM_df as flat, bracket-free strings.
# BUG FIX: the original called .str.replace('[', '', regex=True); a lone '['
# is an invalid regular expression (unterminated character set) and raises
# re.error, so the brackets are stripped as literal text with regex=False.

def _flatten_list_column(values):
    """list-of-lists -> Series of comma-joined strings without the [ ] brackets,
    aligned to NTM_df's index."""
    col = pd.Series(values, index=NTM_df.index).astype(str)
    col = col.str.replace('[', '', regex=False)
    return col.str.replace(']', '', regex=False)


NTM_df['ASSETS_VAL'] = _flatten_list_column(assets_df)
NTM_df['ASSETS_VAL']

# In[4]:

NTM_df['INTENT_VAL'] = _flatten_list_column(intents_df)
NTM_df['INTENT_VAL']

# In[5]:

NTM_df['SOURCE_VAL'] = _flatten_list_column(sources_df)
NTM_df['SOURCE_VAL']

# In[8]:

# RISK_V2 is now fully decomposed into the three columns above; drop the raw JSON.
NTM_df.drop(columns=['RISK_V2'], inplace=True)

# In[12]:

##################### Continue from here. #####################
##################### Apply the algorithm to every combination of the items
##################### below (excluding 12. device ACCD_FIND_MTD_CODE).
# It should be 13 columns in total
#  1. institution          INST_NM
#  2. attack               DRULE_ATT_TYPE_CODE1
#  3. asset                ASSETS_VAL
#  4. threat attacker IP   TW_ATT_IP
#  5. threat attacker port TW_ATT_PORT
#  6. threat victim IP     TW_DMG_IP
#  7. threat victim port   TW_DMG_PORT
#  8. victim protocol      ACCD_DMG_PROTO_NM
#  9. attack country       TW_ATT_CT_NM
# 10. intent (7 kinds)     INTENT_VAL
# 11. IP/URL weight        SOURCE_VAL
# 12. device               ACCD_FIND_MTD_CODE
# 13. detection rule name  DRULE_NM
# (item 13 above is the detection rule name, DRULE_NM)
NTM_df.isna().sum()

# Replace residual NaNs: text columns get '', the rest get 0
# (one fillna call replaces the original twelve per-column .replace lines).
NTM_df = NTM_df.fillna({
    'ACCD_DMG_PROTO_NM': '',
    'INST_NM': '',
    'DRULE_ATT_TYPE_CODE1': '',
    'TW_ATT_IP': 0,
    'TW_ATT_PORT': 0,
    'TW_DMG_IP': 0,
    'TW_DMG_PORT': 0,
    'TW_ATT_CT_NM': '',
    'ASSETS_VAL': 0,
    'INTENT_VAL': 0,
    'SOURCE_VAL': 0,
    'DRULE_NM': '',
})

# Check NaN out again
NTM_df.isna().sum()

# Generic item1..item12 view of the data (device column excluded).
copy_df = NTM_df.copy()
copy_df.drop(columns=['ACCD_FIND_MTD_CODE'], inplace=True)
copy_df.columns = ['item1', 'item2', 'item3', 'item4', 'item5', 'item6',
                   'item7', 'item8', 'item9', 'item10', 'item11', 'item12']

data_len = len(NTM_df)
hwan_list = []

# All 6-item combinations of the first 11 items.
import itertools
arr = ['item1', 'item2', 'item3', 'item4', 'item5', 'item6',
       'item7', 'item8', 'item9', 'item10', 'item11']
nCr = list(itertools.combinations(arr, 6))

# Item order:
#   item1=INST_NM  item2=DRULE_ATT_TYPE_CODE1  item3=TW_ATT_IP
#   item4=TW_ATT_PORT  item5=TW_DMG_IP  item6=TW_DMG_PORT
#   item7=ACCD_DMG_PROTO_NM  item8=TW_ATT_CT_NM  item9=DRULE_NM
#   item10=ASSETS_VAL  item11=INTENT_VAL  item12=SOURCE_VAL

# Build one PrefixSpan transaction per row, skipping empty items.
# BUG FIX: the original used NTM_df.loc[i] with i in range(data_len), but
# NTM_df keeps the filtered (non-contiguous) index of the source frame, so
# label-based lookup raises KeyError / hits wrong rows; .iloc is positional.
for i in range(data_len):
    row = NTM_df.iloc[i]
    candidates = [row['INST_NM'], row['DRULE_ATT_TYPE_CODE1'], row['TW_ATT_IP'],
                  row['TW_ATT_PORT'], row['TW_DMG_IP'], row['TW_DMG_PORT'],
                  row['ACCD_DMG_PROTO_NM'], row['TW_ATT_CT_NM'], row['DRULE_NM'],
                  row['ASSETS_VAL'], row['INTENT_VAL'], row['SOURCE_VAL']]
    # drop falsy values and stringified empty lists
    hwan_list.append([item for item in candidates if item and item != '[]'])

new_ps = PrefixSpan(hwan_list)
copy_df

# In[23]:

# Materialize the first three 6-item combinations, row by row.
comlist = []
for n in range(0, 3):
    for i in range(0, data_len):
        locdata = copy_df.iloc[i]
        comlist.append([locdata[item] for item in nCr[n]])
comlist  # the nCr item combinations; used later to look up pattern order

# In[25]:

# Frequent sequential patterns of length > 5 with minimum support 1.
test_ntm = new_ps.frequent(1, filter=lambda patt, matches: len(patt) > 5)
test_ntm_df = pd.DataFrame(test_ntm)
test_ntm_df.rename(columns={0: 'Frequency', 1: 'Cause'}, inplace=True)
# Make the new column for filling the Effect, then reorder the columns.
test_ntm_df['Effect'] = np.nan
test_ntm_df = test_ntm_df[['Cause', 'Effect', 'Frequency']]
test_sort_values = test_ntm_df.sort_values(by=['Frequency'], ascending=False, ignore_index=True)

# In[26]:

prefix_NTM_df = test_sort_values.copy()
prefix_NTM_df

# In[ ]:

# Fill Effect with the detection-rule family found inside each Cause pattern.
# Verified to work on a reduced data set.
# NOTE: when several family names occur in one pattern the LAST one wins —
# this preserves the original loop's behaviour (its break only left the
# inner loop, so later matches kept overwriting Effect).
drules = ['Attack', 'DDOS', 'HACK', 'MAIL', 'Malwr', 'WEB']
for i in range(len(prefix_NTM_df)):
    for item in prefix_NTM_df.loc[i, 'Cause']:
        if item in drules:
            prefix_NTM_df.loc[i, 'Effect'] = item

# In[27]:

# Collapse each Cause tuple into a single comma-joined string.
prefix_NTM_df['Cause'] = [','.join(map(str, word)) for word in prefix_NTM_df['Cause']]

# In[ ]:

# Regular-expression matching follows below.
# Regular-expression template: pass any list of words.

import re


def regbase(words):
    """Return a regex matching strings that contain every word in *words*.

    Each word becomes a start-anchored lookahead '(?=.*word)', so the words
    may appear in any order. BUG FIX: words are passed through re.escape —
    raw item values (IPs/ports with '.', asset names with parentheses) would
    otherwise be interpreted as regex metacharacters and could raise
    re.error or match too loosely.
    """
    base = r'^{}'
    expr = '(?=.*{})'
    return base.format(''.join(expr.format(re.escape(str(w))) for w in words))


# Show which frequent patterns contain each of the first 20 item combinations.
for i in range(0, 20):
    print(comlist[i])
    print(prefix_NTM_df[prefix_NTM_df['Cause'].str.contains(regbase(comlist[i]), na=False, regex=True)])

# In[ ]: