天池学习赛深度学习篇


上一篇文章讲了怎么用机器学习的方法进行阿里云安全恶意程序检测比赛,本章主要看深度学习如何实现阿里云安全恶意程序检测

TextCNN建模

数据读取

#模块引入
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

import lightgbm as lgb
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder

from tqdm import tqdm_notebook
from sklearn.preprocessing import LabelBinarizer,LabelEncoder

import warnings
warnings.filterwarnings('ignore')
%matplotlib inline

# Load the raw API-call logs for the competition from local CSV files.
# NOTE(review): assumes the data lives in ../security_data/ relative to
# this script — confirm before running.
path  = '../security_data/'
train = pd.read_csv(path + 'security_train.csv')
test  = pd.read_csv(path + 'security_test.csv')

#模块引入
import numpy as np
import pandas as pd
from tqdm import tqdm  

#存储占用大小统计
class _Data_Preprocess:
    def __init__(self):
        self.int8_max = np.iinfo(np.int8).max
        self.int8_min = np.iinfo(np.int8).min

        self.int16_max = np.iinfo(np.int16).max
        self.int16_min = np.iinfo(np.int16).min

        self.int32_max = np.iinfo(np.int32).max
        self.int32_min = np.iinfo(np.int32).min

        self.int64_max = np.iinfo(np.int64).max
        self.int64_min = np.iinfo(np.int64).min

        self.float16_max = np.finfo(np.float16).max
        self.float16_min = np.finfo(np.float16).min

        self.float32_max = np.finfo(np.float32).max
        self.float32_min = np.finfo(np.float32).min

        self.float64_max = np.finfo(np.float64).max
        self.float64_min = np.finfo(np.float64).min

    def _get_type(self, min_val, max_val, types):
        if types == 'int':
            if max_val <= self.int8_max and min_val >= self.int8_min:
                return np.int8
            elif max_val <= self.int16_max <= max_val and min_val >= self.int16_min:
                return np.int16
            elif max_val <= self.int32_max and min_val >= self.int32_min:
                return np.int32
            return None

        elif types == 'float':
            if max_val <= self.float16_max and min_val >= self.float16_min:
                return np.float16
            if max_val <= self.float32_max and min_val >= self.float32_min:
                return np.float32
            if max_val <= self.float64_max and min_val >= self.float64_min:
                return np.float64
            return None

    def _memory_process(self, df):
        init_memory = df.memory_usage().sum() / 1024 ** 2 / 1024
        print('Original data occupies {} GB memory.'.format(init_memory))
        df_cols = df.columns

          
        for col in tqdm_notebook(df_cols):
            try:
                if 'float' in str(df[col].dtypes):
                    max_val = df[col].max()
                    min_val = df[col].min()
                    trans_types = self._get_type(min_val, max_val, 'float')
                    if trans_types is not None:
                        df[col] = df[col].astype(trans_types)
                elif 'int' in str(df[col].dtypes):
                    max_val = df[col].max()
                    min_val = df[col].min()
                    trans_types = self._get_type(min_val, max_val, 'int')
                    if trans_types is not None:
                        df[col] = df[col].astype(trans_types)
            except:
                print(' Can not do any process for column, {}.'.format(col)) 
        afterprocess_memory = df.memory_usage().sum() / 1024 ** 2 / 1024
        print('After processing, the data occupies {} GB memory.'.format(afterprocess_memory))
        return df

# Shared instance of the dtype-downcasting helper defined above.
memory_process = _Data_Preprocess()

# Preview the first rows (only renders output inside a notebook cell).
train.head()


数据预处理

# Build a bidirectional api-name <-> integer-index vocabulary. Indices
# start at 1 so that 0 stays free for the padding value added later by
# pad_sequences.
unique_api = train['api'].unique()

idx_range = range(1, len(unique_api) + 1)
api2index = dict(zip(unique_api, idx_range))
index2api = dict(zip(idx_range, unique_api))

# Add the numeric api_idx column to both splits via the vocabulary.
train['api_idx'] = train['api'].map(api2index)
test['api_idx'] = test['api'].map(api2index)

def get_sequence(df, period_idx):
    """Split `df` into one API-index sequence per file.

    `period_idx` holds the positional row at which each file_id first
    appears (rows of one file are contiguous), so consecutive entries
    delimit one file's rows; the final segment runs to the end of `df`.

    Returns a list of 1-D arrays of 'api_idx' values, one per file,
    in order of first appearance.
    """
    all_idx = df['api_idx'].values
    # Append len(df) as a sentinel so each (lo, hi) pair bounds one file.
    bounds = list(period_idx) + [len(df)]
    return [all_idx[lo:hi] for lo, hi in zip(bounds[:-1], bounds[1:])]


# Positional index of the first row of each file_id; consecutive entries
# therefore delimit one file's contiguous rows in the log.
# (keep='first' is the drop_duplicates default, so it is omitted here.)
train_period_idx = train['file_id'].drop_duplicates().index.values
test_period_idx = test['file_id'].drop_duplicates().index.values

# Collapse the logs to one row per file (keeping the label for train).
train_df = train[['file_id', 'label']].drop_duplicates()
test_df = test[['file_id']].drop_duplicates()

# Attach each file's full API-index sequence as a list-valued column.
train_df['seq'] = get_sequence(train, train_period_idx)
test_df['seq'] = get_sequence(test, test_period_idx)

到这里,train_df包含file_id、label和seq三个字段,其中seq的值是一组列表


train_df的图表输出可以理解为以file_id、label两个字段分组,展示每条数据对应api的值

TextCNN网络结构

# 模块引入
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, LSTM, Lambda, Embedding, Dropout, Activation,GRU,Bidirectional
from keras.layers import Conv1D,Conv2D,MaxPooling2D,GlobalAveragePooling1D,GlobalMaxPooling1D, MaxPooling1D, Flatten
from keras.layers import CuDNNGRU, CuDNNLSTM, SpatialDropout1D
from keras.layers.merge import concatenate, Concatenate, Average, Dot, Maximum, Multiply, Subtract, average
from keras.models import Model
from keras.optimizers import RMSprop,Adam
from keras.layers.normalization import BatchNormalization
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.optimizers import SGD
from keras import backend as K
from sklearn.decomposition import TruncatedSVD, NMF, LatentDirichletAllocation
from keras.layers import SpatialDropout1D
from keras.layers.wrappers import Bidirectional


# TextCNN model builder.
def TextCNN(max_len, max_cnt, embed_size, num_filters, kernel_size, conv_action, mask_zero, num_classes=8):
    """Build and compile a TextCNN classifier over API-index sequences.

    Args:
        max_len: fixed (padded) input sequence length.
        max_cnt: vocabulary size for the embedding layer.
        embed_size: embedding dimension.
        num_filters: number of Conv1D filters per kernel size.
        kernel_size: iterable of kernel widths; one conv branch each.
        conv_action: activation for the conv layers (e.g. 'relu').
        mask_zero: whether the embedding masks padding index 0.
        num_classes: number of output classes (default 8, matching the
            original hard-coded softmax width — backward compatible).

    Returns:
        A compiled keras Model (categorical cross-entropy, Adam).
    """
    _input = Input(shape=(max_len,), dtype='int32')
    _embed = Embedding(max_cnt, embed_size, input_length=max_len, mask_zero=mask_zero)(_input)
    _embed = SpatialDropout1D(0.15)(_embed)

    # One Conv1D + global-max-pool branch per kernel width, concatenated.
    # (Renamed from the original misspelled local `warppers`.)
    wrappers = []
    for _kernel_size in kernel_size:
        conv1d = Conv1D(filters=num_filters, kernel_size=_kernel_size, activation=conv_action)(_embed)
        wrappers.append(GlobalMaxPooling1D()(conv1d))

    fc = concatenate(wrappers)
    fc = Dropout(0.5)(fc)
    fc = Dense(256, activation='relu')(fc)
    fc = Dropout(0.25)(fc)
    preds = Dense(num_classes, activation='softmax')(fc)

    model = Model(inputs=_input, outputs=preds)

    model.compile(loss='categorical_crossentropy',
        optimizer='adam',
        metrics=['accuracy'])
    return model

# One-hot encode the malware label: pd.get_dummies emits one indicator
# column per distinct label value, giving the (n_files, n_classes)
# target matrix the softmax output expects.
train_labels = pd.get_dummies(train_df['label']).values

# pad_sequences left-pads every per-file API sequence with zeros (its
# default) so all sequences share the fixed length 6000, yielding a
# 2-D integer matrix per split.
train_seq = pad_sequences(train_df['seq'].values, maxlen=6000)
test_seq = pad_sequences(test_df['seq'].values, maxlen=6000)


TextCNN训练和预测

# Cross-validation setup.
from sklearn.model_selection import StratifiedKFold,KFold
# FIX: shuffle=True without random_state made the fold assignment (and
# therefore the OOF predictions and submission) non-reproducible between
# runs; pin the seed so results can be reproduced.
skf = KFold(n_splits=5, shuffle=True, random_state=2023)

# TextCNN hyper-parameters.
max_len     = 6000   # padded sequence length (must match pad_sequences maxlen)
max_cnt     = 295    # embedding vocabulary size
embed_size  = 256
num_filters = 64
kernel_size = [2,4,6,8,10,12,14]
conv_action = 'relu'
mask_zero   = False
TRAIN       = True   # set False to skip fitting and only load saved weights

import os
# Expose both GPUs to TensorFlow/Keras.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"

# Out-of-fold predictions for the training set (one row per file,
# 8 class probabilities) and the fold-accumulated test predictions.
meta_train = np.zeros(shape = (len(train_seq),8))
meta_test = np.zeros(shape = (len(test_seq),8))
FLAG = True

# Ensure the checkpoint directory exists; otherwise ModelCheckpoint
# fails on its first save (`os` is imported above).
os.makedirs('./NN', exist_ok=True)

# enumerate(..., start=1) replaces the original manual `i = 0` / `i += 1`
# fold counter.
for i, (tr_ind, te_ind) in enumerate(skf.split(train_labels), start=1):
    # BUG FIX: the original `print('FOLD: '.format(i))` had no `{}`
    # placeholder, so the fold number was silently dropped.
    print('FOLD: {}'.format(i))
    print(len(te_ind),len(tr_ind))
    model_name = 'benchmark_textcnn_fold_'+str(i)
    X_train,X_train_label = train_seq[tr_ind],train_labels[tr_ind]
    X_val,X_val_label     = train_seq[te_ind],train_labels[te_ind]

    # TextCNN applies 1-D convolutions over the embedded API-call
    # sequence — a CNN architecture adapted to text classification.
    model = TextCNN(max_len,max_cnt,embed_size,num_filters,kernel_size,conv_action,mask_zero)

    model_save_path = './NN/%s_%s.hdf5'%(model_name,embed_size)
    early_stopping =EarlyStopping(monitor='val_loss', patience=3)
    model_checkpoint = ModelCheckpoint(model_save_path, save_best_only=True, save_weights_only=True)
    if TRAIN and FLAG:
        # Train with early stopping; the checkpoint keeps only the
        # weights with the best validation loss.
        model.fit(X_train,X_train_label,validation_data=(X_val,X_val_label),epochs=100,batch_size=64,shuffle=True,callbacks=[early_stopping,model_checkpoint] )

    # Restore the best (lowest val_loss) weights saved by the checkpoint.
    model.load_weights(model_save_path)

    # Per-class probabilities for this fold's validation split and for
    # the full test set.
    pred_val = model.predict(X_val,batch_size=128,verbose=1)
    pred_test = model.predict(test_seq,batch_size=128,verbose=1)

    # Store validation predictions out-of-fold...
    meta_train[te_ind] = pred_val

    # ...and accumulate test predictions to average over the folds.
    meta_test += pred_test

    # Destroy the current TF graph between folds to release memory.
    K.clear_session()

# Average the accumulated test predictions over the folds (was a
# hard-coded `/= 5.0`; now tracks the KFold configuration).
meta_test /= skf.get_n_splits()

结果提交

# Assemble the submission: one probability column per class, filled with
# the fold-averaged test predictions, then written alongside file_id.
prob_cols = ['prob{}'.format(k) for k in range(8)]
for col in prob_cols:
    test_df[col] = 0

test_df[prob_cols] = meta_test
test_df[['file_id'] + prob_cols].to_csv('nn_baseline_5fold.csv', index = None)


博客参考链接:阿里云天池大赛赛题(机器学习)——阿里云安全恶意程序检测(完整代码)_全栈O-Jay的博客-CSDN博客


#头条创作挑战赛#

展开阅读全文

页面更新:2024-04-20

标签:天池   云安   阿里   数组   序列   变量   函数   模块   深度   模型   机器   数据

1 2 3 4 5

上滑加载更多 ↓
推荐阅读:
友情链接:
更多:

本站资料均由网友自行发布提供,仅用于学习交流。如有版权问题,请与我联系,QQ:4156828  

© CopyRight 2008-2024 All Rights Reserved. Powered By bs178.com 闽ICP备11008920号-3
闽公网安备35020302034844号

Top