import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow_addons as tfa
import tensorflow as tf
import datetime
from tensorflow.keras.layers import Conv2D, Dense, MaxPooling2D, concatenate, Flatten, Dropout,\
Lambda,BatchNormalization,multiply,GlobalAveragePooling2D,Reshape,Activation,Permute, Concatenate,GlobalMaxPooling2D,Add
from keras.layers import LSTM, Embedding
from tensorflow.keras.backend import batch_dot,sign,sqrt,l2_normalize
from tensorflow.keras.regularizers import l2
from tensorflow.keras.preprocessing.text import Tokenizer
from keras import backend as K
from tensorflow.keras import Input, Model
# from keras.utils import plot_model
from keras.utils.np_utils import to_categorical
from sklearn.metrics import f1_score,precision_score,recall_score,accuracy_score
# Pin GPU selection: order devices by PCI bus ID and expose only GPU 0.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import numpy as np
from PIL import Image
import os
import os.path as osp
# NOTE: `python_random` and `random` alias the SAME stdlib module; seeding
# one seeds the other (the double-seed below is redundant but harmless).
import random as python_random
import random
# Seed every RNG source (Python, NumPy, TF) for reproducible runs.
random_seed = 12345
random.seed(random_seed)
np.random.seed(random_seed)
python_random.seed(random_seed)
tf.random.set_seed(random_seed)
os.environ['PYTHONHASHSEED'] = str(random_seed)
# Request deterministic TF ops (at a potential speed cost).
os.environ['TF_DETERMINISTIC_OPS'] = '1'
# NOTE(review): `path_images` is commented out here but `get_imgs` below
# still references it — confirm the intended image root before running.
# path_images = 'path_images'
path_txts = 'path_txts'
dir_name = "dir_save"          # output directory for results
file_name = osp.join(dir_name, 'scores.txt')  # where metric scores are written
CHANNELS = 3                   # RGB byteplot images
EPOCHS = 250
BATCH_SIZE = 64
IMG_SIZE = 128                 # model input is IMG_SIZE x IMG_SIZE
# PATH_FILES = osp.join(path_images, "data_splits")
IMAGE_TYPE = '.color.png'
if not os.path.exists(dir_name):
    os.makedirs(dir_name)
# Malware family labels; index order defines the one-hot encoding below.
CLASS_NAMES = ['Adware', 'Banking', 'SMS', 'Riskware']
categorical = [0, 1, 2 , 3]
# one_hot_train_label3[i] is the one-hot vector for class index i.
one_hot_train_label3 = to_categorical(categorical)
def get_label(file_path):
    """Map a dataset file path to its one-hot class label.

    The class is taken from the parent directory name (``.../<class>/<file>``).
    Unknown directory names fall through to the Riskware label.
    NOTE(review): with a tf string tensor, ``family == '...'`` only works
    eagerly — confirm this is not traced into a graph.
    """
    family = tf.strings.split(file_path, os.path.sep)[-2]
    if family == 'SMS':
        return one_hot_train_label3[2]
    if family == 'Adware':
        return one_hot_train_label3[0]
    if family == 'Banking':
        return one_hot_train_label3[1]
    return one_hot_train_label3[3]
def get_label2(file_path):
    """Map a dataset file path to its integer class index.

    Same directory-name lookup as ``get_label`` but returns the raw index
    (Adware=0, Banking=1, SMS=2, anything else=3 i.e. Riskware).
    """
    family = tf.strings.split(file_path, os.path.sep)[-2]
    if family == 'SMS':
        return 2
    if family == 'Adware':
        return 0
    if family == 'Banking':
        return 1
    return 3
def get_image(path_img):
    """Load an image from disk and return it as a TF tensor.

    Called through ``tf.numpy_function``, so ``path_img`` arrives as a
    ``bytes`` path. If the primary path cannot be opened, retry under the
    sibling ``color_without_so`` directory, keeping the last two path
    components (``<class>/<file>``).

    Fix: the original used a bare ``except:`` that swallowed every error
    (including KeyboardInterrupt); narrowed to ``OSError``, which covers
    missing files and PIL's UnidentifiedImageError.
    """
    try:
        image = np.asarray(Image.open(path_img))
    except OSError:
        # Rebuild the path as <root>/color_without_so/<class>/<file>.
        postfix = '/'.join(path_img.decode().split('/')[-2:])
        image = np.asarray(
            Image.open(osp.join('/'.join(path_img.decode().split('/')[:-2]), 'color_without_so', postfix).encode()))
    image = tf.convert_to_tensor(image, dtype_hint=None, name=None)
    return image
def get_text(path_txt):
    """Read a text file and return a binary bag-of-words matrix (int32).

    Builds a per-file vocabulary (top 10000 tokens) and encodes each line
    as a 0/1 presence vector.

    Fix: ``Tokenizer.texts_to_matrix`` natively returns a float64 numpy
    array, but the ``tf.numpy_function`` wrapper in ``decode_text``
    declares ``tf.int32`` output — cast to int32 (lossless for a binary
    matrix) so the declared dtype matches.

    NOTE(review): fitting the Tokenizer on a ``TextLineDataset`` relies on
    eager iteration over byte-string tensors — verify this works in the
    deployed TF/Keras versions.
    """
    text = tf.data.TextLineDataset(path_txt)
    tokenizer = Tokenizer(num_words=10000)
    tokenizer.fit_on_texts(text)
    text = tokenizer.texts_to_matrix(text, mode='binary')
    return text.astype(np.int32)
def get_shape(image):
    """Return the first (row/height) dimension of *image*."""
    rows = image.shape[0]
    return rows
def decode_img(path_img):
    """Load an image path into a float32 (IMG_SIZE, IMG_SIZE, CHANNELS) tensor.

    Runs the numpy loaders through ``tf.numpy_function`` so this can be
    used inside a ``tf.data`` pipeline. Source images are assumed to be
    128 pixels wide with a variable number of rows (hence the dynamic
    first reshape) — TODO confirm against the image generator.

    Fix: the original resized to ``[IMG_SIZE, 128]``, which only matched
    the final ``[IMG_SIZE, IMG_SIZE, CHANNELS]`` reshape because
    IMG_SIZE == 128; now resizes square, consistent with ``decodes``.
    """
    image = tf.numpy_function(get_image, [path_img], tf.uint8)
    shape = tf.numpy_function(get_shape, [image], tf.int32)
    # Raw image: variable rows x fixed 128 columns.
    image = tf.reshape(image, [shape, 128, CHANNELS])
    image = tf.image.convert_image_dtype(image, tf.float32)
    image = tf.image.resize(image, [IMG_SIZE, IMG_SIZE])
    return tf.reshape(image, [IMG_SIZE, IMG_SIZE, CHANNELS])
def decodes(path_img):
    """Eager variant of ``decode_img``: load *path_img* into a float32
    (IMG_SIZE, IMG_SIZE, CHANNELS) tensor, calling ``get_image`` directly
    instead of through ``tf.numpy_function``.
    """
    img = get_image(path_img)
    rows = tf.numpy_function(get_shape, [img], tf.int32)
    # Raw image: variable rows x fixed 128 columns.
    img = tf.reshape(img, [rows, 128, CHANNELS])
    img = tf.image.convert_image_dtype(img, tf.float32)
    resized = tf.image.resize(img, [IMG_SIZE, IMG_SIZE])
    return tf.reshape(resized, [IMG_SIZE, IMG_SIZE, CHANNELS])
def decode_text(path_txt):
    # Wrap get_text so it can run inside a tf.data pipeline.
    # NOTE(review): Tout is declared tf.int32, but Tokenizer.texts_to_matrix
    # natively returns a float64 numpy array — confirm get_text actually
    # yields int32, otherwise this fails at runtime.
    text = tf.numpy_function(get_text, [path_txt], tf.int32)
    # text = get_text(path_txt)
    # tokenizer = Tokenizer(num_words=10000)
    # tokenizer.fit_on_texts(text)
    # one_hot_results = tokenizer.texts_to_matrix(text, mode='binary')
    return text
def preprocess_image(image):
    """Decode a JPEG byte string to a 128x128x3 float tensor in [0, 1]."""
    decoded = tf.image.decode_jpeg(image, channels=3)
    resized = tf.image.resize(decoded, [128, 128])
    return resized / 255.0
def load_and_preprocess_image(path):
    """Read the file at *path* and run it through ``preprocess_image``."""
    raw = tf.io.read_file(path)
    return preprocess_image(raw)
def process_path(file_path):
    """Build the (image, one-hot label) training pair for one file path."""
    onehot = get_label(file_path)
    image = decode_img(file_path)
    return image, onehot
def process_img(file_path):
    """Decode just the image for *file_path* (no label)."""
    return decode_img(file_path)
def process_label(file_path):
    """Look up just the one-hot label for *file_path* (no image decode)."""
    return get_label(file_path)
def get_imgs(split: str, root: str = 'path_images'):
    """List every image path under ``<root>/<split>/<class>/`` for all classes.

    Fix: the module-level ``path_images`` constant this relied on is
    commented out at the top of the file, so the original raised
    ``NameError`` on first call. The image root is now a parameter whose
    default matches the commented-out value — existing single-argument
    calls keep working once that default points at the real data.
    """
    paths = []
    for name in CLASS_NAMES:
        class_dir = os.path.join(root, split, name)
        paths += [os.path.join(class_dir, fname) for fname in os.listdir(class_dir)]
    return paths
def get_texts(split: str):
    """List every text path under ``path_txts/<split>/<class>/`` for all classes."""
    paths = []
    for name in CLASS_NAMES:
        class_dir = os.path.join(path_txts, split, name)
        paths += [os.path.join(class_dir, fname) for fname in os.listdir(class_dir)]
    return paths
def dot_product(x):
    """Batched dot product of the pair ``x = (a, b)`` over axis 1,
    scaled by a's static size along that axis (bilinear pooling step).
    """
    left, right = x[0], x[1]
    dim = left.get_shape().as_list()[1]
    return batch_dot(left, right, axes=[1, 1]) / dim
"""
Calculate signed square root
@param
x -> a tensor
"""
def signed_sqrt(x):
return sign(x) * sqrt(abs(x) + 1e-9)
"""
Calculate L2-norm
@param
x -> a tensor
"""
def L2_norm(x, axis=-1):
return l2_normalize(x, axis=axis)
def spatial_attention(input_feature):
    """CBAM-style spatial attention.

    Channel-wise mean and max maps are stacked, passed through a 7x7
    sigmoid conv to produce a single-channel attention map, and the input
    is reweighted by it.
    """
    mean_map = Lambda(lambda t: K.mean(t, axis=3, keepdims=True))(input_feature)
    max_map = Lambda(lambda t: K.max(t, axis=3, keepdims=True))(input_feature)
    stacked = Concatenate(axis=3)([mean_map, max_map])
    attention = Conv2D(1, (7, 7), strides=1, padding='same', activation='sigmoid')(stacked)
    return multiply([input_feature, attention])
def base_model():
    """Build a small CNN classifier over IMG_SIZE x IMG_SIZE RGB images.

    Architecture: two conv+maxpool stages, a third conv, then a 16-unit
    dense layer into a 4-way softmax (one output per malware family).

    Fixes: the input shape was ``(IMG_SIZE, 128, 3)`` while everything
    else in the file uses ``(IMG_SIZE, IMG_SIZE, CHANNELS)`` — identical
    only because IMG_SIZE == 128; now spelled consistently. The local
    name ``input`` shadowed the builtin, and the ``input_shape`` kwarg is
    redundant (ignored) when layers are applied functionally — both removed.
    """
    inputs = Input(shape=(IMG_SIZE, IMG_SIZE, CHANNELS))
    x = Conv2D(64, kernel_size=5, activation='relu', padding='same')(inputs)
    x = MaxPooling2D(pool_size=(4, 4))(x)
    x = Conv2D(filters=128, kernel_size=3, activation='relu', padding='same')(x)
    x = MaxPooling2D(pool_size=(4, 4))(x)
    x = Conv2D(filters=128, kernel_size=5, activation='relu', padding='same')(x)
    x = Flatten()(x)
    x = Dense(16, activation='relu')(x)
    output = Dense(4, activation='softmax', name='output')(x)
    return Model(inputs=inputs, outputs=output)
def base_model2():
in_lay = Input(shape=(IMG_SIZE, IMG_SIZE, CHANNELS))
x1 = Conv2D(64, kernel_size=5, activation='relu', padding='same', input_shape=(IMG_SIZE, IMG_SIZE, CHANNELS))(
in_lay)
x1 = MaxPooling2D(pool_size=(4, 4))(x1)
x1 = Conv2D(filters=128, kernel_size=3, activation='relu', padding='same')(x1)
x1 = MaxPooling2D(pool_size=(4, 4))(x1)
base_model = Conv2D(filters=128, kernel_size=5, activation='relu', padding='same')(x1)
# base_model = Flatten()(base_model)
# base_model = Model(inputs=in_lay,out
没有合适的资源?快使用搜索试试~ 我知道了~
资源推荐
资源详情
资源评论
收起资源包目录
使用双线性注意力CNN和字节码图像检测Android恶意软件.zip (4个子文件)
Android-malware-detection-based-on-bilinear-attention-CNN-master
apk2images.py 2KB
Split.py 3KB
multiclass_model.py 39KB
binary_class.py 35KB
共 4 条
- 1
资源评论
博士僧小星
- 粉丝: 1946
- 资源: 5903
下载权益
C知道特权
VIP文章
课程特权
开通VIP
上传资源 快速赚钱
- 我的内容管理 展开
- 我的资源 快来上传第一个资源
- 我的收益 登录查看自己的收益
- 我的积分 登录查看自己的积分
- 我的C币 登录后查看C币余额
- 我的收藏
- 我的下载
- 下载帮助
安全验证
文档复制为VIP权益,开通VIP直接复制
信息提交成功