import os
import sys
import pywt
from pywt import wavedec
from __init__ import ap_entropy, samp_entropy
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from mnist_new import mlp, create_training_set
from sklearn.ensemble import RandomForestClassifier
def _load_signals(directory):
    """Load every text file in *directory* as a 1-D numpy signal array.

    Each line of a file holds one sample; only the first whitespace-separated
    token on the line is used. Tokens are parsed to float here (the original
    kept raw strings and relied on pywt to coerce them later). Files are
    closed deterministically via ``with`` (the original leaked the handles).

    Returns a list of ``np.ndarray``, one per file.
    """
    signals = []
    for fname in os.listdir(directory):
        with open(os.path.join(directory, fname), "r") as fh:
            samples = [float(line.split()[0]) for line in fh]
        signals.append(np.array(samples))
    return signals


# One EEG recording set per directory (A..E layout; presumably the Bonn
# epilepsy dataset — TODO confirm).
A = _load_signals("../../../ALL/A/")
B = _load_signals("../../../ALL/B/")
C = _load_signals("../../../ALL/C/")
D = _load_signals("../../../ALL/D/")
E = _load_signals("../../../ALL/E/")
# 8-level discrete wavelet transform with the Daubechies-4 wavelet: each
# signal becomes a list of 9 coefficient arrays (approximation + 8 details).
A_ = [sig_coeffs for sig_coeffs in (wavedec(x, 'db4', level=8) for x in A)]
B_ = [wavedec(x, 'db4', level=8) for x in B]
C_ = [wavedec(x, 'db4', level=8) for x in C]
D_ = [wavedec(x, 'db4', level=8) for x in D]
E_ = [wavedec(x, 'db4', level=8) for x in E]
# Per-class feature-vector buckets and their numeric labels.
a, b, c, d, e = [], [], [], [], []
y_a, y_b, y_c, y_d, y_e = [], [], [], [], []
# Extra bucket (f / y_f) filled alongside the E set further below.
f, y_f = [], []
# Pooled features/labels across all classes.
inputs, outputs = [], []
# Running per-feature minimum/maximum over 27 feature slots
# (9 wavelet sub-bands x 3 statistics each).
minm = [1000000000000] * 27
maxm = [0] * 27
inp = []
out = []
def _extract_features(decomposed, bucket, label_bucket, label, class_name):
    """Build one feature vector per signal from its wavelet decomposition.

    For each of the signal's coefficient arrays (9 sub-bands at level 8) the
    features are (energy, mean, std). While scanning, the module-level
    ``minm``/``maxm`` lists track the global per-slot minimum/maximum
    (feature index j walks 0..26 = 9 sub-bands x 3 statistics).

    Side effects: appends to the module-level ``inputs`` and ``outputs``
    lists in addition to the per-class ``bucket``/``label_bucket``.

    decomposed   -- list of wavedec() outputs, one per signal
    bucket       -- per-class feature list (a / b / c / d)
    label_bucket -- per-class numeric label list (y_a / y_b / y_c / y_d)
    label        -- numeric class label appended to label_bucket
    class_name   -- string label appended to the pooled ``outputs`` list
    """
    for signal in decomposed:
        features = []
        j = 0
        for band in signal:
            coef = np.array(band)
            energy = np.sum(coef ** 2)
            minm[j] = min(minm[j], energy)
            maxm[j] = max(maxm[j], energy)
            j += 1
            # ap_entropy / samp_entropy features were disabled in the
            # original (commented out) and are intentionally omitted here.
            mean = np.mean(coef)
            minm[j] = min(minm[j], mean)
            maxm[j] = max(maxm[j], mean)
            j += 1
            std = np.std(coef)
            minm[j] = min(minm[j], std)
            maxm[j] = max(maxm[j], std)
            j += 1
            features.extend((energy, mean, std))
        bucket.append(features)
        label_bucket.append(label)
        inputs.append(features)
        outputs.append(class_name)


# Sets A and B share class 0 / "Class A", C and D share class 1 / "Class B"
# (matches the original labelling; presumably healthy vs. interictal EEG
# groupings — confirm against the experiment design).
_extract_features(A_, a, y_a, 0, "Class A")
print("A done")
_extract_features(B_, b, y_b, 0, "Class A")
print("B done")
_extract_features(C_, c, y_c, 1, "Class B")
print("C done")
_extract_features(D_, d, y_d, 1, "Class B")
print("D Done")
# Class E (label 2): same (energy, mean, std) extraction per sub-band as the
# earlier sets, but the vectors go into e AND the extra f bucket; note the
# visible source does NOT extend the pooled inputs/outputs lists here.
for x in E_:
    features = []
    j = 0  # feature slot index into minm/maxm (0..26)
    for y in x:
        coef = np.array(y)
        energy = np.sum(coef ** 2)
        minm[j] = min(minm[j], energy)
        maxm[j] = max(maxm[j], energy)
        j += 1
        # ap_entropy / samp_entropy features disabled in the original.
        mean = np.mean(coef)
        minm[j] = min(minm[j], mean)
        maxm[j] = max(maxm[j], mean)
        j += 1
        std = np.std(coef)
        minm[j] = min(minm[j], std)
        maxm[j] = max(maxm[j], std)
        j += 1
        features.extend((energy, mean, std))
    e.append(features)
    f.append(features)
    y_e.append(2)
    # NOTE(review): the source file is truncated mid-statement at "y_f.a";
    # restored as append(2) by symmetry with y_e — confirm against the
    # original file, including whether inputs/outputs were also extended.
    y_f.append(2)
rf_test.rar_信号_测试_脑电_脑电信号 癫痫_脑电信号特征
版权申诉
12 浏览量
2022-09-24
09:35:27
上传
评论
收藏 2KB RAR 举报
alvarocfc
- 粉丝: 108
- 资源: 1万+
最新资源
- (2005-2014期间)中国环境统计年鉴
- 2015高中信息技术excel操作题及素材(精品文档).xls
- SW3518S全协议快充USB Type-c接口电源模块硬件参考设计评估版硬件(原理图 +pcb)+封装库文件.zip
- 基于深度强化学习算法实现多星对区域目标观测的规划python源码+数据集+模型+超详细注释.zip
- RT1052+SDRAM(IS42S16160) +SIM7600CE(PCIE接口封装)控制板硬件(原理图+PCB)+封装库
- 2017大学英语四级词汇-excel-列表版(精品文档).xls
- 2017版国家医保药品目录(excel版)完整版.xls
- 基于STM32F103单片机设计的无刷电机控制板硬件(原理图+PCB+BOM)+MCU软件控制源码+文档资料.zip
- 肺结节检测数据集VOC+YOLO格式1186张1类别.zip
- Faster-RCNN基于知识蒸馏的目标检测模型增量深度学习方法python源码+项目运行说明.zip
资源上传下载、课程学习等过程中有任何疑问或建议,欢迎提出宝贵意见哦~我们会及时处理!
点击此处反馈