from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
import numpy as np
import argparse
def sigmoid_activation(x):
    # compute the sigmoid activation value for a given input
    return 1.0 / (1 + np.exp(-x))
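# a quick sanity check (illustrative values only, not part of the pipeline):
#   sigmoid_activation(np.array([-10.0, 0.0, 10.0]))
#   # -> approximately [0.00005, 0.5, 0.99995]
# i.e. the function maps 0 to 0.5 and saturates toward 0/1 for
# large-magnitude inputs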
def predict(X, W):
    # take the dot product between our features and weight matrix
    preds = sigmoid_activation(X.dot(W))

    # apply a step function to threshold the outputs to binary class labels
    preds[preds <= 0.5] = 0
    preds[preds > 0] = 1

    return preds
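# example usage (hypothetical names and shapes, shown only to illustrate the
# expected dimensions):
#   X_demo = np.random.randn(4, 3)   # two features plus the bias column
#   W_demo = np.random.randn(3, 1)
#   predict(X_demo, W_demo)          # -> (4, 1) column of 0/1 labels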
def next_batch(X, y, batch_size):
    # loop over dataset `X` in mini-batches, yielding a tuple of the
    # current batched data and labels
    for i in np.arange(0, X.shape[0], batch_size):
        yield (X[i:i + batch_size], y[i:i + batch_size])
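# e.g. with the 500 training samples used below and batch_size=32, this
# generator yields 15 full batches of 32 rows followed by one final batch
# of 20 rows (the last batch is simply whatever remains)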
ap = argparse.ArgumentParser()
ap.add_argument("-e", "--epochs", type=float, default=100, help="# of epochs")
ap.add_argument("-a", "--alpha", type=float, default=0.01, help="learning rate")
ap.add_argument("-b", "--batch-size", type=int, default=32, help="size of SGD mini-batches")
args = vars(ap.parse_args())
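# note that argparse converts the dash in "--batch-size" to an underscore,
# which is why the value is accessed below as args["batch_size"]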
# generate a 2-class classification problem with 1,000 data points
# where each data point is a 2d feature vector.
(X, y) = make_blobs(n_samples=1000, n_features=2, centers=2, cluster_std=1.5, random_state=1)
y = y.reshape((y.shape[0], 1))
# insert a column of 1's as the last entry in the feature matrix -- this little trick allows us to
# treat the bias as a trainable parameter within the weight matrix
X = np.c_[X, np.ones((X.shape[0]))]
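# e.g. a data point [x1, x2] becomes [x1, x2, 1], so for W = [w1, w2, b]
# the product X.dot(W) expands to w1*x1 + w2*x2 + b, with the last entry
# of W acting as the bias term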
# partition the data into training and testing splits using 50%
# of the data for training and the remaining 50% for testing
(trainX, testX, trainY, testY) = train_test_split(X, y, test_size=0.5, random_state=42)
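# with a 50/50 split of the 1,000 points, trainX and testX each have shape
# (500, 3) and trainY and testY each have shape (500, 1)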
print("[INFO] training...")

# initialize our weight matrix and list of losses
W = np.random.randn(X.shape[1], 1)
losses = []
# loop over the desired number of epochs
for epoch in np.arange(0, args["epochs"]):
    # initialize the total loss for the epoch
    epoch_loss = []

    # loop over our training data in mini-batches
    for (batchX, batchY) in next_batch(trainX, trainY, args["batch_size"]):
        # take the dot product between our current batch of features and
        # the weight matrix "W", then pass this value through our sigmoid
        # activation function, giving us our predictions on the batch
        preds = sigmoid_activation(batchX.dot(W))

        # now that we have our predictions, we need to determine the
        # "error", which is the difference between our predictions and
        # the true values
        error = preds - batchY
        epoch_loss.append(np.sum(error ** 2))

        # the gradient descent update is the dot product between our
        # features and the error of the predictions
        gradient = batchX.T.dot(error)
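        # note: batchX.T.dot(error), with error = sigmoid(batchX.dot(W)) - batchY,
        # is exactly the gradient of the logistic (cross-entropy) loss for
        # these weights; the squared error accumulated above is tracked only
        # for monitoring the training progress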
        # in the update stage, all we need to do is "nudge" the weight
        # matrix in the negative direction of the gradient (hence the term
        # "gradient descent") by taking a small step towards a set of
        # "more optimal" parameters
        W += -args["alpha"] * gradient

    # update our loss history by taking the average loss across all batches
    loss = np.average(epoch_loss)
    losses.append(loss)

    # check to see if an update should be displayed
    if epoch == 0 or (epoch + 1) % 5 == 0:
        print("[INFO] epoch={}, loss={:.7f}".format(int(epoch + 1), loss))
print("[INFO] evaluating...")
preds = predict(testX, W)
print(classification_report(testY, preds))
# plot the (testing) classification data
plt.style.use("ggplot")
plt.figure()
plt.title("Data")
plt.scatter(testX[:, 0], testX[:, 1], c=testY[:,0], marker="o", s=30)
# construct a figure that plots the loss over time
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, args["epochs"]), losses)
plt.title("Training Loss")
plt.xlabel("Epoch #")
plt.ylabel("Loss")
plt.show()
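# example invocation (assuming this file is saved as sgd.py; the filename
# is an assumption, the flags match the argument parser defined above):
#   $ python sgd.py --epochs 100 --alpha 0.01 --batch-size 32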