function [trainedNet, info] = trainNetwork(varargin)
% trainNetwork Train a neural network
%
% trainedNet = trainNetwork(ds, layers, options) trains and returns a
% network trainedNet for a classification problem. ds is an
% imageDatastore with categorical labels or a MiniBatchable Datastore
% with responses, layers is an array of network layers or a LayerGraph,
% and options is a set of training options.
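%
% Additional sketch (not one of the shipped examples): assuming a
% hypothetical folder 'digitImages' that contains one subfolder of
% 28-by-28 grayscale images per class (ten classes assumed), a
% datastore-based call might look like this:
%
% imds = imageDatastore('digitImages', ...
%     'IncludeSubfolders', true, 'LabelSource', 'foldernames');
% layers = [ ...
% imageInputLayer([28 28 1])
% convolution2dLayer(5, 20)
% reluLayer
% fullyConnectedLayer(10)
% softmaxLayer
% classificationLayer];
% options = trainingOptions('sgdm');
% net = trainNetwork(imds, layers, options);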
%
% trainedNet = trainNetwork(X, Y, layers, options) trains and returns a
% network, trainedNet. The format for X depends on the input layer. For
% an image input layer, X is a numeric array of images arranged so that
% the first three dimensions are the height, width and channels, and the
% last dimension indexes the individual images. In a classification
% problem, Y specifies the labels for the images as a categorical vector.
% In a regression problem, Y contains the responses arranged as an
% N-by-R matrix, where N is the number of observations and R is the
% number of responses, or a four-dimensional numeric array, where the
% last dimension corresponds to the number of observations.
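%
% Regression sketch (illustrative; XTrain is assumed to be a
% 28-by-28-by-1-by-N numeric array and YTrain an N-by-1 matrix of
% scalar responses):
%
% layers = [ ...
% imageInputLayer([28 28 1])
% convolution2dLayer(5, 20)
% reluLayer
% fullyConnectedLayer(1)
% regressionLayer];
% options = trainingOptions('sgdm');
% net = trainNetwork(XTrain, YTrain, layers, options);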
%
% trainedNet = trainNetwork(C, Y, layers, options) trains an LSTM network
% for classification and regression problems for sequence or time-series
% data. layers must define an LSTM network. It must begin with a sequence
% input layer. C is a cell array containing sequence or time-series
% predictors. The entries of C are D-by-S matrices where D is the number
% of values per time step, and S is the length of the sequence. For
% sequence-to-label classification problems, Y is a categorical vector of
% labels. For sequence-to-sequence classification problems, Y is a cell
% array of categorical sequences. For sequence-to-one regression
% problems, Y is a matrix of targets. For sequence-to-sequence regression
% problems, Y is a cell array of numeric sequences. For
% sequence-to-sequence problems, the number of time steps of the
% sequences in Y must be identical to the corresponding predictor
% sequences in C. For sequence-to-sequence problems with one observation,
% C can be a matrix, and Y must be a categorical sequence of labels or a
% matrix of responses.
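%
% Sequence-to-sequence sketch (illustrative; C is assumed to be an
% N-by-1 cell array of D-by-S matrices, Y an N-by-1 cell array of
% 1-by-S categorical label sequences, and D and numClasses are
% placeholder variables set beforehand):
%
% layers = [ ...
% sequenceInputLayer(D)
% lstmLayer(100, 'OutputMode', 'sequence')
% fullyConnectedLayer(numClasses)
% softmaxLayer
% classificationLayer];
% options = trainingOptions('adam');
% net = trainNetwork(C, Y, layers, options);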
%
% trainedNet = trainNetwork(tbl, layers, options) trains and returns a
% network, trainedNet. For networks with an image input layer, tbl is a
% table containing predictors in the first column as either absolute or
% relative image paths or images. Responses must be in the second column
% as categorical labels for the images. In a regression problem,
% responses must be in the second column as either vectors or cell arrays
% containing 3-D arrays, or in multiple columns as scalars. For networks
% with a sequence input layer, tbl is a table containing absolute or
% relative MAT file paths of predictors in the first column. For a
% sequence-to-label classification problem, the second column must be a
% categorical vector of labels. For a sequence-to-one regression problem,
% the responses must be in the second column as a numeric array or in
% multiple columns as scalars. For a sequence-to-sequence classification problem,
% the second column must be an absolute or relative file path to a MAT
% file with a categorical sequence. For a sequence-to-sequence regression
% problem, the second column must be an absolute or relative file path to
% a MAT file with a numeric response sequence.
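%
% Table-based sketch (illustrative; imageFiles is assumed to be a cell
% array of image file paths, labels a matching categorical vector, and
% layers and options defined as in Example 1 below):
%
% tbl = table(imageFiles, labels);
% net = trainNetwork(tbl, layers, options);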
%
% trainedNet = trainNetwork(tbl, responseNames, ...) trains and returns a
% network, trainedNet. responseNames is a character vector, a string
% array, or a cell array of character vectors specifying the names of the
% variables in tbl that contain the responses.
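%
% responseNames sketch (illustrative; tbl is assumed to contain an
% image path variable plus numeric response variables named 'angle'
% and 'scale', and layers is assumed to end with fullyConnectedLayer(2)
% followed by a regression layer):
%
% net = trainNetwork(tbl, {'angle', 'scale'}, layers, options);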
%
% [trainedNet, info] = trainNetwork(...) trains and returns a network,
% trainedNet. info contains information on training progress.
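%
% The info output can be inspected after training; for example,
% assuming the recorded fields include TrainingLoss, the per-iteration
% loss can be plotted:
%
% [net, info] = trainNetwork(XTrain, YTrain, layers, options);
% plot(info.TrainingLoss);
% xlabel('Iteration');
% ylabel('Training loss');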
%
% Example 1:
% Train a convolutional neural network on some synthetic images
% of handwritten digits. Then run the trained network on a test
% set, and calculate the accuracy.
%
% [XTrain, YTrain] = digitTrain4DArrayData;
%
% layers = [ ...
% imageInputLayer([28 28 1])
% convolution2dLayer(5,20)
% reluLayer
% maxPooling2dLayer(2,'Stride',2)
% fullyConnectedLayer(10)
% softmaxLayer
% classificationLayer];
% options = trainingOptions('sgdm', 'Plots', 'training-progress');
% net = trainNetwork(XTrain, YTrain, layers, options);
%
% [XTest, YTest] = digitTest4DArrayData;
%
% YPred = classify(net, XTest);
% accuracy = sum(YTest == YPred)/numel(YTest)
%
% Example 2:
% Train a long short-term memory network to classify speakers of
% spoken vowel sounds from preprocessed speech data. Then make
% predictions using a test set, and calculate the accuracy.
%
% [XTrain, YTrain] = japaneseVowelsTrainData;
%
% layers = [ ...
% sequenceInputLayer(12)
% lstmLayer(100, 'OutputMode', 'last')
% fullyConnectedLayer(9)
% softmaxLayer
% classificationLayer];
% options = trainingOptions('adam', 'Plots', 'training-progress');
% net = trainNetwork(XTrain, YTrain, layers, options);
%
% [XTest, YTest] = japaneseVowelsTestData;
%
% YPred = classify(net, XTest);
% accuracy = sum(YTest == YPred)/numel(YTest)
%
% Example 3:
% Train a network with a skip connection, defined as a layer graph,
% on synthetic digit data, and measure its accuracy:
%
% [XTrain, YTrain] = digitTrain4DArrayData;
%
% layers = [
% imageInputLayer([28 28 1], 'Name', 'input')
% convolution2dLayer(5, 20, 'Name', 'conv_1')
% reluLayer('Name', 'relu_1')
% convolution2dLayer(3, 20, 'Padding', 1, 'Name', 'conv_2')
% reluLayer('Name', 'relu_2')
% convolution2dLayer(3, 20, 'Padding', 1, 'Name', 'conv_3')
% reluLayer('Name', 'relu_3')
% additionLayer(2,'Name', 'add')
% fullyConnectedLayer(10, 'Name', 'fc')
% softmaxLayer('Name', 'softmax')
% classificationLayer('Name', 'classoutput')];
%
% lgraph = layerGraph(layers);
%
% lgraph = connectLayers(lgraph, 'relu_1', 'add/in2');
%
% plot(lgraph);
%
% options = trainingOptions('sgdm', 'Plots', 'training-progress');
% [net,info] = trainNetwork(XTrain, YTrain, lgraph, options);
%
% [XTest, YTest] = digitTest4DArrayData;
% YPred = classify(net, XTest);
% accuracy = sum(YTest == YPred)/numel(YTest)
%
% See also nnet.cnn.layer, trainingOptions, SeriesNetwork, DAGNetwork, LayerGraph.

% Copyright 2015-2018 The MathWorks, Inc.

narginchk(3,4);
try
    [layersOrGraph, opts, X, Y] = iParseInputArguments(varargin{:});
    [trainedNet, info] = doTrainNetwork(layersOrGraph, opts, X, Y);
catch e
    iThrowCNNException( e );
end
end

function [trainedNet, info] = doTrainNetwork(layersOrGraph, opts, X, Y)
haveDAGNetwork = iHaveDAGNetwork(layersOrGraph);
analyzedLayers = iInferParameters(layersOrGraph);
layers = analyzedLayers.ExternalLayers;
internalLayers = analyzedLayers.InternalLayers;
% Validate training data
iValidateTrainingDataForProblem( X, Y, layers );
% Set desired precision
precision = nnet.internal.cnn.util.Precision('single');
% Set up and validate parallel training
isRNN = nnet.internal.cnn.util.isRNN( internalLayers );
executionSettings = nnet.internal.cnn.assembler.setupExecutionEnvironment(...
    opts, isRNN, X, precision );
% Create a training dispatcher
trainingDispatcher = iCreateTrainingDataDispatcher(X, Y, opts, ...
    executionSettings, layers);
% Create a validation dispatcher if validation data was passed in
validationDispatcher = iValidationDispatcher( opts, executionSettings, ...
    layers );
% Assert that trai