/*
* File: pr_loqo.c
* Purpose: solves quadratic programming problem for pattern recognition
* for support vectors
*
* Author: Alex J. Smola
* Created: 10/14/97
* Updated: 11/08/97
 * Updated: 13/08/98 (removed exit(1) as it crashes SVMlight when the
 *          margin is chosen in a not sufficiently conservative manner)
*
*
* Copyright (c) 1997 GMD Berlin - All rights reserved
* THIS IS UNPUBLISHED PROPRIETARY SOURCE CODE of GMD Berlin
* The copyright notice above does not evidence any
* actual or intended publication of this work.
*
* Unauthorized commercial use of this software is not allowed
*/
#include <math.h>
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#include "pr_loqo.h"
#define max(A, B) ((A) > (B) ? (A) : (B))
#define min(A, B) ((A) < (B) ? (A) : (B))
#define sqr(A) ((A) * (A))
#define ABS(A) ((A) > 0 ? (A) : (-(A)))
#define PREDICTOR 1
#define CORRECTOR 2
/*****************************************************************
replace this by any other function that will exit gracefully
in a larger system
***************************************************************/
void nrerror(char error_text[])
{
printf("ERROR: terminating optimizer - %s\n", error_text);
/* exit(1); */
}
/*****************************************************************
taken from Numerical Recipes and modified to accept pointers;
moreover the Numerical Recipes code seems to be buggy (at least
the versions found on the web)
Cholesky solver and backsubstitution
leaves the upper right triangle intact (row-major storage)
***************************************************************/
void choldc(double a[], int n, double p[])
{
  void nrerror(char error_text[]);
  int i, j, k;
  double sum;

  for (i = 0; i < n; i++) {
    for (j = i; j < n; j++) {
      sum = a[n*i + j];
      for (k = i-1; k >= 0; k--) sum -= a[n*i + k]*a[n*j + k];
      if (i == j) {
        if (sum <= 0.0) {
          nrerror("choldc failed, matrix not positive definite");
          sum = 0.0;
        }
        p[i] = sqrt(sum);
      } else a[n*j + i] = sum/p[i];
    }
  }
}
void cholsb(double a[], int n, double p[], double b[], double x[])
{
  int i, k;
  double sum;

  for (i=0; i<n; i++) {
    sum = b[i];
    for (k=i-1; k>=0; k--) sum -= a[n*i + k]*x[k];
    x[i] = sum/p[i];
  }
  for (i=n-1; i>=0; i--) {
    sum = x[i];
    for (k=i+1; k<n; k++) sum -= a[n*k + i]*x[k];
    x[i] = sum/p[i];
  }
}
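/*****************************************************************
  usage note: for a symmetric positive definite matrix stored
  row-major in a flat array a[n*n], call choldc(a, n, p) once
  (the Cholesky factor ends up in the strict lower triangle of a,
  its diagonal in p[]) and then cholsb(a, n, p, b, x) for every
  right-hand side b to obtain the solution x of a*x = b
***************************************************************/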
/*****************************************************************
sometimes we only need the forward or backward pass of the
backsubstitution, hence we provide these two routines separately
***************************************************************/
void chol_forward(double a[], int n, double p[], double b[], double x[])
{
  int i, k;
  double sum;

  for (i=0; i<n; i++) {
    sum = b[i];
    for (k=i-1; k>=0; k--) sum -= a[n*i + k]*x[k];
    x[i] = sum/p[i];
  }
}
void chol_backward(double a[], int n, double p[], double b[], double x[])
{
  int i, k;
  double sum;

  for (i=n-1; i>=0; i--) {
    sum = b[i];
    for (k=i+1; k<n; k++) sum -= a[n*k + i]*x[k];
    x[i] = sum/p[i];
  }
}
/*****************************************************************
solves the system  | -H_x  A' | |x_x|   |c_x|
                   |  A   H_y | |x_y| = |c_y|
with H_x (and H_y) positive (semidefinite) matrices
and n, m the respective sizes of H_x and H_y

for variables see pg. 48 of notebook or do the calculations on a
sheet of paper again

the predictor step solves the whole thing, the corrector assumes
that H_x didn't change and relies on the results of the predictor,
therefore do _not_ modify the workspace

if you want to speed tune anything in the code, here's the right
place to do so: about 95% of the time is spent in here. something
like iterative refinement would be nice, especially when switching
from double to single precision. if you have a fast parallel
cholesky, use it instead of the numrec implementations.

side effects: changes H_y (but this is just the unit matrix or zero
anyway in our case)
***************************************************************/
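/*****************************************************************
  in detail (this follows directly from the block system above):
  eliminating x_x with the Cholesky factorization of H_x gives

    -H_x x_x + A' x_y = c_x   =>   x_x = H_x^-1 (A' x_y - c_x)
     A  x_x + H_y x_y = c_y   =>   (H_y + A H_x^-1 A') x_y
                                      = c_y + A H_x^-1 c_x

  the predictor step factorizes H_x and (H_y + A H_x^-1 A'), the
  corrector step reuses both factorizations and only repeats the
  triangular solves for the new right-hand side
***************************************************************/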
void solve_reduced(int n, int m, double h_x[], double h_y[],
double a[], double x_x[], double x_y[],
double c_x[], double c_y[],
double workspace[], int step)
{
  int i, j, k;

  double *p_x;
  double *p_y;
  double *t_a;
  double *t_c;
  double *t_y;

  p_x = workspace;              /* together n + m + n*m + n + m = n*(m+2)+2*m */
  p_y = p_x + n;
  t_a = p_y + m;
  t_c = t_a + n*m;
  t_y = t_c + n;

  if (step == PREDICTOR) {
    choldc(h_x, n, p_x);        /* do cholesky decomposition */

    for (i=0; i<m; i++)         /* forward pass for A' */
      chol_forward(h_x, n, p_x, a+i*n, t_a+i*n);

    for (i=0; i<m; i++)         /* compute (h_y + a h_x^-1 A') */
      for (j=i; j<m; j++)
        for (k=0; k<n; k++)
          h_y[m*i + j] += t_a[n*j + k] * t_a[n*i + k];

    choldc(h_y, m, p_y);        /* and cholesky decomposition */
  }

  chol_forward(h_x, n, p_x, c_x, t_c);  /* forward pass for c */

  for (i=0; i<m; i++) {         /* and solve for x_y */
    t_y[i] = c_y[i];
    for (j=0; j<n; j++)
      t_y[i] += t_a[i*n + j] * t_c[j];
  }

  cholsb(h_y, m, p_y, t_y, x_y);

  for (i=0; i<n; i++) {         /* finally solve for x_x */
    t_c[i] = -t_c[i];
    for (j=0; j<m; j++)
      t_c[i] += t_a[j*n + i] * x_y[j];
  }

  chol_backward(h_x, n, p_x, t_c, x_x);
}
/*****************************************************************
matrix vector multiplication (symmetric matrix but only one triangle
given). computes m*x = y
no need to tune it as it's only O(n^2) whereas the cholesky is
O(n^3), so don't waste your time _here_, although it isn't very
elegant.
***************************************************************/
void matrix_vector(int n, double m[], double x[], double y[])
{
  int i, j;

  for (i=0; i<n; i++) {
    y[i] = m[(n+1) * i] * x[i];
    for (j=0; j<i; j++)
      y[i] += m[i + n*j] * x[j];
    for (j=i+1; j<n; j++)
      y[i] += m[n*i + j] * x[j];
  }
}
/*****************************************************************
call only this routine; this is the only one you're interested in
for doing quadratic optimization

the restart feature exists but may not be of much use: an initial
setting that is close, but not very close, to the actual solution
will give very good starting diagnostics (primal and dual feasibility
and a small infeasibility gap) but make the optimizer stall later on,
as we have to enforce positivity of the slacks.
***************************************************************/
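/*****************************************************************
  for reference (see pr_loqo.h for the exact array sizes and return
  codes): the problem solved is of the form

    minimize    c' * x + 1/2 x' * H_x * x
    subject to  A * x = b,   l <= x <= u

  with x of dimension n and m equality constraints; primal[] and
  dual[] receive the primal and dual variables of the solution
***************************************************************/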
int pr_loqo(int n, int m, double c[], double h_x[], double a[], double b[],
double l[], double u[], double primal[], double dual[],
int verb, double sigfig_max, int counter_max,
double margin, double bound, int restart)
{
  /* the knobs to be tuned ... */
  /* double margin = -0.95;   we will go up to 95% of the
                              distance between old variables and zero */
  /* double bound = 10;       preset value for the start. small
                              values give good initial feasibility
                              but may result in slow convergence
                              afterwards: we're too close to zero */
  /* to be allocated */
  double *workspace;
  double *diag_h_x;
  double *h_y;
  double *c_x;
  double *c_y;
  double *h_dot_x;
  double *rho;
  double *nu;
  double *tau;
  double *sigma;
  double *gamma_z;
  double *gamma_s;
  double *hat_nu;
  double *hat_tau;
  double *delta_x;
  double *delta_y;
  double *delta_s;
  double *delta_z;
  double *delta_g;
  double *delta_t;
  double *d;
  /* from the header - pointers into primal and dual */
  double *x;
  double *y;
  double *g;
  double *z;
  double *s;
  double *t;
  /* auxiliary variables */
  double b_plus_1;
  double c_plus_1;
  double x_h_x;
  double primal_inf;
  double dual_inf;
  double sigfig;
  double primal_obj, dual_obj;
  double mu;
  double alfa, step;
  int counter;
SVM Programming in MATLAB

The Support Vector Machine (SVM) is a supervised learning model that is widely used in machine learning and performs particularly well in classification and regression. MATLAB, as a powerful numerical computing environment, provides toolbox support for programming and applying SVMs. This article describes how to build and apply SVM models in MATLAB.
First, the basic idea: the goal of an SVM is to find a hyperplane that separates the classes as well as possible. The hyperplane is determined by maximizing the margin, i.e. the distance from the hyperplane to the closest data points (the support vectors). For nonlinear problems, the SVM uses a kernel function to map the original data into a high-dimensional space in which samples that were hard to separate become linearly separable.
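Concretely, the soft-margin formulation is the quadratic program

$$\min_{w,b,\xi}\ \tfrac{1}{2}\lVert w\rVert^2 + C\sum_i \xi_i \quad \text{s.t.}\quad y_i(w\cdot x_i+b) \ge 1-\xi_i,\ \ \xi_i \ge 0,$$

whose dual is a box-constrained QP with a single equality constraint, i.e. exactly the kind of problem a solver such as pr_loqo above is written for; the parameter C trades margin width against training errors.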
In MATLAB, an SVM model can be trained with the `svmtrain` function (superseded in current releases by `fitcsvm`; see the note below). It takes a data matrix and the corresponding class labels as input and returns a trained SVM structure. For example:
```matlab
% X: data matrix, one row per sample, one column per feature
% Y: label vector, +1 for one class, -1 for the other
model = svmtrain(X, Y);
```
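For a concrete (made-up) illustration, two Gaussian point clouds can serve as the two classes:
```matlab
rng(0);                                    % reproducible toy data
X = [randn(50, 2) + 1; randn(50, 2) - 1];  % 100 samples, 2 features
Y = [ones(50, 1); -ones(50, 1)];           % +1 / -1 class labels
model = svmtrain(X, Y);
```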
`svmtrain` accepts a number of optional name-value arguments, such as the kernel type and the regularization parameter C (the box constraint). For example, to select a radial basis function (RBF) kernel and set C to 1:
```matlab
model = svmtrain(X, Y, 'kernel_function', 'rbf', 'boxconstraint', 1);
```
Once training is finished, new data can be classified with the companion function `svmclassify`:
```matlab
% testX: test data matrix with the same number of features as X
predictedY = svmclassify(model, testX);
```
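Note that recent MATLAB releases have removed `svmtrain`/`svmclassify` in favor of `fitcsvm` and its `predict` method; assuming the Statistics and Machine Learning Toolbox is available, roughly equivalent calls are:
```matlab
model = fitcsvm(X, Y, 'KernelFunction', 'rbf', 'BoxConstraint', 1);
predictedY = predict(model, testX);
```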
For parameter tuning, a grid search is the usual approach: train and validate the SVM for every combination of candidate values for C and the kernel parameter (for the RBF kernel, the width σ) and keep the best pair, as sketched below. First define the candidate grids:
```matlab
cValues = logspace(-3, 3, 7);      % candidate values for the box constraint C
sigmaValues = logspace(-3, 3, 7);  % candidate values for the RBF width sigma
```
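A minimal sketch of the search itself, assuming the data have already been split into training and validation parts `Xtrain`, `Ytrain`, `Xval`, `Yval` (names chosen here only for illustration):
```matlab
bestErr = Inf;
for c = cValues
    for sigma = sigmaValues
        % train on the training split with this (C, sigma) pair
        mdl = svmtrain(Xtrain, Ytrain, 'kernel_function', 'rbf', ...
                       'boxconstraint', c, 'rbf_sigma', sigma);
        err = mean(svmclassify(mdl, Xval) ~= Yval);  % validation error rate
        if err < bestErr
            bestErr = err; bestC = c; bestSigma = sigma;
        end
    end
end
```
Using cross-validation instead of a single validation split gives a more reliable choice, at the cost of more training runs.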
The `svm` archive may contain example datasets, preprocessing scripts, or saved SVM model files; these resources can help beginners get started with SVM programming quickly, or give researchers pre-trained models for further analysis.
In summary, MATLAB provides a complete set of tools for learning about and applying support vector machines. By mastering `svmtrain`/`fitcsvm`, `svmclassify`/`predict`, and a simple grid search, you can build, train, and tune SVM models in MATLAB and apply them to a wide range of practical problems. Experimenting with different kernel functions and parameter settings will further improve performance and generalization.
