I'd like to implement my own Gaussian kernel in Python, just as an exercise. I'm using sklearn.svm.SVC(kernel=my_kernel), but I really don't understand what is going on.
I expected the function my_kernel to be called with the columns of the X matrix as parameters; instead it gets called with X, X as arguments. Looking at the examples does not make things any clearer.
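For reference, here is a stripped-down sketch of the behavior I am describing (synthetic data and a placeholder linear kernel, just so that fit runs; the printed shapes show the callable being handed whole matrices):

import numpy as np
from sklearn import svm

def my_kernel(A, B):
    # sklearn passes full data matrices, so the return value has to be
    # the (A.shape[0], B.shape[0]) Gram matrix, not a single number
    print('kernel called with shapes', A.shape, B.shape)
    return np.dot(A, B.T)  # placeholder: plain linear kernel

X = np.random.rand(20, 2)          # synthetic data, only for this check
y = np.array([0] * 10 + [1] * 10)

clf = svm.SVC(kernel=my_kernel)
clf.fit(X, y)          # during fit the kernel gets (X, X)
clf.predict(X[:5])     # here it appears to get (X_test, X_train)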
import scipy.io
import numpy as np
from sklearn import svm
import matplotlib.pyplot as plt
def svm_class(fileName):
    # load the training data and fit an SVM with the built-in RBF kernel
    data = scipy.io.loadmat(fileName)
    X = data['X']
    y = data['y']
    f = svm.SVC(kernel='rbf', gamma=50, C=1.0)
    f.fit(X, y.flatten())
    plotData(np.hstack((X, y)), X, f)
    return
def plotData(arr, X, f):
    ax = plt.subplot(111)
    ax.scatter(arr[arr[:, 2] == 0][:, 0], arr[arr[:, 2] == 0][:, 1], c='r', marker='o', label='Zero')
    ax.scatter(arr[arr[:, 2] == 1][:, 0], arr[arr[:, 2] == 1][:, 1], c='g', marker='+', label='One')

    h = .02  # step size in the mesh
    # create a mesh to plot in
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))

    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max] x [y_min, y_max].
    Z = f.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.contour(xx, yy, Z)
    plt.xlim(np.min(arr[:, 0]), np.max(arr[:, 0]))
    plt.ylim(np.min(arr[:, 1]), np.max(arr[:, 1]))
    plt.show()
    return
def gaussian_kernel(x1, x2):
    # Gaussian kernel between two individual sample vectors
    sigma = 0.5
    return np.exp(-np.sum((x1 - x2) ** 2) / (2 * sigma ** 2))
if __name__ == '__main__':
    fileName = 'ex6data2.mat'
    svm_class(fileName)
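Given that the callable gets whole matrices, my guess is that a custom Gaussian kernel has to return the full Gram matrix rather than a single value, roughly like the sketch below (gaussian_gram is just a name I made up, sigma=0.5 as above, and I have not verified this is exactly what SVC expects back):

import numpy as np

def gaussian_gram(X1, X2, sigma=0.5):
    # pairwise squared distances ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b
    # between every row of X1 and every row of X2
    sq_dists = (np.sum(X1 ** 2, axis=1)[:, None]
                + np.sum(X2 ** 2, axis=1)[None, :]
                - 2 * np.dot(X1, X2.T))
    # elementwise Gaussian -> (n_samples_1, n_samples_2) Gram matrix
    return np.exp(-sq_dists / (2 * sigma ** 2))

# presumably usable as: svm.SVC(kernel=gaussian_gram)

Is this the right way to think about it, or am I missing something about how SVC calls a custom kernel?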