GPs on Non-Euclidean Input Spaces
GPs on non-Euclidean input spaces have become increasingly relevant in recent years, especially for Bayesian optimization in chemistry. gpCAM can be used for this purpose as long as a suitable kernel is defined. Of course, if mean and noise functions are also provided, they have to operate on these non-Euclidean spaces as well.
In this example, we run a small GP on words.
#install the newest version of gpcam
#!pip install gpcam==8.0.3
import numpy as np
import matplotlib.pyplot as plt
from gpcam import GPOptimizer
from dask.distributed import Client
%load_ext autoreload
%autoreload 2
# The inputs are a plain Python list of strings -- gpCAM accepts arbitrary
# (non-Euclidean) objects as inputs as long as a compatible kernel is supplied.
x_data = ['hello', 'world', 'this', 'is', 'gpcam']
y_data = np.array([2., 1.9, 1.8, 3.0, 5.])
def string_distance(string1, string2):
    """Return a simple edit-style distance between two strings.

    The distance is the absolute difference of the lengths plus the number
    of positions in the overlapping prefix where the characters differ.

    Parameters
    ----------
    string1, string2 : str

    Returns
    -------
    int or float
        An int when all overlapping characters match, a float otherwise
        (each mismatch contributes 1., matching the original behavior).
    """
    # zip() truncates to the shorter string, i.e. the common prefix --
    # this replaces the explicit slicing and index loop of the original.
    difference = abs(len(string1) - len(string2))
    difference += sum(1. for c1, c2 in zip(string1, string2) if c1 != c2)
    return difference
def kernel(x1, x2, hps, obj):
    """Custom gpCAM kernel over a non-Euclidean (string) input space.

    Builds the pairwise string-distance matrix between the two collections
    of strings and passes it through gpCAM's differentiable Matern kernel.

    Parameters
    ----------
    x1, x2 : sequences of str
        Input points; any objects accepted by ``string_distance``.
    hps : array-like
        Hyperparameters: ``hps[0]`` scales the kernel and ``hps[1]`` is
        passed as the Matern length scale (per the usual GP convention --
        confirm against the gpCAM documentation).
    obj : GP object supplied by gpCAM
        Provides ``matern_kernel_diff1``.

    Returns
    -------
    numpy.ndarray of shape (len(x1), len(x2))
    """
    d = np.zeros((len(x1), len(x2)))
    # enumerate() replaces the manual count1/count2 bookkeeping.
    for i, s1 in enumerate(x1):
        for j, s2 in enumerate(x2):
            d[i, j] = string_distance(s1, s2)
    return hps[0] * obj.matern_kernel_diff1(d, hps[1])
# Build the GP on the string inputs; the custom kernel makes the
# non-Euclidean input space work. info = True enables verbose output.
my_gp = GPOptimizer(x_data,y_data, init_hyperparameters=np.ones((2)),
gp_kernel_function=kernel, info = True)
# Search bounds for the two hyperparameters used by `kernel`
# (hps[0] scale, hps[1] Matern length scale).
bounds = np.array([[0.001,100.],[0.001,100]])
# Train the hyperparameters within the given bounds.
my_gp.train(hyperparameter_bounds=bounds)
print("hyperparameters: ", my_gp.hyperparameters)
# Posterior mean and uncertainty at a new, unseen word.
print("prediction : ",my_gp.posterior_mean(['full'])["f(x)"])
print("uncertainty: ",np.sqrt(my_gp.posterior_covariance(['full'])["v(x)"]))
## Which candidate word should be measured next?
print(my_gp.input_space_dim)
# ask() ranks the candidate words by acquisition value; n = 4 requests
# four suggestions. NOTE(review): the extra parentheses around the string
# literals are no-ops -- these are plain strings, not tuples.
my_gp.ask(candidates=[('hello'),('world'),("it"),("is"),("me")], n = 4)