Merge pull request #21 from capaulson/Python3-Support
Python3 support
capaulson committed May 30, 2017
2 parents 81adb25 + f7ed958 commit c0ba9af
Showing 14 changed files with 127 additions and 129 deletions.
17 changes: 9 additions & 8 deletions examples/2D_leave_n_out.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
__author__ = 'cpaulson'
import pyKriging
from pyKriging.krige import kriging
@@ -15,7 +16,7 @@
# We generate our observed values based on our sampling plan and the test function
y = testfun(X)

-print 'Setting up the Kriging Model'
+print('Setting up the Kriging Model')
cvMSE = []
# Now that we have our initial data, we can create an instance of a kriging model
k = kriging(X, y, testfunction=testfun, name='simple', testPoints=300)
@@ -26,7 +27,7 @@

k.plot()
for i in range(15):
-    print i
+    print(i)
    newpoints = k.infill(1)
    for point in newpoints:
        # print 'Adding point {}'.format(point)
@@ -43,15 +44,15 @@

# #And plot the model

-print 'Now plotting final results...'
+print('Now plotting final results...')
# k.plot()


-print k.testPoints
-print k.history['points']
-print k.history['rsquared']
-print k.history['avgMSE']
-print cvMSE
+print(k.testPoints)
+print(k.history['points'])
+print(k.history['rsquared'])
+print(k.history['avgMSE'])
+print(cvMSE)
from matplotlib import pylab as plt
plt.plot(range(len(k.history['rsquared'])), k.history['rsquared'])
plt.plot(range(len(cvMSE)), cvMSE)
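This first example shows the pattern repeated across the whole commit: add from __future__ import print_function at the top of the file, then turn every Python 2 print statement into a print() call. The future import is what makes the converted calls behave the same on both interpreters. A minimal sketch of the difference, as a hypothetical snippet rather than part of this commit:

    from __future__ import print_function  # must precede all other statements (a docstring may come first)

    # Without the import, Python 2 parses print as a statement, so
    # print('a', 'b') would print the tuple ('a', 'b').
    print('a', 'b')  # prints "a b" on both Python 2 and Python 3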
11 changes: 6 additions & 5 deletions examples/2D_model_convergence.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
__author__ = 'cpaulson'
import pyKriging
from pyKriging.krige import kriging
@@ -13,7 +14,7 @@
# We generate our observed values based on our sampling plan and the test function
y = testfun(X)

-print 'Setting up the Kriging Model'
+print('Setting up the Kriging Model')

# Now that we have our initial data, we can create an instance of a kriging model
k = kriging(X, y, testfunction=testfun, name='simple', testPoints=250)
@@ -25,17 +26,17 @@
while k.history['rsquared'][-1] < 0.9999:
    newpoints = k.infill(2)
    for point in newpoints:
-        print 'Adding point {}'.format(point)
+        print('Adding point {}'.format(point))
        k.addPoint(point, testfun(point)[0])
    k.train()
    k.snapshot()
-    print 'Current rsquared is: {}'.format(k.history['rsquared'][-1])
+    print('Current rsquared is: {}'.format(k.history['rsquared'][-1]))

-print 'The prediction has converged, with {} number of points in the model'.format(k.n)
+print('The prediction has converged, with {} number of points in the model'.format(k.n))

# #And plot the model

-print 'Now plotting final results...'
+print('Now plotting final results...')
k.plot()


7 changes: 4 additions & 3 deletions examples/2D_simple_train.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
__author__ = 'cpaulson'
import pyKriging
from pyKriging.krige import kriging
@@ -13,7 +14,7 @@
# We generate our observed values based on our sampling plan and the test function
y = testfun(X)

-print 'Setting up the Kriging Model'
+print('Setting up the Kriging Model')

# Now that we have our initial data, we can create an instance of a kriging model
k = kriging(X, y, testfunction=testfun, name='simple', testPoints=250)
@@ -23,14 +24,14 @@
for i in range(5):
    newpoints = k.infill(2)
    for point in newpoints:
-        print 'Adding point {}'.format(point)
+        print(('Adding point {}'.format(point)))
        k.addPoint(point, testfun(point)[0])
    k.train()
    k.snapshot()

# #And plot the model

-print 'Now plotting final results...'
+print('Now plotting final results...')
k.plot()


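One wrinkle in this file: the converted line reads print(('Adding point {}'.format(point))) with a doubled pair of parentheses, likely an artifact of an automated 2to3-style conversion wrapping the old statement's argument. With a single argument the extra parentheses are a harmless no-op, but the same pattern around a multi-argument print would change the output, so it is worth cleaning up. A hypothetical snippet, not part of this commit:

    from __future__ import print_function
    print(('x'))       # same as print('x'): extra parentheses around one value do nothing
    print(('x', 'y'))  # prints the tuple ('x', 'y'), not "x y"
    print('x', 'y')    # prints "x y"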
9 changes: 5 additions & 4 deletions examples/2D_simple_train_expected_improvement.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
__author__ = 'cpaulson'
import pyKriging
from pyKriging.krige import kriging
@@ -15,7 +16,7 @@
optimizer = 'ga'

# Now that we have our initial data, we can create an instance of a kriging model
-print 'Setting up the Kriging Model'
+print('Setting up the Kriging Model')
k = kriging(X, y, testfunction=testfun, name='simple_ei', testPoints=300)
k.train(optimizer=optimizer)
k.snapshot()
@@ -25,7 +26,7 @@
for i in range(5):
    newpoints = k.infill(1, method='error')
    for point in newpoints:
-        print 'Adding point {}'.format(point)
+        print('Adding point {}'.format(point))
        k.addPoint(point, testfun(point)[0])
    k.train(optimizer=optimizer)
    k.snapshot()
@@ -34,13 +35,13 @@
for i in range(5):
    newpoints = k.infill(1, method='ei')
    for point in newpoints:
-        print 'Adding point {}'.format(point)
+        print('Adding point {}'.format(point))
        k.addPoint(point, testfun(point)[0])
    k.train(optimizer=optimizer)
    k.snapshot()

# And plot the results
-print 'Now plotting final results...'
+print('Now plotting final results...')
k.plot()


13 changes: 7 additions & 6 deletions examples/2d_regression_Kriging.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
__author__ = 'cpaulson'
import sys
sys.path.insert(0, '../')
@@ -16,26 +17,26 @@

# We generate our observed values based on our sampling plan and the test function
y = testfun(X)
-print X, y
+print(X, y)

testfun = pyKriging.testfunctions().branin


-print 'Setting up the Kriging Model'
+print('Setting up the Kriging Model')

# Now that we have our initial data, we can create an instance of a kriging model
k = regression_kriging(X, y, testfunction=testfun, name='simple', testPoints=250)
k.train(optimizer='pso')
k1 = kriging(X, y, testfunction=testfun, name='simple', testPoints=250)
k1.train(optimizer='pso')
-print k.Lambda
+print(k.Lambda)
k.snapshot()


for i in range(1):
    newpoints = k.infill(5)
    for point in newpoints:
-        print 'Adding point {}'.format(point)
+        print('Adding point {}'.format(point))
        newValue = testfun(point)[0]
        k.addPoint(point, newValue)
        k1.addPoint(point, newValue)
@@ -45,8 +46,8 @@
#
# # #And plot the model

-print 'Now plotting final results...'
-print k.Lambda
+print('Now plotting final results...')
+print(k.Lambda)
k.plot(show=False)
k1.plot()

7 changes: 4 additions & 3 deletions examples/3d_Simple_Train.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
import pyKriging
from pyKriging.krige import kriging
from pyKriging.samplingplan import samplingplan
@@ -23,13 +24,13 @@
# The infill method can be used for this
# In this example, we will add nine points in three batches. The model gets trained after each stage
for i in range(10):
-    print k.history['rsquared'][-1]
-    print 'Infill iteration {0}'.format(i + 1)
+    print(k.history['rsquared'][-1])
+    print('Infill iteration {0}'.format(i + 1))
    infillPoints = k.infill(10)

    # Evaluate the infill points and add them back to the Kriging model
    for point in infillPoints:
-        print 'Adding point {}'.format(point)
+        print('Adding point {}'.format(point))
        k.addPoint(point, testfun(point)[0])

    # Retrain the model with the new points added in to the model
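Taken together, the example scripts exercise one workflow: build a sampling plan, train an initial Kriging model, then alternate infill and retraining. A condensed Python 3 sketch assembled from the calls that appear in these diffs (the parameter values are illustrative, not prescribed by the commit):

    import pyKriging
    from pyKriging.krige import kriging
    from pyKriging.samplingplan import samplingplan

    sp = samplingplan(2)                    # two design variables
    X = sp.optimallhc(20)                   # 20-point optimal Latin hypercube
    testfun = pyKriging.testfunctions().branin
    y = testfun(X)

    k = kriging(X, y, testfunction=testfun, name='simple', testPoints=250)
    k.train()
    for i in range(10):
        for point in k.infill(10):          # propose new sample locations
            k.addPoint(point, testfun(point)[0])
        k.train()                           # retrain on the enlarged data set
        k.snapshot()                        # record r-squared / MSE history
    k.plot()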
7 changes: 4 additions & 3 deletions examples/coKriging.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
__author__ = 'cpaulson'

import sys
@@ -14,8 +15,8 @@ def cheap(X):
    C=-5
    D=0

-    print X
-    print ((X+D)*6-2)
+    print(X)
+    print(((X+D)*6-2))
    return A*np.power( ((X+D)*6-2), 2 )*np.sin(((X+D)*6-2)*2)+((X+D)-0.5)*B+C

def expensive(X):
@@ -30,7 +31,7 @@ def expensive(X):

ck = coKriging.coKriging(Xc, yc, Xe, ye)
ck.thetac = np.array([1.2073])
-print ck.Xc
+print(ck.Xc)
ck.updateData()
ck.updatePsi()
ck.neglnlikehood()
6 changes: 3 additions & 3 deletions pyKriging/GlobalSensitivity.py
@@ -83,8 +83,8 @@ def sensitivity_Sobol(self, Model, plot=0):
data = np.genfromtxt(dataFile, delimiter=' ', invalid_raise=False)
X = data[:,[3,4]]
VAS = data[:, [5]][:,0]
-print VAS
+print(VAS)
AASM = data[:, [6]][:,0]
-print AASM
+print(AASM)
VAD = data[:, [7]][:,0]
-print VAD
+print(VAD)
40 changes: 20 additions & 20 deletions pyKriging/coKriging.py
@@ -40,7 +40,7 @@ def __init__(self, Xc, yc, Xe, ye):
        self.one=ones([self.ne+self.nc,1])
        self.y=[self.yc, self.ye]

-        print 'here1'
+        print('here1')

    def reorder_data(self):
        xe = []
@@ -53,7 +53,7 @@ def reorder_data(self):

        for enu,entry in enumerate(self.Xc):
            if entry in self.Xe:
-                print 'Found this value in XE!!'
+                print('Found this value in XE!!')
                for enu1,test in enumerate(self.Xe):
                    # if entry[0] == test[0] and entry[1] == test[1]:
                    if entry == test:
@@ -87,25 +87,25 @@ def updateData(self):
    def traincheap(self):
        self.kc = kriging(self.Xc, self.yc)
        self.kc.train()
-        print
+        print()


    def distanceXc(self):
        self.distanceXc = np.zeros((self.nc,self.nc, self.k))
        for i in range( self.nc ):
-            for j in xrange(i+1,self.nc):
+            for j in range(i+1,self.nc):
                self.distanceXc[i][j] = np.abs((self.Xc[i]-self.Xc[j]))

    def distanceXe(self):
        self.distanceXe = np.zeros((self.ne,self.ne, self.k))
        for i in range( self.ne ):
-            for j in xrange(i+1,self.ne):
+            for j in range(i+1,self.ne):
                self.distanceXe[i][j] = np.abs((self.Xe[i]-self.Xe[j]))

    def distanceXcXe(self):
        self.distanceXcXe = np.zeros((self.nc,self.ne, self.k))
        for i in range( self.nc ):
-            for j in xrange(self.ne):
+            for j in range(self.ne):
                self.distanceXcXe[i][j] = np.abs((self.Xc[i]-self.Xe[j]))


@@ -118,13 +118,13 @@ def updatePsi(self):
        # print self.pc
        # print self.distanceXc
        newPsicXc = np.exp(-np.sum(self.thetac*np.power(self.distanceXc,self.pc), axis=2))
-        print newPsicXc[0]
+        print(newPsicXc[0])
        self.PsicXc = np.triu(newPsicXc,1)
        self.PsicXc = self.PsicXc + self.PsicXc.T + np.mat(eye(self.nc))+np.multiply(np.mat(eye(self.nc)),np.spacing(1))
        self.UPsicXc = np.linalg.cholesky(self.PsicXc)
        self.UPsicXc = self.UPsicXc.T
-        print self.PsicXc[0]
-        print self.UPsicXc
+        print(self.PsicXc[0])
+        print(self.UPsicXc)
        exit()

        newPsicXe = np.exp(-np.sum(self.thetac*np.power(self.distanceXe,self.pc), axis=2))
@@ -149,22 +149,22 @@ def neglnlikehood(self):
        self.muc = c/f
        # This only works if yc is transposed, then its a scalar under two layers of arrays. Correct? Not sure

-        print 'y',self.yd.T
+        print('y',self.yd.T)
        a = np.linalg.solve(self.UPsicXe.T, self.yd)
-        print 'a',a
+        print('a',a)
        b = np.linalg.solve(self.UPsicXe, a)
-        print 'b', b
+        print('b', b)
        c = ones([self.ne,1]) * b
-        print 'c', c
+        print('c', c)

        d = np.linalg.solve(self.UPsicXe.T, ones([self.ne,1], dtype=float))
-        print d
+        print(d)

        e = np.linalg.solve(self.UPsicXe, d)
-        print e
+        print(e)

        f = ones([self.ne,1]).T * e
-        print f
+        print(f)

        self.mud= c/f

@@ -175,9 +175,9 @@ def neglnlikehood(self):



-        print self.ne
-        print self.mud
-        print self.UPsicXe.T
+        print(self.ne)
+        print(self.mud)
+        print(self.UPsicXe.T)
        a = np.linalg.solve(self.UPsicXe.T,(self.yd-ones([self.ne,1])*self.mud))/self.ne
        b = np.linalg.solve(self.UPsicXe, a)
        self.SigmaSqrd=(self.yd-ones([self.ne,1])*self.mud).T* b
@@ -197,7 +197,7 @@ def fe(X):
    return np.power(X[:,0], 2) + np.power(X[:,1], 2)

if __name__=='__main__':
-    import samplingplan
+    from . import samplingplan
    import random
    sp = samplingplan.samplingplan(2)
    X = sp.optimallhc(20)
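Besides the print conversions, coKriging.py collects the commit's two other classic Python 3 fixes. First, xrange becomes range: xrange no longer exists in Python 3, where range is already lazy; under Python 2 the renamed calls now build small lists, which is harmless at these loop sizes. Second, the bare import samplingplan in the __main__ block becomes the explicit relative from . import samplingplan, because Python 3 dropped implicit relative imports inside packages. (The bare print likewise becomes print(), an empty line on both interpreters.) A minimal sketch of the import fix, assuming a hypothetical module sitting next to samplingplan.py inside the pyKriging package, not part of this commit:

    # pyKriging/some_module.py  (hypothetical)
    # Python 2 resolved "import samplingplan" against the package directory;
    # Python 3 requires the explicit relative form:
    from . import samplingplan

    sp = samplingplan.samplingplan(2)

Note that a relative import only resolves when the module runs as part of the package (for example via python -m pyKriging.coKriging), not as a standalone script.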
(Diffs for the remaining changed files were not loaded.)
