Add my DT Learner to defeat_learners assignment

master
Felix Martin 2020-10-05 12:49:58 -04:00
parent db537d7043
commit a662e302db
1 changed file with 108 additions and 64 deletions

@@ -1,64 +1,108 @@
-"""
-A simple wrapper for linear regression. (c) 2015 Tucker Balch
-Note, this is NOT a correct DTLearner; Replace with your own implementation.
-Copyright 2018, Georgia Institute of Technology (Georgia Tech)
-Atlanta, Georgia 30332
-All Rights Reserved
-
-Template code for CS 4646/7646
-
-Georgia Tech asserts copyright ownership of this template and all derivative
-works, including solutions to the projects assigned in this course. Students
-and other users of this template code are advised not to share it with others
-or to make it available on publicly viewable websites including repositories
-such as github and gitlab. This copyright statement should not be removed
-or edited.
-
-We do grant permission to share solutions privately with non-students such
-as potential employers. However, sharing with other current or future
-students of CS 7646 is prohibited and subject to being investigated as a
-GT honor code violation.
-
------do not edit anything above this line---
-
-Student Name: Tucker Balch (replace with your name)
-GT User ID: tb34 (replace with your User ID)
-GT ID: 900897987 (replace with your GT ID)
-"""
-
-import numpy as np
-import warnings
-
-class DTLearner(object):
-
-    def __init__(self, leaf_size=1, verbose=False):
-        warnings.warn("\n\n  WARNING! THIS IS NOT A CORRECT DTLearner IMPLEMENTATION! REPLACE WITH YOUR OWN CODE\n")
-        pass  # move along, these aren't the drones you're looking for
-
-    def author(self):
-        return 'tb34'  # replace tb34 with your Georgia Tech username
-
-    def addEvidence(self, dataX, dataY):
-        """
-        @summary: Add training data to learner
-        @param dataX: X values of data to add
-        @param dataY: the Y training values
-        """
-        # slap on 1s column so linear regression finds a constant term
-        newdataX = np.ones([dataX.shape[0], dataX.shape[1] + 1])
-        newdataX[:, 0:dataX.shape[1]] = dataX
-        # build and save the model
-        self.model_coefs, residuals, rank, s = np.linalg.lstsq(newdataX, dataY, rcond=None)
-
-    def query(self, points):
-        """
-        @summary: Estimate a set of test points given the model we built.
-        @param points: should be a numpy array with each row corresponding to a specific query.
-        @returns the estimated values according to the saved model.
-        """
-        return (self.model_coefs[:-1] * points).sum(axis=1) + self.model_coefs[-1]
-
-if __name__ == "__main__":
-    print("the secret clue is 'zzyzx'")
+import numpy as np
+
+
+class DTLearner:
+    LEAF = -1
+    NA = -1
+
+    def __init__(self, leaf_size=1, verbose=False):
+        self.leaf_size = leaf_size
+        self.verbose = verbose
+
+    def author(self):
+        return 'felixm'
+
+    def create_node(self, factor, split_value, left, right):
+        return np.array([(factor, split_value, left, right), ],
+                        dtype='|i4, f4, i4, i4')
+
+    def query_point(self, point):
+        node_index = 0
+        while self.rel_tree[node_index][0] != self.LEAF:
+            node = self.rel_tree[node_index]
+            split_factor = node[0]
+            split_value = node[1]
+            if point[split_factor] <= split_value:
+                # Descend into the left sub-tree.
+                node_index += node[2]
+            else:
+                node_index += node[3]
+        v = self.rel_tree[node_index][1]
+        return v
+
+    def query(self, points):
+        """
+        @summary: Estimate a set of test points given the model we built.
+        @param points: should be a numpy array with each row corresponding to a specific query.
+        @returns the estimated values according to the saved model.
+        """
+        def query_point(p): return self.query_point(p)
+        r = np.apply_along_axis(query_point, 1, points)
+        return r
+
+    def build_tree(self, xs, y):
+        """
+        @summary: Build a decision tree from the training data.
+        @param xs: X values of data to add
+        @param y: the Y training values
+        """
+        assert(xs.shape[0] == y.shape[0])
+        assert(xs.shape[0] > 0)  # If this is 0 something went wrong.
+        if xs.shape[0] <= self.leaf_size:
+            value = np.mean(y)
+            return self.create_node(self.LEAF, value, self.NA, self.NA)
+
+        if np.all(y[0] == y):
+            return self.create_node(self.LEAF, y[0], self.NA, self.NA)
+
+        i, split_value = self.get_i_and_split_value(xs, y)
+        select_l = xs[:, i] <= split_value
+        select_r = xs[:, i] > split_value
+        lt = self.build_tree(xs[select_l], y[select_l])
+        rt = self.build_tree(xs[select_r], y[select_r])
+        root = self.create_node(i, split_value, 1, lt.shape[0] + 1)
+        root = np.concatenate([root, lt, rt])
+        return root
+
+    def addEvidence(self, data_x, data_y):
+        """
+        @summary: Add training data to learner
+        @param data_x: X values of data to add
+        @param data_y: the Y training values
+        """
+        self.rel_tree = self.build_tree(data_x, data_y)
+
+    def get_correlations(self, xs, y):
+        """ Return a list of 2-tuples where the first element is the
+        absolute correlation and the second element is the feature index,
+        sorted by highest correlation first. """
+        correlations = []
+        for i in range(xs.shape[1]):
+            c = abs(np.corrcoef(xs[:, i], y=y)[0, 1])
+            correlations.append((c, i))
+        correlations.sort(reverse=True)
+        return correlations
+
+    def get_i_and_split_value(self, xs, y):
+        # If all elements fell on one side of the median we would get a
+        # sub-tree with zero elements, but we need at least one element in
+        # both trees. We avoid empty sub-trees in two steps. First, we take
+        # the average of the median and a smaller value and use that as the
+        # new split value. If that doesn't work (when all values are the
+        # same), we fall back to the feature with the next smaller
+        # correlation. At the end we assert that not all values are smaller
+        # than or equal to the split value.
+        for _, i in self.get_correlations(xs, y):
+            split_value = np.median(xs[:, i])
+            select = xs[:, i] <= split_value
+            if select.all():
+                for value in xs[:, i]:
+                    if value < split_value:
+                        split_value = (value + split_value) / 2.0
+                        select = xs[:, i] <= split_value
+                        if not select.all():
+                            break
+            if not select.all():
+                break
+        assert(not select.all())
+        return i, split_value
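
A detail of this commit worth calling out: the tree is not a linked structure. create_node emits one structured-array record of (factor, split_value, left, right) per node, build_tree concatenates them, and left/right hold offsets relative to the current index (1 for the left child, size of the left sub-tree plus 1 for the right). Below is a minimal standalone sketch of that encoding with a hypothetical hand-built tree; the helper name walk_tree is mine, not the commit's:

import numpy as np

# Hypothetical tree for "1.0 if x[0] <= 5.0 else 2.0", laid out the way
# create_node/build_tree do it: one (factor, split_value, left_offset,
# right_offset) record per node; factor == -1 (LEAF) marks a leaf.
tree = np.array(
    [(0, 5.0, 1, 2),      # root: split on feature 0 at 5.0
     (-1, 1.0, -1, -1),   # index 1 = 0 + left offset: leaf, value 1.0
     (-1, 2.0, -1, -1)],  # index 2 = 0 + right offset: leaf, value 2.0
    dtype='|i4, f4, i4, i4')

def walk_tree(tree, point, LEAF=-1):
    """Standalone version of the loop in DTLearner.query_point."""
    node_index = 0
    while tree[node_index][0] != LEAF:
        node = tree[node_index]
        if point[node[0]] <= node[1]:
            node_index += node[2]  # hop forward to the left sub-tree
        else:
            node_index += node[3]  # hop forward to the right sub-tree
    return tree[node_index][1]

print(walk_tree(tree, np.array([3.0])))  # -> 1.0
print(walk_tree(tree, np.array([7.0])))  # -> 2.0

Because every offset points forward, the whole tree lives in one contiguous array and querying is a simple index walk with no recursion or pointer chasing.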
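And a quick end-to-end sketch of training and querying the learner; the module name DTLearner and the synthetic data are assumptions for illustration, not part of the commit:

import numpy as np
from DTLearner import DTLearner  # assumes the file is saved as DTLearner.py

# Toy regression data: y depends mostly on feature 0.
rng = np.random.default_rng(0)
data_x = rng.random((200, 3))
data_y = 3.0 * data_x[:, 0] + 0.05 * rng.standard_normal(200)

learner = DTLearner(leaf_size=5)
learner.addEvidence(data_x, data_y)  # builds self.rel_tree
pred_y = learner.query(data_x)       # one estimate per row
rmse = np.sqrt(np.mean((data_y - pred_y) ** 2))
print(f"in-sample RMSE: {rmse:.4f}")

With leaf_size=1 the tree fits the training set almost exactly; larger leaf values average more samples per leaf, trading in-sample accuracy for generalization.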