Change best for LinReg to return optimal data
@@ -26,27 +26,63 @@ GT ID: 900897987 (replace with your GT ID)
 """

 import numpy as np
+import pandas as pd
 import math

-# this function should return a dataset (X and Y) that will work
-# better for linear regression than decision trees
 def best4LinReg(seed=1489683273):
+    """
+    This function should return a dataset (X and Y) that will work better for
+    linear regression than decision trees.
+
+    We make Y a simple linear combination of X. That will give the Linear
+    Regression algorithm a very easy time (no RMSE at all) and beat the DT
+    easily.
+    """
     np.random.seed(seed)
-    X = np.zeros((100,2))
-    Y = np.random.random(size = (100,))*200-100
-    # Here's is an example of creating a Y from randomly generated
-    # X with multiple columns
-    # Y = X[:,0] + np.sin(X[:,1]) + X[:,2]**2 + X[:,3]**3
+    X = np.random.random(size=(100, 2)) * 200 - 100
+    Y = X[:, 0] * -2 + X[:, 1] * 3
     return X, Y


 def best4DT(seed=1489683273):
+    """
+    This function should return a dataset that will work better for decision
+    trees than linear regression.
+    """
+    # Z = np.append(X, Y.reshape(Y.shape[0], 1), 1)
+    # pd.DataFrame(Z).to_csv("Z.csv", header=None, index=None)
+    # np.random.seed(seed)
+    # X = np.random.random(size=(100, 10))*1000-100
+    # Y = np.random.random(size=(100,))*1000-100
+
     np.random.seed(seed)
-    X = np.zeros((100,2))
-    Y = np.random.random(size = (100,))*200-100
+    # X_1 = np.random.random(size=(100, 1))*200-100
+    # X_2 = np.random.random(size=(100, 1))*200-100
+    # X_3 = np.random.random(size=(100, 1))*200-100
+    # X_4 = np.random.random(size=(100, 1))*200-100
+    # X = np.concatenate([X_1, X_2, X_3, X_4], 1)
+
+    # XXX: I honestly don't know how to help the DTLearner, yet.
+
+    X_1 = np.asarray([i for i in range(1, 101)]).reshape(100, 1)
+    X_2 = np.asarray([i for i in range(100, 1100, 10)]).reshape(100, 1)
+    X_3 = np.asarray([i for i in range(200, 300)]).reshape(100, 1)
+    X_4 = np.asarray([i for i in range(300, 400)]).reshape(100, 1)
+    X_5 = np.asarray([i for i in range(1, 101)]).reshape(100, 1)
+    X_6 = np.asarray([i for i in range(1, 101)]).reshape(100, 1)
+    X_7 = np.asarray([i for i in range(1, 101)]).reshape(100, 1)
+    X_8 = np.asarray([i for i in range(1, 101)]).reshape(100, 1)
+    X = np.concatenate([X_1, X_2, X_3, X_4, X_5, X_6, X_7, X_8], 1)
+    # Y = X[:, 0] * 2 + X[:, 1] * 3
+    Y = np.random.random(size=(100,)) * 200 - 100
     return X, Y

-def author():
-    return 'tb34' #Change this to your user ID
-
-if __name__=="__main__":
+def author():
+    return 'felixm' # Change this to your user ID
+
+
+if __name__ == "__main__":
     print("they call me Tim.")
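Why this works: best4LinReg now makes Y an exact linear combination of the two X columns, so an ordinary least-squares fit recovers it with essentially zero residual, while a decision tree can only approximate the plane piecewise. Below is a minimal sanity check; it assumes the gen_data module from this commit is importable, and linear_fit_rmse / step_dataset are illustrative helpers, not part of the repo. step_dataset is one hedged idea for the still-open best4DT question flagged by the XXX comment above: a Y that is a step function of one feature is trivial for a leaf_size=1 tree to memorize but poorly captured by a single global line.

import numpy as np

from gen_data import best4LinReg, best4DT  # the module edited in this commit


def linear_fit_rmse(X, Y):
    # In-sample RMSE of an ordinary least-squares fit with an intercept term.
    A = np.column_stack([X, np.ones(X.shape[0])])
    coeffs, *_ = np.linalg.lstsq(A, Y, rcond=None)
    residuals = Y - A @ coeffs
    return float(np.sqrt((residuals ** 2).mean()))


def step_dataset(seed=1489683273, rows=100):
    # Hypothetical best4DT candidate: Y is a step function of X[:, 0], which a
    # leaf_size=1 tree memorizes exactly but a single line fits poorly.
    rng = np.random.RandomState(seed)
    X = rng.random_sample((rows, 2)) * 200 - 100
    Y = np.where(X[:, 0] > 0, 100.0, -100.0)
    return X, Y


if __name__ == "__main__":
    X, Y = best4LinReg()
    print(linear_fit_rmse(X, Y))   # ~0 up to float error: Y is exactly linear in X

    X, Y = best4DT()
    print(linear_fit_rmse(X, Y))   # large: Y is still random in this commit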
@@ -28,10 +28,12 @@ import DTLearner as dt
 from gen_data import best4LinReg, best4DT

 # compare two learners' rmse out of sample
+
+
 def compare_os_rmse(learner1, learner2, X, Y):

     # compute how much of the data is training and testing
-    train_rows = int(math.floor(0.6* X.shape[0]))
+    train_rows = int(math.floor(0.6 * X.shape[0]))
     test_rows = X.shape[0] - train_rows

     # separate out training and testing data
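The hunk above only shows the top of compare_os_rmse. For orientation, here is a rough sketch of the full routine it belongs to: a 60/40 train/test split followed by an out-of-sample RMSE for each learner. The body below is a reconstruction under the assumption that both learners expose an addEvidence/query interface; it is not the file's exact code.

import math

import numpy as np


def compare_os_rmse_sketch(learner1, learner2, X, Y):
    # 60% of the rows train, the remaining 40% test (matching the diff above)
    train_rows = int(math.floor(0.6 * X.shape[0]))
    train_X, train_Y = X[:train_rows, :], Y[:train_rows]
    test_X, test_Y = X[train_rows:, :], Y[train_rows:]

    rmses = []
    for learner in (learner1, learner2):
        learner.addEvidence(train_X, train_Y)       # assumed learner API
        pred_Y = np.asarray(learner.query(test_X))  # assumed learner API
        rmses.append(math.sqrt(((test_Y - pred_Y) ** 2).mean()))
    return rmses[0], rmses[1]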
@@ -56,11 +58,12 @@ def compare_os_rmse(learner1, learner2, X, Y):
     return rmse1, rmse2

+
 def test_code():

     # create two learners and get data
-    lrlearner = lrl.LinRegLearner(verbose = False)
-    dtlearner = dt.DTLearner(verbose = False, leaf_size = 1)
+    lrlearner = lrl.LinRegLearner(verbose=False)
+    dtlearner = dt.DTLearner(verbose=False, leaf_size=1)
     X, Y = best4LinReg()

     # compare the two learners
@@ -78,8 +81,8 @@ def test_code():
     print

     # get data that is best for a random tree
-    lrlearner = lrl.LinRegLearner(verbose = False)
-    dtlearner = dt.DTLearner(verbose = False, leaf_size = 1)
+    lrlearner = lrl.LinRegLearner(verbose=False)
+    dtlearner = dt.DTLearner(verbose=False, leaf_size=1)
     X, Y = best4DT()

     # compare the two learners
@@ -96,5 +99,6 @@ def test_code():
         print("DT >= 0.9 LR: fail")
     print

-if __name__=="__main__":
+
+if __name__ == "__main__":
     test_code()
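One small portability note on the context lines: the bare print statements in test_code are Python 2 leftovers. Under Python 3, which the parenthesised print(...) calls elsewhere in the file imply, a bare print merely evaluates the built-in and emits nothing, so the blank separator line it was meant to produce never appears. The Python 3 equivalent would be:

print()  # Python 3: prints an empty line; a bare `print` prints nothing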