"""Template for generating data to fool learners (c) 2016 Tucker Balch

Copyright 2018, Georgia Institute of Technology (Georgia Tech)
Atlanta, Georgia 30332
All Rights Reserved

Template code for CS 4646/7646

Georgia Tech asserts copyright ownership of this template and all derivative
works, including solutions to the projects assigned in this course. Students
and other users of this template code are advised not to share it with others
or to make it available on publicly viewable websites including repositories
such as github and gitlab. This copyright statement should not be removed
or edited.

We do grant permission to share solutions privately with non-students such
as potential employers. However, sharing with other current or future
students of CS 7646 is prohibited and subject to being investigated as a
GT honor code violation.

-----do not edit anything above this line---

Student Name: Tucker Balch (replace with your name)
GT User ID: tb34 (replace with your User ID)
GT ID: 900897987 (replace with your GT ID)
"""
# Standard library first, then third-party (PEP 8 import grouping).
import math

import numpy as np
import pandas as pd
def best4LinReg(seed=1489683273):
    """Return a dataset (X, Y) on which linear regression beats decision trees.

    Y is an exact linear combination of the two feature columns, so a
    linear-regression learner fits it perfectly (zero RMSE) while a
    decision tree can only approximate it piecewise.

    :param seed: RNG seed so the dataset is reproducible.
    :return: tuple (X, Y) where X is a (100, 2) float array of features
        drawn uniformly from [-100, 100) and Y is a (100,) float array.
    """
    np.random.seed(seed)
    # 100 samples, 2 features, uniform over [-100, 100).
    features = np.random.random(size=(100, 2)) * 200 - 100
    # Exact linear target: -2 * x0 + 3 * x1.
    target = features[:, 0] * -2 + features[:, 1] * 3
    return features, target
def best4DT(seed=1489683273):
    """Return a dataset (X, Y) on which a decision tree beats linear regression.

    The original implementation made Y pure random noise, unrelated to X,
    which helps neither learner.  Here Y is a piecewise-constant function of
    the features: a decision tree reproduces it exactly with a few
    axis-aligned splits, while a single global linear model cannot fit a
    step function and incurs large error.

    :param seed: RNG seed so the dataset is reproducible.
    :return: tuple (X, Y) where X is a (100, 2) float array of features
        drawn uniformly from [-100, 100) and Y is a (100,) float array.
    """
    np.random.seed(seed)
    # 100 samples, 2 features, uniform over [-100, 100).
    X = np.random.random(size=(100, 2)) * 200 - 100
    # Step-function target: constant within each quadrant of feature space.
    # Trivial for a DT (two splits), hopeless for one linear hyperplane.
    Y = (np.where(X[:, 0] > 0, 100.0, -100.0)
         + np.where(X[:, 1] > 0, 50.0, -50.0))
    return X, Y
def author():
    """Return the GT user ID of the student who wrote this code."""
    gt_user_id = 'felixm'  # Change this to your user ID
    return gt_user_id
if __name__ == "__main__":
|
|
print("they call me Tim.")
|