Implement first version of strategy learner

This version does not pass the automatic test.
This commit is contained in:
2020-11-04 15:14:27 -05:00
parent c40ffcf84b
commit 05db89e8c2
3 changed files with 423 additions and 411 deletions

View File

@@ -1,88 +1,94 @@
"""
Template for implementing StrategyLearner (c) 2016 Tucker Balch
Copyright 2018, Georgia Institute of Technology (Georgia Tech)
Atlanta, Georgia 30332
All Rights Reserved
Template code for CS 4646/7646
Georgia Tech asserts copyright ownership of this template and all derivative
works, including solutions to the projects assigned in this course. Students
and other users of this template code are advised not to share it with others
or to make it available on publicly viewable websites including repositories
such as github and gitlab. This copyright statement should not be removed
or edited.
We do grant permission to share solutions privately with non-students such
as potential employers. However, sharing with other current or future
students of CS 7646 is prohibited and subject to being investigated as a
GT honor code violation.
-----do not edit anything above this line---
Student Name: Tucker Balch (replace with your name)
GT User ID: tb34 (replace with your User ID)
GT ID: 900897987 (replace with your GT ID)
"""
import datetime as dt
import pandas as pd
import util as ut
import util
import indicators
from BagLearner import BagLearner
from RTLearner import RTLearner
class StrategyLearner(object):
    """Classification-based trading strategy learner.

    Trains a random-tree learner (``RTLearner``) on technical indicators
    computed from historical prices, then converts the learner's
    {-1, 0, 1} predictions into share orders in ``testPolicy``.

    Parameters
    ----------
    verbose : bool
        Print diagnostic output when True.
    impact : float
        Market impact per trade (stored but not yet used by this version).
    commission : float
        Commission per trade (stored but not yet used by this version).
    testing : bool
        When True, ``testPolicy`` returns the full orders DataFrame
        (Symbol/Order/Shares) for experiment plotting; otherwise only the
        ``Shares`` column, as the grader expects.
    """

    def __init__(self, verbose=False, impact=0.0, commission=0.0, testing=False):
        self.verbose = verbose
        self.impact = impact
        self.commission = commission
        self.testing = testing

    def _get_volume(self, symbol="IBM",
                    sd=dt.datetime(2008, 1, 1),
                    ed=dt.datetime(2009, 1, 1)):
        """Fetch trading volume for *symbol* over [sd, ed]. Kept for reference.

        Fixed: the original body referenced undefined names ``syms`` and
        ``dates`` (leftovers from the template) and would raise NameError.
        Returns the volume DataFrame instead of discarding it.
        """
        volume_all = ut.get_data([symbol], pd.date_range(sd, ed),
                                 colname="Volume")  # automatically adds SPY
        volume = volume_all[[symbol]]  # only the portfolio symbol
        if self.verbose:
            print(volume)
        return volume

    def _add_indicators(self, df, symbol):
        """Mutate *df* in place: drop SPY, append indicator columns, drop NaNs.

        The indicator helpers add columns such as ``macd_diff``, ``rsi``,
        ``price_sma_8`` and ``pct_3`` (3-day forward percent change) —
        assumed from the column names read back in addEvidence; confirm
        against the indicators module.
        """
        df.drop(columns=["SPY"], inplace=True)
        indicators.macd(df, symbol)
        indicators.rsi(df, symbol)
        indicators.price_sma(df, symbol, [8])
        indicators.price_delta(df, symbol, 3)
        # Indicators need warm-up periods; drop rows with incomplete data.
        df.dropna(inplace=True)

    def addEvidence(self, symbol="IBM",
                    sd=dt.datetime(2008, 1, 1),
                    ed=dt.datetime(2009, 1, 1),
                    sv=10000):
        """Train the learner on indicator features over [sd, ed].

        Labels are the 3-day forward percent change classified into
        {-1, 0, 1} with a +/-0.1 dead band. Returns the label Series
        (handy for debugging plots); ``sv`` is unused by training.
        """
        self.indicators = ['macd_diff', 'rsi', 'price_sma_8']
        df = util.get_data([symbol], pd.date_range(sd, ed))
        self._add_indicators(df, symbol)

        def classify_y(pct):
            # Dead band of +/-0.1 keeps weak moves out of the signal.
            if pct > 0.1:
                return 1
            elif pct < -0.1:
                return -1
            return 0

        self.learner = RTLearner(leaf_size=7)
        # Alternative ensemble, kept for experimentation:
        # self.learner = BagLearner(RTLearner, 5, {'leaf_size': 5})
        data_x = df[self.indicators].to_numpy()
        y = df['pct_3'].apply(classify_y)
        self.learner.addEvidence(data_x, y.to_numpy())
        return y

    def strat(self, data_y, orders):
        """Fill ``orders['Shares']`` from predictions in *data_y*.

        Holdings are constrained to {-1000, 0, +1000}: a +1 signal buys
        into a full long position, a -1 signal sells into a full short,
        and a 0 signal holds.
        """
        self.holding = 0

        def trade(row):
            signal = int(data_y.loc[row.name][0])
            shares = 0
            if signal == 1:
                if self.holding == 0:
                    shares = 1000          # enter long
                elif self.holding == -1000:
                    shares = 2000          # reverse short -> long
            elif signal == -1:
                if self.holding == 0:
                    shares = -1000         # enter short
                elif self.holding == 1000:
                    shares = -2000         # reverse long -> short
            self.holding += shares
            return shares

        orders["Shares"] = orders.apply(trade, axis=1)

    def testPolicy(self, symbol="IBM",
                   sd=dt.datetime(2009, 1, 1),
                   ed=dt.datetime(2010, 1, 1),
                   sv=10000):
        """Apply the trained learner to [sd, ed] and emit trades.

        Requires ``addEvidence`` to have been called first (it creates
        ``self.learner`` and ``self.indicators``). Returns the full
        orders DataFrame when ``self.testing`` is True, else just the
        ``Shares`` column; ``sv`` is unused here.
        """
        df = util.get_data([symbol], pd.date_range(sd, ed))
        self._add_indicators(df, symbol)
        data_x = df[self.indicators].to_numpy()
        data_y = pd.DataFrame(index=df.index, data=self.learner.query(data_x))
        orders = pd.DataFrame(index=df.index)
        orders["Symbol"] = symbol
        orders["Order"] = ""
        orders["Shares"] = 0
        self.strat(data_y, orders)
        if self.testing:
            return orders
        return orders[["Shares"]]
if __name__ == "__main__":
    # This module is meant to be imported; running it directly only
    # prints a reminder that the learner must be trained, not invented.
    print("One does not simply think up a strategy")

View File

@@ -93,29 +93,29 @@ def compare_manual_strategies(symbol, sv, sd, ed):
def experiment1():
symbol = "JPM"
start_value = 10000
sv = 10000
sd = dt.datetime(2008, 1, 1) # in-sample
ed = dt.datetime(2009, 12, 31) # in-sample
# sd = dt.datetime(2010, 1, 1) # out-sample
# ed = dt.datetime(2011, 12, 31) # out-sample
sd_out = dt.datetime(2010, 1, 1) # out-sample
ed_out = dt.datetime(2011, 12, 31) # out-sample
df = util.get_data([symbol], pd.date_range(sd, ed_out))
df.drop(columns=["SPY"], inplace=True)
# visualize_correlations(symbol, df)
# plot_indicators(symbol, df)
# compare_manual_strategies(symbol, start_value, sd, ed)
df = util.get_data([symbol], pd.date_range(sd, ed))
df.drop(columns=["SPY"], inplace=True)
# compare_manual_strategies(symbol, sv, sd, ed)
bs = BenchmarkStrategy()
orders = bs.testPolicy(symbol, sd, ed, start_value)
df["Benchmark"] = marketsim.compute_portvals(orders, start_value)
orders = bs.testPolicy(symbol, sd_out, ed_out, sv)
df["Benchmark"] = marketsim.compute_portvals(orders, sv)
df["Orders Benchmark"] = orders["Shares"]
sl = StrategyLearner()
orders = ms.testPolicy(symbol, sd, ed, start_value)
df["SL"] = marketsim.compute_portvals(orders, start_value)
sl = StrategyLearner(testing=True)
sl.addEvidence(symbol, sd, ed, sv)
orders = sl.testPolicy(symbol, sd_out, ed_out, sv)
df["SL"] = marketsim.compute_portvals(orders, sv)
df["Orders SL"] = orders["Shares"]
# df["Holding Manual"] = orders["Shares"].cumsum()
fig, ax = plt.subplots(3, sharex=True)
df[[symbol]].plot(ax=ax[0])
@@ -127,6 +127,12 @@ def experiment1():
MultiCursor(fig.canvas, ax, color='r', lw=0.5)
plt.show()
# For debugging the classification learner:
# df["y_train"] = sl.addEvidence(symbol, sd, ed, sv)
# df["y_query"] = sl.testPolicy(symbol, sd, ed, sv)
# df[["y_train", "y_query"]].plot(ax=ax[1])
if __name__ == "__main__":
experiment1()