Add code for tutorial.

main
Felix Martin 2021-10-11 20:45:38 -04:00
parent 71ceedab45
commit a95016431f
39 changed files with 2368 additions and 0 deletions

1
p0_tutorial/VERSION Normal file
View File

@ -0,0 +1 @@
v1.002

22
p0_tutorial/addition.py Normal file
View File

@ -0,0 +1,22 @@
# addition.py
# -----------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
Run python autograder.py
"""
def add(a, b):
"Return the sum of a and b"
"*** YOUR CODE HERE ***"
return 0
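
The q1 test cases further down in this commit (test_cases/q1/addition*.test) simply evaluate expressions such as addition.add(1,1) and compare the string of the result against the matching .solution file. A minimal sketch of that check, assuming it is run from the project directory with the project's Python 2:

import addition
print str(addition.add(1, 1)) == "2"   # True once add() is implemented; False for the stub above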

358
p0_tutorial/autograder.py Normal file
View File

@ -0,0 +1,358 @@
# autograder.py
# -------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
# imports from python standard library
import grading
import imp
import optparse
import os
import re
import sys
import projectParams
import random
random.seed(0)
try:
from pacman import GameState
except:
pass
# register arguments and set default values
def readCommand(argv):
parser = optparse.OptionParser(description = 'Run public tests on student code')
parser.set_defaults(generateSolutions=False, edxOutput=False, gsOutput=False, muteOutput=False, printTestCase=False, noGraphics=False)
parser.add_option('--test-directory',
dest = 'testRoot',
default = 'test_cases',
help = 'Root test directory which contains subdirectories corresponding to each question')
parser.add_option('--student-code',
dest = 'studentCode',
default = projectParams.STUDENT_CODE_DEFAULT,
help = 'comma separated list of student code files')
parser.add_option('--code-directory',
dest = 'codeRoot',
default = "",
help = 'Root directory containing the student and testClass code')
parser.add_option('--test-case-code',
dest = 'testCaseCode',
default = projectParams.PROJECT_TEST_CLASSES,
help = 'class containing testClass classes for this project')
parser.add_option('--generate-solutions',
dest = 'generateSolutions',
action = 'store_true',
help = 'Write solutions generated to .solution file')
parser.add_option('--edx-output',
dest = 'edxOutput',
action = 'store_true',
help = 'Generate edX output files')
parser.add_option('--gradescope-output',
dest = 'gsOutput',
action = 'store_true',
help = 'Generate GradeScope output files')
parser.add_option('--mute',
dest = 'muteOutput',
action = 'store_true',
help = 'Mute output from executing tests')
parser.add_option('--print-tests', '-p',
dest = 'printTestCase',
action = 'store_true',
help = 'Print each test case before running them.')
parser.add_option('--test', '-t',
dest = 'runTest',
default = None,
help = 'Run one particular test. Relative to test root.')
parser.add_option('--question', '-q',
dest = 'gradeQuestion',
default = None,
help = 'Grade one particular question.')
parser.add_option('--no-graphics',
dest = 'noGraphics',
action = 'store_true',
help = 'No graphics display for pacman games.')
(options, args) = parser.parse_args(argv)
return options
# confirm we should author solution files
def confirmGenerate():
print 'WARNING: this action will overwrite any solution files.'
print 'Are you sure you want to proceed? (yes/no)'
while True:
ans = sys.stdin.readline().strip()
if ans == 'yes':
break
elif ans == 'no':
sys.exit(0)
else:
print 'please answer either "yes" or "no"'
# TODO: Fix this so that tracebacks work correctly.
# Looking at the source of the traceback module, and presuming it works
# the same way as the interpreter's, it uses co_filename. This is,
# however, a read-only attribute.
def setModuleName(module, filename):
functionType = type(confirmGenerate)
classType = type(optparse.Option)
for i in dir(module):
o = getattr(module, i)
if hasattr(o, '__file__'): continue
if type(o) == functionType:
setattr(o, '__file__', filename)
elif type(o) == classType:
setattr(o, '__file__', filename)
# TODO: assign member __file__'s?
#print i, type(o)
#from cStringIO import StringIO
def loadModuleString(moduleSource):
# Below is broken; imp doesn't believe it's being passed a file:
# ValueError: load_module arg#2 should be a file or None
#
#f = StringIO(moduleCodeDict[k])
#tmp = imp.load_module(k, f, k, (".py", "r", imp.PY_SOURCE))
tmp = imp.new_module(k)
exec moduleCodeDict[k] in tmp.__dict__
setModuleName(tmp, k)
return tmp
import py_compile
def loadModuleFile(moduleName, filePath):
with open(filePath, 'r') as f:
return imp.load_module(moduleName, f, "%s.py" % moduleName, (".py", "r", imp.PY_SOURCE))
def readFile(path, root=""):
"Read file from disk at specified path and return as string"
with open(os.path.join(root, path), 'r') as handle:
return handle.read()
#######################################################################
# Error Hint Map
#######################################################################
# TODO: use these
ERROR_HINT_MAP = {
'q1': {
"<type 'exceptions.IndexError'>": """
We noticed that your project threw an IndexError on q1.
While many things may cause this, it may have been from
assuming a certain number of successors from a state space
or assuming a certain number of actions available from a given
state. Try making your code more general (no hardcoded indices)
and submit again!
"""
},
'q3': {
"<type 'exceptions.AttributeError'>": """
We noticed that your project threw an AttributeError on q3.
While many things may cause this, it may have been from assuming
a certain size or structure to the state space. For example, if you have
a line of code assuming that the state is (x, y) and we run your code
on a state space with (x, y, z), this error could be thrown. Try
making your code more general and submit again!
"""
}
}
import pprint
def splitStrings(d):
d2 = dict(d)
for k in d:
if k[0:2] == "__":
del d2[k]
continue
if d2[k].find("\n") >= 0:
d2[k] = d2[k].split("\n")
return d2
def printTest(testDict, solutionDict):
pp = pprint.PrettyPrinter(indent=4)
print "Test case:"
for line in testDict["__raw_lines__"]:
print " |", line
print "Solution:"
for line in solutionDict["__raw_lines__"]:
print " |", line
def runTest(testName, moduleDict, printTestCase=False, display=None):
import testParser
import testClasses
for module in moduleDict:
setattr(sys.modules[__name__], module, moduleDict[module])
testDict = testParser.TestParser(testName + ".test").parse()
solutionDict = testParser.TestParser(testName + ".solution").parse()
test_out_file = os.path.join('%s.test_output' % testName)
testDict['test_out_file'] = test_out_file
testClass = getattr(projectTestClasses, testDict['class'])
questionClass = getattr(testClasses, 'Question')
question = questionClass({'max_points': 0}, display)
testCase = testClass(question, testDict)
if printTestCase:
printTest(testDict, solutionDict)
# This is a fragile hack to create a stub grades object
grades = grading.Grades(projectParams.PROJECT_NAME, [(None,0)])
testCase.execute(grades, moduleDict, solutionDict)
# returns all the tests you need to run in order to run question
def getDepends(testParser, testRoot, question):
allDeps = [question]
questionDict = testParser.TestParser(os.path.join(testRoot, question, 'CONFIG')).parse()
if 'depends' in questionDict:
depends = questionDict['depends'].split()
for d in depends:
# run dependencies first
allDeps = getDepends(testParser, testRoot, d) + allDeps
return allDeps
# get list of questions to grade
def getTestSubdirs(testParser, testRoot, questionToGrade):
problemDict = testParser.TestParser(os.path.join(testRoot, 'CONFIG')).parse()
if questionToGrade != None:
questions = getDepends(testParser, testRoot, questionToGrade)
if len(questions) > 1:
print 'Note: due to dependencies, the following tests will be run: %s' % ' '.join(questions)
return questions
if 'order' in problemDict:
return problemDict['order'].split()
return sorted(os.listdir(testRoot))
# evaluate student code
def evaluate(generateSolutions, testRoot, moduleDict, exceptionMap=ERROR_HINT_MAP,
edxOutput=False, muteOutput=False, gsOutput=False,
printTestCase=False, questionToGrade=None, display=None):
# imports of testbench code. note that the testClasses import must follow
# the import of student code due to dependencies
import testParser
import testClasses
for module in moduleDict:
setattr(sys.modules[__name__], module, moduleDict[module])
questions = []
questionDicts = {}
test_subdirs = getTestSubdirs(testParser, testRoot, questionToGrade)
for q in test_subdirs:
subdir_path = os.path.join(testRoot, q)
if not os.path.isdir(subdir_path) or q[0] == '.':
continue
# create a question object
questionDict = testParser.TestParser(os.path.join(subdir_path, 'CONFIG')).parse()
questionClass = getattr(testClasses, questionDict['class'])
question = questionClass(questionDict, display)
questionDicts[q] = questionDict
# load test cases into question
tests = filter(lambda t: re.match('[^#~.].*\.test\Z', t), os.listdir(subdir_path))
tests = map(lambda t: re.match('(.*)\.test\Z', t).group(1), tests)
for t in sorted(tests):
test_file = os.path.join(subdir_path, '%s.test' % t)
solution_file = os.path.join(subdir_path, '%s.solution' % t)
test_out_file = os.path.join(subdir_path, '%s.test_output' % t)
testDict = testParser.TestParser(test_file).parse()
if testDict.get("disabled", "false").lower() == "true":
continue
testDict['test_out_file'] = test_out_file
testClass = getattr(projectTestClasses, testDict['class'])
testCase = testClass(question, testDict)
def makefun(testCase, solution_file):
if generateSolutions:
# write solution file to disk
return lambda grades: testCase.writeSolution(moduleDict, solution_file)
else:
# read in solution dictionary and pass as an argument
testDict = testParser.TestParser(test_file).parse()
solutionDict = testParser.TestParser(solution_file).parse()
if printTestCase:
return lambda grades: printTest(testDict, solutionDict) or testCase.execute(grades, moduleDict, solutionDict)
else:
return lambda grades: testCase.execute(grades, moduleDict, solutionDict)
question.addTestCase(testCase, makefun(testCase, solution_file))
# Note extra function is necessary for scoping reasons
def makefun(question):
return lambda grades: question.execute(grades)
setattr(sys.modules[__name__], q, makefun(question))
questions.append((q, question.getMaxPoints()))
grades = grading.Grades(projectParams.PROJECT_NAME, questions,
gsOutput=gsOutput, edxOutput=edxOutput, muteOutput=muteOutput)
if questionToGrade == None:
for q in questionDicts:
for prereq in questionDicts[q].get('depends', '').split():
grades.addPrereq(q, prereq)
grades.grade(sys.modules[__name__], bonusPic = projectParams.BONUS_PIC)
return grades.points
def getDisplay(graphicsByDefault, options=None):
graphics = graphicsByDefault
if options is not None and options.noGraphics:
graphics = False
if graphics:
try:
import graphicsDisplay
return graphicsDisplay.PacmanGraphics(1, frameTime=.05)
except ImportError:
pass
import textDisplay
return textDisplay.NullGraphics()
if __name__ == '__main__':
options = readCommand(sys.argv)
if options.generateSolutions:
confirmGenerate()
codePaths = options.studentCode.split(',')
# moduleCodeDict = {}
# for cp in codePaths:
# moduleName = re.match('.*?([^/]*)\.py', cp).group(1)
# moduleCodeDict[moduleName] = readFile(cp, root=options.codeRoot)
# moduleCodeDict['projectTestClasses'] = readFile(options.testCaseCode, root=options.codeRoot)
# moduleDict = loadModuleDict(moduleCodeDict)
moduleDict = {}
for cp in codePaths:
moduleName = re.match('.*?([^/]*)\.py', cp).group(1)
moduleDict[moduleName] = loadModuleFile(moduleName, os.path.join(options.codeRoot, cp))
moduleName = re.match('.*?([^/]*)\.py', options.testCaseCode).group(1)
moduleDict['projectTestClasses'] = loadModuleFile(moduleName, os.path.join(options.codeRoot, options.testCaseCode))
if options.runTest != None:
runTest(options.runTest, moduleDict, printTestCase=options.printTestCase, display=getDisplay(True, options))
else:
evaluate(options.generateSolutions, options.testRoot, moduleDict,
gsOutput=options.gsOutput,
edxOutput=options.edxOutput, muteOutput=options.muteOutput, printTestCase=options.printTestCase,
questionToGrade=options.gradeQuestion, display=getDisplay(options.gradeQuestion!=None, options))
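
The options registered in readCommand correspond to the usual invocations: "python autograder.py" grades every question, "python autograder.py -q q2" grades a single question plus its dependencies, and "python autograder.py -t test_cases/q1/addition1" runs one test. A small sketch (run from the project directory) of how a command line maps onto the parsed options:

from autograder import readCommand
opts = readCommand(['-q', 'q2', '--no-graphics', '--mute'])
print opts.gradeQuestion   # 'q2'
print opts.noGraphics      # True
print opts.muteOutput      # True
print opts.testRoot        # 'test_cases', the default test directory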

43
p0_tutorial/buyLotsOfFruit.py Normal file
View File

@ -0,0 +1,43 @@
# buyLotsOfFruit.py
# -----------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
To run this script, type
python buyLotsOfFruit.py
Once you have correctly implemented the buyLotsOfFruit function,
the script should produce the output:
Cost of [('apples', 2.0), ('pears', 3.0), ('limes', 4.0)] is 12.25
"""
fruitPrices = {'apples':2.00, 'oranges': 1.50, 'pears': 1.75,
'limes':0.75, 'strawberries':1.00}
def buyLotsOfFruit(orderList):
"""
orderList: List of (fruit, numPounds) tuples
Returns cost of order
"""
totalCost = 0.0
"*** YOUR CODE HERE ***"
return totalCost
# Main Method
if __name__ == '__main__':
"This code runs when you invoke the script from the command line"
orderList = [ ('apples', 2.0), ('pears', 3.0), ('limes', 4.0) ]
print 'Cost of', orderList, 'is', buyLotsOfFruit(orderList)
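
The expected total follows directly from fruitPrices above; a quick sanity check of the arithmetic for the order in the docstring:

>>> 2.0 * 2.00 + 3.0 * 1.75 + 4.0 * 0.75   # apples, pears, limes at the prices above
12.25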

323
p0_tutorial/grading.py Normal file
View File

@ -0,0 +1,323 @@
# grading.py
# ----------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"Common code for autograders"
import cgi
import time
import sys
import json
import traceback
import pdb
from collections import defaultdict
import util
class Grades:
"A data structure for project grades, along with formatting code to display them"
def __init__(self, projectName, questionsAndMaxesList,
gsOutput=False, edxOutput=False, muteOutput=False):
"""
Defines the grading scheme for a project
projectName: project name
questionsAndMaxesDict: a list of (question name, max points per question)
"""
self.questions = [el[0] for el in questionsAndMaxesList]
self.maxes = dict(questionsAndMaxesList)
self.points = Counter()
self.messages = dict([(q, []) for q in self.questions])
self.project = projectName
self.start = time.localtime()[1:6]
self.sane = True # Sanity checks
self.currentQuestion = None # Which question we're grading
self.edxOutput = edxOutput
self.gsOutput = gsOutput # GradeScope output
self.mute = muteOutput
self.prereqs = defaultdict(set)
#print 'Autograder transcript for %s' % self.project
print 'Starting on %d-%d at %d:%02d:%02d' % self.start
def addPrereq(self, question, prereq):
self.prereqs[question].add(prereq)
def grade(self, gradingModule, exceptionMap = {}, bonusPic = False):
"""
Grades each question
gradingModule: the module with all the grading functions (pass in with sys.modules[__name__])
"""
completedQuestions = set([])
for q in self.questions:
print '\nQuestion %s' % q
print '=' * (9 + len(q))
print
self.currentQuestion = q
incompleted = self.prereqs[q].difference(completedQuestions)
if len(incompleted) > 0:
prereq = incompleted.pop()
print \
"""*** NOTE: Make sure to complete Question %s before working on Question %s,
*** because Question %s builds upon your answer for Question %s.
""" % (prereq, q, q, prereq)
continue
if self.mute: util.mutePrint()
try:
util.TimeoutFunction(getattr(gradingModule, q),1800)(self) # Call the question's function
#TimeoutFunction(getattr(gradingModule, q),1200)(self) # Call the question's function
except Exception, inst:
self.addExceptionMessage(q, inst, traceback)
self.addErrorHints(exceptionMap, inst, q[1])
except:
self.fail('FAIL: Terminated with a string exception.')
finally:
if self.mute: util.unmutePrint()
if self.points[q] >= self.maxes[q]:
completedQuestions.add(q)
print '\n### Question %s: %d/%d ###\n' % (q, self.points[q], self.maxes[q])
print '\nFinished at %d:%02d:%02d' % time.localtime()[3:6]
print "\nProvisional grades\n=================="
for q in self.questions:
print 'Question %s: %d/%d' % (q, self.points[q], self.maxes[q])
print '------------------'
print 'Total: %d/%d' % (self.points.totalCount(), sum(self.maxes.values()))
if bonusPic and self.points.totalCount() == 25:
print """
ALL HAIL GRANDPAC.
LONG LIVE THE GHOSTBUSTING KING.
--- ---- ---
| \ / + \ / |
| + \--/ \--/ + |
| + + |
| + + + |
@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
\ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
\ / @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
V \ @@@@@@@@@@@@@@@@@@@@@@@@@@@@
\ / @@@@@@@@@@@@@@@@@@@@@@@@@@
V @@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@
/\ @@@@@@@@@@@@@@@@@@@@@@
/ \ @@@@@@@@@@@@@@@@@@@@@@@@@
/\ / @@@@@@@@@@@@@@@@@@@@@@@@@@@
/ \ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
/ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@
"""
print """
Your grades are NOT yet registered. To register your grades, make sure
to follow your instructor's guidelines to receive credit on your project.
"""
if self.edxOutput:
self.produceOutput()
if self.gsOutput:
self.produceGradeScopeOutput()
def addExceptionMessage(self, q, inst, traceback):
"""
Method to format the exception message; this is more complicated because
we need to cgi.escape the traceback but wrap the exception in a <pre> tag
"""
self.fail('FAIL: Exception raised: %s' % inst)
self.addMessage('')
for line in traceback.format_exc().split('\n'):
self.addMessage(line)
def addErrorHints(self, exceptionMap, errorInstance, questionNum):
typeOf = str(type(errorInstance))
questionName = 'q' + questionNum
errorHint = ''
# question specific error hints
if exceptionMap.get(questionName):
questionMap = exceptionMap.get(questionName)
if (questionMap.get(typeOf)):
errorHint = questionMap.get(typeOf)
# fall back to general error messages if a question specific
# one does not exist
if (exceptionMap.get(typeOf)):
errorHint = exceptionMap.get(typeOf)
# don't include the HTML if we have no error hint
if not errorHint:
return ''
for line in errorHint.split('\n'):
self.addMessage(line)
def produceGradeScopeOutput(self):
out_dct = {}
# total of entire submission
total_possible = sum(self.maxes.values())
total_score = sum(self.points.values())
out_dct['score'] = total_score
out_dct['max_score'] = total_possible
out_dct['output'] = "Total score (%d / %d)" % (total_score, total_possible)
# individual tests
tests_out = []
for name in self.questions:
test_out = {}
# test name
test_out['name'] = name
# test score
test_out['score'] = self.points[name]
test_out['max_score'] = self.maxes[name]
# others
is_correct = self.points[name] >= self.maxes[name]
test_out['output'] = " Question {num} ({points}/{max}) {correct}".format(
num=(name[1] if len(name) == 2 else name),
points=test_out['score'],
max=test_out['max_score'],
correct=('X' if not is_correct else ''),
)
test_out['tags'] = []
tests_out.append(test_out)
out_dct['tests'] = tests_out
# file output
with open('gradescope_response.json', 'w') as outfile:
json.dump(out_dct, outfile)
return
def produceOutput(self):
edxOutput = open('edx_response.html', 'w')
edxOutput.write("<div>")
# first sum
total_possible = sum(self.maxes.values())
total_score = sum(self.points.values())
checkOrX = '<span class="incorrect"/>'
if (total_score >= total_possible):
checkOrX = '<span class="correct"/>'
header = """
<h3>
Total score ({total_score} / {total_possible})
</h3>
""".format(total_score = total_score,
total_possible = total_possible,
checkOrX = checkOrX
)
edxOutput.write(header)
for q in self.questions:
if len(q) == 2:
name = q[1]
else:
name = q
checkOrX = '<span class="incorrect"/>'
if (self.points[q] >= self.maxes[q]):
checkOrX = '<span class="correct"/>'
#messages = '\n<br/>\n'.join(self.messages[q])
messages = "<pre>%s</pre>" % '\n'.join(self.messages[q])
output = """
<div class="test">
<section>
<div class="shortform">
Question {q} ({points}/{max}) {checkOrX}
</div>
<div class="longform">
{messages}
</div>
</section>
</div>
""".format(q = name,
max = self.maxes[q],
messages = messages,
checkOrX = checkOrX,
points = self.points[q]
)
# print "*** output for Question %s " % q[1]
# print output
edxOutput.write(output)
edxOutput.write("</div>")
edxOutput.close()
edxOutput = open('edx_grade', 'w')
edxOutput.write(str(self.points.totalCount()))
edxOutput.close()
def fail(self, message, raw=False):
"Sets sanity check bit to false and outputs a message"
self.sane = False
self.assignZeroCredit()
self.addMessage(message, raw)
def assignZeroCredit(self):
self.points[self.currentQuestion] = 0
def addPoints(self, amt):
self.points[self.currentQuestion] += amt
def deductPoints(self, amt):
self.points[self.currentQuestion] -= amt
def assignFullCredit(self, message="", raw=False):
self.points[self.currentQuestion] = self.maxes[self.currentQuestion]
if message != "":
self.addMessage(message, raw)
def addMessage(self, message, raw=False):
if not raw:
# We assume raw messages, formatted for HTML, are printed separately
if self.mute: util.unmutePrint()
print '*** ' + message
if self.mute: util.mutePrint()
message = cgi.escape(message)
self.messages[self.currentQuestion].append(message)
def addMessageToEmail(self, message):
print "WARNING**** addMessageToEmail is deprecated %s" % message
for line in message.split('\n'):
pass
#print '%%% ' + line + ' %%%'
#self.messages[self.currentQuestion].append(line)
class Counter(dict):
"""
Dict with default 0
"""
def __getitem__(self, idx):
try:
return dict.__getitem__(self, idx)
except KeyError:
return 0
def totalCount(self):
"""
Returns the sum of counts for all keys.
"""
return sum(self.values())
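
Counter is what Grades uses for self.points, so per-question scores can be incremented without initializing keys first. A minimal usage sketch:

from grading import Counter
points = Counter()
points['q1'] += 1          # missing keys read as 0, so no setup is needed
print points['q2']         # 0
print points.totalCount()  # 1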

18
p0_tutorial/projectParams.py Normal file
View File

@ -0,0 +1,18 @@
# projectParams.py
# ----------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
STUDENT_CODE_DEFAULT = 'addition.py,buyLotsOfFruit.py,shopSmart.py,shopAroundTown.py'
PROJECT_TEST_CLASSES = 'tutorialTestClasses.py'
PROJECT_NAME = 'Project 0: Tutorial'
BONUS_PIC = False

59
p0_tutorial/shop.py Normal file
View File

@ -0,0 +1,59 @@
# shop.py
# -------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
class FruitShop:
def __init__(self, name, fruitPrices):
"""
name: Name of the fruit shop
fruitPrices: Dictionary with keys as fruit
strings and prices for values e.g.
{'apples':2.00, 'oranges': 1.50, 'pears': 1.75}
"""
self.fruitPrices = fruitPrices
self.name = name
print 'Welcome to %s fruit shop' % (name)
def getCostPerPound(self, fruit):
"""
fruit: Fruit string
Returns the cost of 'fruit' if it is in our inventory,
or None otherwise
"""
if fruit not in self.fruitPrices:
return None
return self.fruitPrices[fruit]
def getPriceOfOrder(self, orderList):
"""
orderList: List of (fruit, numPounds) tuples
Returns cost of orderList. If any of the fruit are not in
this shop's inventory, they are skipped and add nothing to the cost.
"""
totalCost = 0.0
for fruit, numPounds in orderList:
costPerPound = self.getCostPerPound(fruit)
if costPerPound != None:
totalCost += numPounds * costPerPound
return totalCost
def getName(self):
return self.name
def __str__(self):
return "<FruitShop: %s>" % self.getName()
def __repr__(self):
return str(self)
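
A short usage sketch of the class above (the shop name and prices are made up for illustration):

import shop
prices = {'apples': 2.00, 'oranges': 1.50, 'pears': 1.75}
shopA = shop.FruitShop('the Berkeley Bowl', prices)             # prints the welcome line
print shopA.getCostPerPound('apples')                           # 2.0
print shopA.getCostPerPound('kiwis')                            # None (not in inventory)
print shopA.getPriceOfOrder([('apples', 2.0), ('pears', 1.0)])  # 2*2.00 + 1*1.75 = 5.75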

108
p0_tutorial/shopAroundTown.py Normal file
View File

@ -0,0 +1,108 @@
# shopAroundTown.py
# -----------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
Here's the intended output of this script, once you fill it in:
Welcome to shop1 fruit shop
Welcome to shop2 fruit shop
Welcome to shop3 fruit shop
Orders: [('apples', 1.0), ('oranges', 3.0), ('limes', 2.0)]
At gas price 1 the best route is: ['shop1', 'shop2', 'shop3']
At gas price 3 the best route is: ['shop1', 'shop3']
At gas price 5 the best route is: ['shop2']
At gas price -1 the best route is: ['shop2', 'shop1', 'shop3']
"""
import shop
import town
def shopAroundTown(orderList, fruitTown, gasCost):
"""
orderList: List of (fruit, numPound) tuples
fruitTown: A Town object
gasCost: A number representing the cost of going one mile
Returns a list of shops in the order that is the optimal route to take when
buying the fruit in the orderList
"""
possibleRoutes = []
subsets = getAllSubsets(fruitTown.getShops())
for subset in subsets:
names = [ shop.getName() for shop in subset ]
if fruitTown.allFruitsCarriedAtShops(orderList, names):
possibleRoutes += getAllPermutations(subset)
minCost, bestRoute = None, None
for route in possibleRoutes:
cost = fruitTown.getPriceOfOrderOnRoute(orderList, route, gasCost)
if minCost == None or cost < minCost:
minCost, bestRoute = cost, route
return bestRoute
def getAllSubsets(lst):
"""
lst: A list
Returns the powerset of lst, i.e. a list of all the possible subsets of lst
"""
if not lst:
return [[]]  # the power set of the empty list contains just the empty list
withFirst = [ [lst[0]] + rest for rest in getAllSubsets(lst[1:]) ]
withoutFirst = getAllSubsets(lst[1:])
return withFirst + withoutFirst
def getAllPermutations(lst):
"""
lst: A list
Returns a list of all permutations of lst
"""
if not lst:
return []
elif len(lst) == 1:
return [lst]  # a single-element list has exactly one permutation: itself
allPermutations = []
for i in range(len(lst)):
item = lst[i]
withoutItem = lst[:i] + lst[i+1:]  # drop lst[i] so the recursion shrinks
allPermutations += prependToAll(item, getAllPermutations(withoutItem))
return allPermutations
def prependToAll(item, lsts):
"""
item: Any object
lsts: A list of lists
Returns a copy of lsts with item prepended to each list contained in lsts
"""
return [ [item] + lst for lst in lsts ]
if __name__ == '__main__':
"This code runs when you invoke the script from the command line"
orders = [('apples', 1.0), ('oranges', 3.0), ('limes', 2.0)]
dir1 = {'apples': 2.0, 'oranges': 1.0}
dir2 = {'apples': 1.0, 'oranges': 5.0, 'limes': 3.0}
dir3 = {'apples': 2.0, 'limes': 2.0}
shop1 = shop.FruitShop('shop1', dir1)
shop2 = shop.FruitShop('shop2', dir2)
shop3 = shop.FruitShop('shop3', dir3)
shops = [shop1, shop2, shop3]
distances = { ('home', 'shop1') : 2,
('home', 'shop2') : 1,
('home', 'shop3') : 1,
('shop1', 'shop2') : 2.5,
('shop1', 'shop3') : 2.5,
('shop2', 'shop3') : 1
}
fruitTown = town.Town(shops, distances)
print "Orders:", orders
for price in (1, 3, 5, -1):
print "At gas price", price, "the best route is:", \
shopAroundTown(orders, fruitTown, price)
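
Quick sanity checks for the helper functions above (run from the project directory); the route search relies on getAllSubsets returning the full power set and getAllPermutations returning every ordering:

from shopAroundTown import getAllSubsets, getAllPermutations, prependToAll
print getAllSubsets([1, 2])         # [[1, 2], [1], [2], []]
print getAllPermutations([1, 2])    # [[1, 2], [2, 1]]
print prependToAll(0, [[1], [2]])   # [[0, 1], [0, 2]]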

44
p0_tutorial/shopSmart.py Normal file
View File

@ -0,0 +1,44 @@
# shopSmart.py
# ------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
Here's the intended output of this script, once you fill it in:
Welcome to shop1 fruit shop
Welcome to shop2 fruit shop
For orders: [('apples', 1.0), ('oranges', 3.0)] best shop is shop1
For orders: [('apples', 3.0)] best shop is shop2
"""
import shop
def shopSmart(orderList, fruitShops):
"""
orderList: List of (fruit, numPound) tuples
fruitShops: List of FruitShops
"""
"*** YOUR CODE HERE ***"
return None
if __name__ == '__main__':
"This code runs when you invoke the script from the command line"
orders = [('apples',1.0), ('oranges',3.0)]
dir1 = {'apples': 2.0, 'oranges':1.0}
shop1 = shop.FruitShop('shop1',dir1)
dir2 = {'apples': 1.0, 'oranges': 5.0}
shop2 = shop.FruitShop('shop2',dir2)
shops = [shop1, shop2]
print "For orders ", orders, ", the best shop is", shopSmart(orders, shops).getName()
orders = [('apples',3.0)]
print "For orders: ", orders, ", the best shop is", shopSmart(orders, shops).getName()

File diff suppressed because one or more lines are too long

206
p0_tutorial/testClasses.py Normal file
View File

@ -0,0 +1,206 @@
# testClasses.py
# --------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
# import modules from python standard library
import inspect
import re
import sys
# Class which models a question in a project. Note that questions have a
# maximum number of points they are worth, and are composed of a series of
# test cases
class Question(object):
def raiseNotDefined(self):
print 'Method not implemented: %s' % inspect.stack()[1][3]
sys.exit(1)
def __init__(self, questionDict, display):
self.maxPoints = int(questionDict['max_points'])
self.testCases = []
self.display = display
def getDisplay(self):
return self.display
def getMaxPoints(self):
return self.maxPoints
# Note that 'thunk' must be a function which accepts a single argument,
# namely a 'grading' object
def addTestCase(self, testCase, thunk):
self.testCases.append((testCase, thunk))
def execute(self, grades):
self.raiseNotDefined()
# Question in which all test cases must be passed in order to receive credit
class PassAllTestsQuestion(Question):
def execute(self, grades):
# TODO: is this the right way to use grades? The autograder doesn't seem to use it.
testsFailed = False
grades.assignZeroCredit()
for _, f in self.testCases:
if not f(grades):
testsFailed = True
if testsFailed:
grades.fail("Tests failed.")
else:
grades.assignFullCredit()
class ExtraCreditPassAllTestsQuestion(Question):
def __init__(self, questionDict, display):
Question.__init__(self, questionDict, display)
self.extraPoints = int(questionDict['extra_points'])
def execute(self, grades):
# TODO: is this the right way to use grades? The autograder doesn't seem to use it.
testsFailed = False
grades.assignZeroCredit()
for _, f in self.testCases:
if not f(grades):
testsFailed = True
if testsFailed:
grades.fail("Tests failed.")
else:
grades.assignFullCredit()
grades.addPoints(self.extraPoints)
# Question in which partial credit is given for test cases with a ``points'' property.
# All other tests are mandatory and must be passed.
class HackedPartialCreditQuestion(Question):
def execute(self, grades):
# TODO: is this the right way to use grades? The autograder doesn't seem to use it.
grades.assignZeroCredit()
points = 0
passed = True
for testCase, f in self.testCases:
testResult = f(grades)
if "points" in testCase.testDict:
if testResult: points += float(testCase.testDict["points"])
else:
passed = passed and testResult
## FIXME: Below terrible hack to match q3's logic
if int(points) == self.maxPoints and not passed:
grades.assignZeroCredit()
else:
grades.addPoints(int(points))
class Q6PartialCreditQuestion(Question):
"""Fails any test which returns False, otherwise doesn't effect the grades object.
Partial credit tests will add the required points."""
def execute(self, grades):
grades.assignZeroCredit()
results = []
for _, f in self.testCases:
results.append(f(grades))
if False in results:
grades.assignZeroCredit()
class PartialCreditQuestion(Question):
"""Fails any test which returns False, otherwise doesn't effect the grades object.
Partial credit tests will add the required points."""
def execute(self, grades):
grades.assignZeroCredit()
for _, f in self.testCases:
if not f(grades):
grades.assignZeroCredit()
grades.fail("Tests failed.")
return False
class NumberPassedQuestion(Question):
"""Grade is the number of test cases passed."""
def execute(self, grades):
grades.addPoints([f(grades) for _, f in self.testCases].count(True))
# Template modeling a generic test case
class TestCase(object):
def raiseNotDefined(self):
print 'Method not implemented: %s' % inspect.stack()[1][3]
sys.exit(1)
def getPath(self):
return self.path
def __init__(self, question, testDict):
self.question = question
self.testDict = testDict
self.path = testDict['path']
self.messages = []
def __str__(self):
self.raiseNotDefined()
def execute(self, grades, moduleDict, solutionDict):
self.raiseNotDefined()
def writeSolution(self, moduleDict, filePath):
self.raiseNotDefined()
return True
# Tests should call the following messages for grading
# to ensure a uniform format for test output.
#
# TODO: this is hairy, but we need to fix grading.py's interface
# to get a nice hierarchical project - question - test structure,
# then these should be moved into Question proper.
def testPass(self, grades):
grades.addMessage('PASS: %s' % (self.path,))
for line in self.messages:
grades.addMessage(' %s' % (line,))
return True
def testFail(self, grades):
grades.addMessage('FAIL: %s' % (self.path,))
for line in self.messages:
grades.addMessage(' %s' % (line,))
return False
# This should really be question level?
#
def testPartial(self, grades, points, maxPoints):
grades.addPoints(points)
extraCredit = max(0, points - maxPoints)
regularCredit = points - extraCredit
grades.addMessage('%s: %s (%s of %s points)' % ("PASS" if points >= maxPoints else "FAIL", self.path, regularCredit, maxPoints))
if extraCredit > 0:
grades.addMessage('EXTRA CREDIT: %s points' % (extraCredit,))
for line in self.messages:
grades.addMessage(' %s' % (line,))
return True
def addMessage(self, message):
self.messages.extend(message.split('\n'))
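
The Question subclasses above are instantiated by autograder.evaluate from the CONFIG dictionaries produced by testParser; a minimal sketch of that construction (display left as None, which is fine because these classes never use it directly):

import testClasses
q = testClasses.PassAllTestsQuestion({'max_points': '1'}, None)
print q.getMaxPoints()   # 1
print q.testCases        # [] until autograder.evaluate calls addTestCase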

85
p0_tutorial/testParser.py Normal file
View File

@ -0,0 +1,85 @@
# testParser.py
# -------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import re
import sys
class TestParser(object):
def __init__(self, path):
# save the path to the test file
self.path = path
def removeComments(self, rawlines):
# remove any portion of a line following a '#' symbol
fixed_lines = []
for l in rawlines:
idx = l.find('#')
if idx == -1:
fixed_lines.append(l)
else:
fixed_lines.append(l[0:idx])
return '\n'.join(fixed_lines)
def parse(self):
# read in the test case and remove comments
test = {}
with open(self.path) as handle:
raw_lines = handle.read().split('\n')
test_text = self.removeComments(raw_lines)
test['__raw_lines__'] = raw_lines
test['path'] = self.path
test['__emit__'] = []
lines = test_text.split('\n')
i = 0
# read a property in each loop cycle
while(i < len(lines)):
# skip blank lines
if re.match('\A\s*\Z', lines[i]):
test['__emit__'].append(("raw", raw_lines[i]))
i += 1
continue
m = re.match('\A([^"]*?):\s*"([^"]*)"\s*\Z', lines[i])
if m:
test[m.group(1)] = m.group(2)
test['__emit__'].append(("oneline", m.group(1)))
i += 1
continue
m = re.match('\A([^"]*?):\s*"""\s*\Z', lines[i])
if m:
msg = []
i += 1
while(not re.match('\A\s*"""\s*\Z', lines[i])):
msg.append(raw_lines[i])
i += 1
test[m.group(1)] = '\n'.join(msg)
test['__emit__'].append(("multiline", m.group(1)))
i += 1
continue
print 'error parsing test file: %s' % self.path
sys.exit(1)
return test
def emitTestDict(testDict, handle):
for kind, data in testDict['__emit__']:
if kind == "raw":
handle.write(data + "\n")
elif kind == "oneline":
handle.write('%s: "%s"\n' % (data, testDict[data]))
elif kind == "multiline":
handle.write('%s: """\n%s\n"""\n' % (data, testDict[data]))
else:
raise Exception("Bad __emit__")
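
The parser above accepts two property syntaxes: key: "value" on a single line, and key: """ ... """ spanning several lines (as the preamble fields of the q3 tests below do). A minimal round trip using a hypothetical scratch file:

import testParser
with open('example.test', 'w') as f:                 # hypothetical file name
    f.write('class: "EvalTest"\n')
    f.write('test: "addition.add(1,1)"\n')
parsed = testParser.TestParser('example.test').parse()
print parsed['class'], parsed['test']                # EvalTest addition.add(1,1)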

1
p0_tutorial/test_cases/CONFIG Normal file
View File

@ -0,0 +1 @@
order: "q1 q2 q3"

2
p0_tutorial/test_cases/q1/CONFIG Normal file
View File

@ -0,0 +1,2 @@
max_points: "1"
class: "PassAllTestsQuestion"

3
p0_tutorial/test_cases/q1/addition1.solution Normal file
View File

@ -0,0 +1,3 @@
# This is the solution file for test_cases/q1/addition1.test.
# The result of evaluating the test must equal the below when cast to a string.
result: "2"

7
p0_tutorial/test_cases/q1/addition1.test Normal file
View File

@ -0,0 +1,7 @@
class: "EvalTest"
success: "add(a,b) returns the sum of a and b"
failure: "add(a,b) must return the sum of a and b"
# A python expression to be evaluated. This expression must return the
# same result for the student and instructor's code.
test: "addition.add(1,1)"

3
p0_tutorial/test_cases/q1/addition2.solution Normal file
View File

@ -0,0 +1,3 @@
# This is the solution file for test_cases/q1/addition2.test.
# The result of evaluating the test must equal the below when cast to a string.
result: "5"

7
p0_tutorial/test_cases/q1/addition2.test Normal file
View File

@ -0,0 +1,7 @@
class: "EvalTest"
success: "add(a,b) returns the sum of a and b"
failure: "add(a,b) must return the sum of a and b"
# A python expression to be evaluated. This expression must return the
# same result for the student and instructor's code.
test: "addition.add(2,3)"

3
p0_tutorial/test_cases/q1/addition3.solution Normal file
View File

@ -0,0 +1,3 @@
# This is the solution file for test_cases/q1/addition3.test.
# The result of evaluating the test must equal the below when cast to a string.
result: "7.9"

7
p0_tutorial/test_cases/q1/addition3.test Normal file
View File

@ -0,0 +1,7 @@
class: "EvalTest"
success: "add(a,b) returns the sum of a and b"
failure: "add(a,b) must return the sum of a and b"
# A python expression to be evaluated. This expression must return the
# same result for the student and instructor's code.
test: "addition.add(10,-2.1)"

2
p0_tutorial/test_cases/q2/CONFIG Normal file
View File

@ -0,0 +1,2 @@
max_points: "1"
class: "PassAllTestsQuestion"

3
p0_tutorial/test_cases/q2/food_price1.solution Normal file
View File

@ -0,0 +1,3 @@
# This is the solution file for test_cases/q2/food_price1.test.
# The result of evaluating the test must equal the below when cast to a string.
result: "12.25"

7
p0_tutorial/test_cases/q2/food_price1.test Normal file
View File

@ -0,0 +1,7 @@
class: "EvalTest"
success: "buyLotsOfFruit correctly computes the cost of the order"
failure: "buyLotsOfFruit must compute the correct cost of the order"
# A python expression to be evaluated. This expression must return the
# same result for the student and instructor's code.
test: "buyLotsOfFruit.buyLotsOfFruit([ ('apples', 2.0), ('pears',3.0), ('limes',4.0) ])"

3
p0_tutorial/test_cases/q2/food_price2.solution Normal file
View File

@ -0,0 +1,3 @@
# This is the solution file for test_cases/q2/food_price2.test.
# The result of evaluating the test must equal the below when cast to a string.
result: "14.75"

7
p0_tutorial/test_cases/q2/food_price2.test Normal file
View File

@ -0,0 +1,7 @@
class: "EvalTest"
success: "buyLotsOfFruit correctly computes the cost of the order"
failure: "buyLotsOfFruit must compute the correct cost of the order"
# A python expression to be evaluated. This expression must return the
# same result for the student and instructor's code.
test: "buyLotsOfFruit.buyLotsOfFruit([ ('apples', 4.0), ('pears',3.0), ('limes',2.0) ])"

3
p0_tutorial/test_cases/q2/food_price3.solution Normal file
View File

@ -0,0 +1,3 @@
# This is the solution file for test_cases/q2/food_price3.test.
# The result of evaluating the test must equal the below when cast to a string.
result: "6.4375"

7
p0_tutorial/test_cases/q2/food_price3.test Normal file
View File

@ -0,0 +1,7 @@
class: "EvalTest"
success: "buyLotsOfFruit correctly computes the cost of the order"
failure: "buyLotsOfFruit must compute the correct cost of the order"
# A python expression to be evaluated. This expression must return the
# same result for the student and instructor's code.
test: "buyLotsOfFruit.buyLotsOfFruit([ ('apples', 1.25), ('pears',1.50), ('limes',1.75) ])"

2
p0_tutorial/test_cases/q3/CONFIG Normal file
View File

@ -0,0 +1,2 @@
max_points: "1"
class: "PassAllTestsQuestion"

3
p0_tutorial/test_cases/q3/select_shop1.solution Normal file
View File

@ -0,0 +1,3 @@
# This is the solution file for test_cases/q3/select_shop1.test.
# The result of evaluating the test must equal the below when cast to a string.
result: "<FruitShop: shop1>"

21
p0_tutorial/test_cases/q3/select_shop1.test Normal file
View File

@ -0,0 +1,21 @@
class: "EvalTest"
success: "shopSmart(order, shops) selects the cheapest shop"
failure: "shopSmart(order, shops) must select the cheapest shop"
# Python statements initializing variables for the test below.
preamble: """
import shop
dir1 = {'apples': 2.0, 'oranges':1.0}
shop1 = shop.FruitShop('shop1',dir1)
dir2 = {'apples': 1.0, 'oranges': 5.0}
shop2 = shop.FruitShop('shop2',dir2)
shops = [shop1, shop2]
order = [('apples',1.0), ('oranges',3.0)]
ans = shopSmart.shopSmart(order, shops)
"""
# A python expression to be evaluated. This expression must return the
# same result for the student and instructor's code.
test: "ans"

3
p0_tutorial/test_cases/q3/select_shop2.solution Normal file
View File

@ -0,0 +1,3 @@
# This is the solution file for test_cases/q3/select_shop2.test.
# The result of evaluating the test must equal the below when cast to a string.
result: "<FruitShop: shop2>"

21
p0_tutorial/test_cases/q3/select_shop2.test Normal file
View File

@ -0,0 +1,21 @@
class: "EvalTest"
success: "shopSmart(order, shops) selects the cheapest shop"
failure: "shopSmart(order, shops) must select the cheapest shop"
# Python statements initializing variables for the test below.
preamble: """
import shop
dir1 = {'apples': 2.0, 'oranges':1.0}
shop1 = shop.FruitShop('shop1',dir1)
dir2 = {'apples': 1.0, 'oranges': 5.0}
shop2 = shop.FruitShop('shop2',dir2)
shops = [shop1, shop2]
order = [('apples',3.0)]
ans = shopSmart.shopSmart(order, shops)
"""
# A python expression to be evaluated. This expression must return the
# same result for the student and instructor's code.
test: "ans"

3
p0_tutorial/test_cases/q3/select_shop3.solution Normal file
View File

@ -0,0 +1,3 @@
# This is the solution file for test_cases/q3/select_shop3.test.
# The result of evaluating the test must equal the below when cast to a string.
result: "<FruitShop: shop3>"

23
p0_tutorial/test_cases/q3/select_shop3.test Normal file
View File

@ -0,0 +1,23 @@
class: "EvalTest"
success: "shopSmart(order, shops) selects the cheapest shop"
failure: "shopSmart(order, shops) must select the cheapest shop"
# Python statements initializing variables for the test below.
preamble: """
import shop
dir1 = {'apples': 2.0, 'oranges':1.0}
shop1 = shop.FruitShop('shop1',dir1)
dir2 = {'apples': 1.0, 'oranges': 5.0}
shop2 = shop.FruitShop('shop2',dir2)
dir3 = {'apples': 1.5, 'oranges': 2.0}
shop3 = shop.FruitShop('shop3',dir3)
shops = [shop1, shop2, shop3]
order = [('apples',10.0), ('oranges',3.0)]
ans = shopSmart.shopSmart(order, shops)
"""
# A python expression to be evaluated. This expression must return the
# same result for the student and instructor's code.
test: "ans"

81
p0_tutorial/textDisplay.py Normal file
View File

@ -0,0 +1,81 @@
# textDisplay.py
# --------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import time
try:
import pacman
except:
pass
DRAW_EVERY = 1
SLEEP_TIME = 0 # This can be overwritten by __init__
DISPLAY_MOVES = False
QUIET = False # Suppresses output
class NullGraphics:
def initialize(self, state, isBlue = False):
pass
def update(self, state):
pass
def checkNullDisplay(self):
return True
def pause(self):
time.sleep(SLEEP_TIME)
def draw(self, state):
print state
def updateDistributions(self, dist):
pass
def finish(self):
pass
class PacmanGraphics:
def __init__(self, speed=None):
if speed != None:
global SLEEP_TIME
SLEEP_TIME = speed
def initialize(self, state, isBlue = False):
self.draw(state)
self.pause()
self.turn = 0
self.agentCounter = 0
def update(self, state):
numAgents = len(state.agentStates)
self.agentCounter = (self.agentCounter + 1) % numAgents
if self.agentCounter == 0:
self.turn += 1
if DISPLAY_MOVES:
ghosts = [pacman.nearestPoint(state.getGhostPosition(i)) for i in range(1, numAgents)]
print "%4d) P: %-8s" % (self.turn, str(pacman.nearestPoint(state.getPacmanPosition()))),'| Score: %-5d' % state.score,'| Ghosts:', ghosts
if self.turn % DRAW_EVERY == 0:
self.draw(state)
self.pause()
if state._win or state._lose:
self.draw(state)
def pause(self):
time.sleep(SLEEP_TIME)
def draw(self, state):
print state
def finish(self):
pass

105
p0_tutorial/town.py Normal file
View File

@ -0,0 +1,105 @@
# town.py
# -------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import shop
class Town:
def __init__(self, shops, distances):
"""
shops: List of FruitShop objects
distances: Dictionary with keys as pairs (tuples) of names of places
('home' or name strings of FruitShops) and numbers for values which
represent the distance between the two places in miles, e.g.
{('home','shop1') : 1, ('home','shop2') : 1, ('shop1','shop2') : 2}
"""
self.shops = shops
self.distances = distances
def getFruitCostPerPoundOnRoute(self, fruit, route):
"""
fruit: Fruit string
route: List of shop names
Returns the best cost per pound of 'fruit' at any of the shops along
the route. If none of the shops carry 'fruit', returns None
"""
routeShops = [ shop for shop in self.shops if shop.getName() in route ]
costs = []
for shop in routeShops:
cost = shop.getCostPerPound(fruit)
if cost is not None:
costs.append(cost)
if not costs:
# None of the shops carry this fruit
return None
return min(costs)
def allFruitsCarriedAtShops(self, orderList, shops):
"""
orderList: List of (fruit, numPounds) tuples
shops: List of shop names
Returns whether all fruit in the order list can be purchased at at least
one of these shops.
"""
return None not in [self.getFruitCostPerPoundOnRoute(fruit, shops)
for fruit, _ in orderList]
def getDistance(self, loc1, loc2):
"""
loc1: A name of a place ('home' or the name of a FruitShop in town)
loc2: A name of a place ('home' or the name of a FruitShop in town)
Returns the distance between these two places in this town.
"""
if (loc1, loc2) in self.distances:
return self.distances[(loc1, loc2)]
return self.distances[(loc2, loc1)]
def getTotalDistanceOnRoute(self, route):
"""
route: List of shop names
Returns the total distance traveled by starting at 'home', going to
each shop on the route in order, then returning to 'home'
"""
if not route:
return 0
totalDistance = self.getDistance('home', route[0])
for i in xrange(len(route) - 1):
totalDistance += self.getDistance(route[i], route[i+1])
totalDistance += self.getDistance(route[-1], 'home')
return totalDistance
def getPriceOfOrderOnRoute(self, orderList, route, gasCost):
"""
orderList: List of (fruit, numPounds) tuples
route: List of shop names
gasCost: A number representing the cost of driving 1 mile
Returns cost of orderList on this route. If any fruit are not available
on this route, returns None.
"""
totalCost = self.getTotalDistanceOnRoute(route) * gasCost
for fruit, numPounds in orderList:
costPerPound = self.getFruitCostPerPoundOnRoute(fruit, route)
if costPerPound is not None:
totalCost += numPounds * costPerPound
return totalCost
def getShops(self):
return self.shops
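
A small sketch of the distance bookkeeping above, using a hypothetical two-shop town; note that getDistance accepts the pair of place names in either order:

import shop, town
s1 = shop.FruitShop('shop1', {'apples': 2.0})
s2 = shop.FruitShop('shop2', {'apples': 1.0})
t = town.Town([s1, s2], {('home', 'shop1'): 2, ('home', 'shop2'): 1, ('shop1', 'shop2'): 2.5})
print t.getDistance('shop2', 'shop1')                # 2.5
print t.getTotalDistanceOnRoute(['shop1', 'shop2'])  # 2 + 2.5 + 1 = 5.5 (home -> shop1 -> shop2 -> home)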

56
p0_tutorial/tutorialTestClasses.py Normal file
View File

@ -0,0 +1,56 @@
# tutorialTestClasses.py
# ----------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import testClasses
# Simple test case which evals an arbitrary piece of python code.
# The test is correct if the output of the code given the student's
# solution matches that of the instructor's.
class EvalTest(testClasses.TestCase):
def __init__(self, question, testDict):
super(EvalTest, self).__init__(question, testDict)
self.preamble = compile(testDict.get('preamble', ""), "%s.preamble" % self.getPath(), 'exec')
self.test = compile(testDict['test'], "%s.test" % self.getPath(), 'eval')
self.success = testDict['success']
self.failure = testDict['failure']
def evalCode(self, moduleDict):
bindings = dict(moduleDict)
exec self.preamble in bindings
return str(eval(self.test, bindings))
def execute(self, grades, moduleDict, solutionDict):
result = self.evalCode(moduleDict)
if result == solutionDict['result']:
grades.addMessage('PASS: %s' % self.path)
grades.addMessage('\t%s' % self.success)
return True
else:
grades.addMessage('FAIL: %s' % self.path)
grades.addMessage('\t%s' % self.failure)
grades.addMessage('\tstudent result: "%s"' % result)
grades.addMessage('\tcorrect result: "%s"' % solutionDict['result'])
return False
def writeSolution(self, moduleDict, filePath):
handle = open(filePath, 'w')
handle.write('# This is the solution file for %s.\n' % self.path)
handle.write('# The result of evaluating the test must equal the below when cast to a string.\n')
handle.write('result: "%s"\n' % self.evalCode(moduleDict))
handle.close()
return True
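
For reference, evalCode above runs the test's preamble (if any) in a namespace seeded with the student modules and then evaluates the test expression in that same namespace; the str() of that value is what gets compared to the .solution file. A stripped-down sketch of the same idea for the q1 expression:

import addition
bindings = {'addition': addition}                          # the role played by moduleDict
value = eval(compile('addition.add(1,1)', '<test>', 'eval'), bindings)
print str(value)                                           # compared against result: "2"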

674
p0_tutorial/util.py Normal file
View File

@ -0,0 +1,674 @@
# util.py
# -------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import sys
import inspect
import heapq, random
import cStringIO
class FixedRandom:
def __init__(self):
fixedState = (3, (2147483648L, 507801126L, 683453281L, 310439348L, 2597246090L, \
2209084787L, 2267831527L, 979920060L, 3098657677L, 37650879L, 807947081L, 3974896263L, \
881243242L, 3100634921L, 1334775171L, 3965168385L, 746264660L, 4074750168L, 500078808L, \
776561771L, 702988163L, 1636311725L, 2559226045L, 157578202L, 2498342920L, 2794591496L, \
4130598723L, 496985844L, 2944563015L, 3731321600L, 3514814613L, 3362575829L, 3038768745L, \
2206497038L, 1108748846L, 1317460727L, 3134077628L, 988312410L, 1674063516L, 746456451L, \
3958482413L, 1857117812L, 708750586L, 1583423339L, 3466495450L, 1536929345L, 1137240525L, \
3875025632L, 2466137587L, 1235845595L, 4214575620L, 3792516855L, 657994358L, 1241843248L, \
1695651859L, 3678946666L, 1929922113L, 2351044952L, 2317810202L, 2039319015L, 460787996L, \
3654096216L, 4068721415L, 1814163703L, 2904112444L, 1386111013L, 574629867L, 2654529343L, \
3833135042L, 2725328455L, 552431551L, 4006991378L, 1331562057L, 3710134542L, 303171486L, \
1203231078L, 2670768975L, 54570816L, 2679609001L, 578983064L, 1271454725L, 3230871056L, \
2496832891L, 2944938195L, 1608828728L, 367886575L, 2544708204L, 103775539L, 1912402393L, \
1098482180L, 2738577070L, 3091646463L, 1505274463L, 2079416566L, 659100352L, 839995305L, \
1696257633L, 274389836L, 3973303017L, 671127655L, 1061109122L, 517486945L, 1379749962L, \
3421383928L, 3116950429L, 2165882425L, 2346928266L, 2892678711L, 2936066049L, 1316407868L, \
2873411858L, 4279682888L, 2744351923L, 3290373816L, 1014377279L, 955200944L, 4220990860L, \
2386098930L, 1772997650L, 3757346974L, 1621616438L, 2877097197L, 442116595L, 2010480266L, \
2867861469L, 2955352695L, 605335967L, 2222936009L, 2067554933L, 4129906358L, 1519608541L, \
1195006590L, 1942991038L, 2736562236L, 279162408L, 1415982909L, 4099901426L, 1732201505L, \
2934657937L, 860563237L, 2479235483L, 3081651097L, 2244720867L, 3112631622L, 1636991639L, \
3860393305L, 2312061927L, 48780114L, 1149090394L, 2643246550L, 1764050647L, 3836789087L, \
3474859076L, 4237194338L, 1735191073L, 2150369208L, 92164394L, 756974036L, 2314453957L, \
323969533L, 4267621035L, 283649842L, 810004843L, 727855536L, 1757827251L, 3334960421L, \
3261035106L, 38417393L, 2660980472L, 1256633965L, 2184045390L, 811213141L, 2857482069L, \
2237770878L, 3891003138L, 2787806886L, 2435192790L, 2249324662L, 3507764896L, 995388363L, \
856944153L, 619213904L, 3233967826L, 3703465555L, 3286531781L, 3863193356L, 2992340714L, \
413696855L, 3865185632L, 1704163171L, 3043634452L, 2225424707L, 2199018022L, 3506117517L, \
3311559776L, 3374443561L, 1207829628L, 668793165L, 1822020716L, 2082656160L, 1160606415L, \
3034757648L, 741703672L, 3094328738L, 459332691L, 2702383376L, 1610239915L, 4162939394L, \
557861574L, 3805706338L, 3832520705L, 1248934879L, 3250424034L, 892335058L, 74323433L, \
3209751608L, 3213220797L, 3444035873L, 3743886725L, 1783837251L, 610968664L, 580745246L, \
4041979504L, 201684874L, 2673219253L, 1377283008L, 3497299167L, 2344209394L, 2304982920L, \
3081403782L, 2599256854L, 3184475235L, 3373055826L, 695186388L, 2423332338L, 222864327L, \
1258227992L, 3627871647L, 3487724980L, 4027953808L, 3053320360L, 533627073L, 3026232514L, \
2340271949L, 867277230L, 868513116L, 2158535651L, 2487822909L, 3428235761L, 3067196046L, \
3435119657L, 1908441839L, 788668797L, 3367703138L, 3317763187L, 908264443L, 2252100381L, \
764223334L, 4127108988L, 384641349L, 3377374722L, 1263833251L, 1958694944L, 3847832657L, \
1253909612L, 1096494446L, 555725445L, 2277045895L, 3340096504L, 1383318686L, 4234428127L, \
1072582179L, 94169494L, 1064509968L, 2681151917L, 2681864920L, 734708852L, 1338914021L, \
1270409500L, 1789469116L, 4191988204L, 1716329784L, 2213764829L, 3712538840L, 919910444L, \
1318414447L, 3383806712L, 3054941722L, 3378649942L, 1205735655L, 1268136494L, 2214009444L, \
2532395133L, 3232230447L, 230294038L, 342599089L, 772808141L, 4096882234L, 3146662953L, \
2784264306L, 1860954704L, 2675279609L, 2984212876L, 2466966981L, 2627986059L, 2985545332L, \
2578042598L, 1458940786L, 2944243755L, 3959506256L, 1509151382L, 325761900L, 942251521L, \
4184289782L, 2756231555L, 3297811774L, 1169708099L, 3280524138L, 3805245319L, 3227360276L, \
3199632491L, 2235795585L, 2865407118L, 36763651L, 2441503575L, 3314890374L, 1755526087L, \
17915536L, 1196948233L, 949343045L, 3815841867L, 489007833L, 2654997597L, 2834744136L, \
417688687L, 2843220846L, 85621843L, 747339336L, 2043645709L, 3520444394L, 1825470818L, \
647778910L, 275904777L, 1249389189L, 3640887431L, 4200779599L, 323384601L, 3446088641L, \
4049835786L, 1718989062L, 3563787136L, 44099190L, 3281263107L, 22910812L, 1826109246L, \
745118154L, 3392171319L, 1571490704L, 354891067L, 815955642L, 1453450421L, 940015623L, \
796817754L, 1260148619L, 3898237757L, 176670141L, 1870249326L, 3317738680L, 448918002L, \
4059166594L, 2003827551L, 987091377L, 224855998L, 3520570137L, 789522610L, 2604445123L, \
454472869L, 475688926L, 2990723466L, 523362238L, 3897608102L, 806637149L, 2642229586L, \
2928614432L, 1564415411L, 1691381054L, 3816907227L, 4082581003L, 1895544448L, 3728217394L, \
3214813157L, 4054301607L, 1882632454L, 2873728645L, 3694943071L, 1297991732L, 2101682438L, \
3952579552L, 678650400L, 1391722293L, 478833748L, 2976468591L, 158586606L, 2576499787L, \
662690848L, 3799889765L, 3328894692L, 2474578497L, 2383901391L, 1718193504L, 3003184595L, \
3630561213L, 1929441113L, 3848238627L, 1594310094L, 3040359840L, 3051803867L, 2462788790L, \
954409915L, 802581771L, 681703307L, 545982392L, 2738993819L, 8025358L, 2827719383L, \
770471093L, 3484895980L, 3111306320L, 3900000891L, 2116916652L, 397746721L, 2087689510L, \
721433935L, 1396088885L, 2751612384L, 1998988613L, 2135074843L, 2521131298L, 707009172L, \
2398321482L, 688041159L, 2264560137L, 482388305L, 207864885L, 3735036991L, 3490348331L, \
1963642811L, 3260224305L, 3493564223L, 1939428454L, 1128799656L, 1366012432L, 2858822447L, \
1428147157L, 2261125391L, 1611208390L, 1134826333L, 2374102525L, 3833625209L, 2266397263L, \
3189115077L, 770080230L, 2674657172L, 4280146640L, 3604531615L, 4235071805L, 3436987249L, \
509704467L, 2582695198L, 4256268040L, 3391197562L, 1460642842L, 1617931012L, 457825497L, \
1031452907L, 1330422862L, 4125947620L, 2280712485L, 431892090L, 2387410588L, 2061126784L, \
896457479L, 3480499461L, 2488196663L, 4021103792L, 1877063114L, 2744470201L, 1046140599L, \
2129952955L, 3583049218L, 4217723693L, 2720341743L, 820661843L, 1079873609L, 3360954200L, \
3652304997L, 3335838575L, 2178810636L, 1908053374L, 4026721976L, 1793145418L, 476541615L, \
973420250L, 515553040L, 919292001L, 2601786155L, 1685119450L, 3030170809L, 1590676150L, \
1665099167L, 651151584L, 2077190587L, 957892642L, 646336572L, 2743719258L, 866169074L, \
851118829L, 4225766285L, 963748226L, 799549420L, 1955032629L, 799460000L, 2425744063L, \
2441291571L, 1928963772L, 528930629L, 2591962884L, 3495142819L, 1896021824L, 901320159L, \
3181820243L, 843061941L, 3338628510L, 3782438992L, 9515330L, 1705797226L, 953535929L, \
764833876L, 3202464965L, 2970244591L, 519154982L, 3390617541L, 566616744L, 3438031503L, \
1853838297L, 170608755L, 1393728434L, 676900116L, 3184965776L, 1843100290L, 78995357L, \
2227939888L, 3460264600L, 1745705055L, 1474086965L, 572796246L, 4081303004L, 882828851L, \
1295445825L, 137639900L, 3304579600L, 2722437017L, 4093422709L, 273203373L, 2666507854L, \
3998836510L, 493829981L, 1623949669L, 3482036755L, 3390023939L, 833233937L, 1639668730L, \
1499455075L, 249728260L, 1210694006L, 3836497489L, 1551488720L, 3253074267L, 3388238003L, \
2372035079L, 3945715164L, 2029501215L, 3362012634L, 2007375355L, 4074709820L, 631485888L, \
3135015769L, 4273087084L, 3648076204L, 2739943601L, 1374020358L, 1760722448L, 3773939706L, \
1313027823L, 1895251226L, 4224465911L, 421382535L, 1141067370L, 3660034846L, 3393185650L, \
1850995280L, 1451917312L, 3841455409L, 3926840308L, 1397397252L, 2572864479L, 2500171350L, \
3119920613L, 531400869L, 1626487579L, 1099320497L, 407414753L, 2438623324L, 99073255L, \
3175491512L, 656431560L, 1153671785L, 236307875L, 2824738046L, 2320621382L, 892174056L, \
230984053L, 719791226L, 2718891946L, 624L), None)
self.random = random.Random()
self.random.setstate(fixedState)
"""
Data structures useful for implementing SearchAgents
"""
class Stack:
"A container with a last-in-first-out (LIFO) queuing policy."
def __init__(self):
self.list = []
def push(self,item):
"Push 'item' onto the stack"
self.list.append(item)
def pop(self):
"Pop the most recently pushed item from the stack"
return self.list.pop()
def isEmpty(self):
"Returns true if the stack is empty"
return len(self.list) == 0
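# Illustrative usage sketch added for this write-up (this helper is not part of
# the original Berkeley file): Stack returns items in last-in, first-out order.
# It is wrapped in a function so importing util never executes it.
def _demoStack():
    s = Stack()
    for item in [1, 2, 3]:
        s.push(item)
    assert s.pop() == 3       # the most recently pushed item comes out first
    assert s.pop() == 2
    assert not s.isEmpty()    # the item 1 is still on the stack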
class Queue:
"A container with a first-in-first-out (FIFO) queuing policy."
def __init__(self):
self.list = []
def push(self,item):
"Enqueue the 'item' into the queue"
self.list.insert(0,item)
def pop(self):
"""
Dequeue the earliest enqueued item still in the queue. This
operation removes the item from the queue.
"""
return self.list.pop()
def isEmpty(self):
"Returns true if the queue is empty"
return len(self.list) == 0
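# Illustrative usage sketch (editorial addition, not in the original file):
# Queue returns items in first-in, first-out order.
def _demoQueue():
    q = Queue()
    for item in [1, 2, 3]:
        q.push(item)
    assert q.pop() == 1       # the earliest enqueued item comes out first
    assert q.pop() == 2
    assert q.pop() == 3
    assert q.isEmpty()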
class PriorityQueue:
"""
    Implements a priority queue data structure. Each inserted item
    has a priority associated with it, and the client is usually interested
    in quick retrieval of the item with the lowest priority value. This
    data structure keeps that item at the front of the underlying heap,
    so it can be retrieved quickly.
def __init__(self):
self.heap = []
self.count = 0
def push(self, item, priority):
entry = (priority, self.count, item)
heapq.heappush(self.heap, entry)
self.count += 1
def pop(self):
(_, _, item) = heapq.heappop(self.heap)
return item
def isEmpty(self):
return len(self.heap) == 0
def update(self, item, priority):
        # If the item is already in the priority queue with an equal or lower
        # priority value, do nothing. If it is there with a higher priority value,
        # lower it to the new value and rebuild the heap. If the item is not in
        # the priority queue, do the same thing as self.push.
for index, (p, c, i) in enumerate(self.heap):
if i == item:
if p <= priority:
break
del self.heap[index]
self.heap.append((priority, c, item))
heapq.heapify(self.heap)
break
else:
self.push(item, priority)
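# Illustrative usage sketch (editorial addition, not in the original file):
# items with lower priority values are popped first, and update() only ever
# lowers an item's priority value.
def _demoPriorityQueue():
    pq = PriorityQueue()
    pq.push('planA', 3)
    pq.push('planB', 1)
    pq.update('planA', 2)     # 2 < 3, so planA's priority value is lowered
    pq.update('planB', 5)     # 5 > 1, so this call is ignored
    assert pq.pop() == 'planB'
    assert pq.pop() == 'planA'
    assert pq.isEmpty()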
class PriorityQueueWithFunction(PriorityQueue):
"""
    Implements a priority queue with the same push/pop signature as the
    Queue and Stack classes. It is designed as a drop-in replacement for
    those two classes. The caller has to provide a priority function, which
extracts each item's priority.
"""
def __init__(self, priorityFunction):
"priorityFunction (item) -> priority"
self.priorityFunction = priorityFunction # store the priority function
PriorityQueue.__init__(self) # super-class initializer
def push(self, item):
"Adds an item to the queue with priority from the priority function"
PriorityQueue.push(self, item, self.priorityFunction(item))
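# Illustrative usage sketch (editorial addition, not in the original file):
# the priority of each item is computed by the supplied function, here len.
def _demoPriorityQueueWithFunction():
    pq = PriorityQueueWithFunction(len)
    pq.push('pacman')
    pq.push('go')
    pq.push('ghost')
    assert pq.pop() == 'go'       # the shortest string has the lowest priority value
    assert pq.pop() == 'ghost'
    assert pq.pop() == 'pacman'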
def manhattanDistance( xy1, xy2 ):
"Returns the Manhattan distance between points xy1 and xy2"
return abs( xy1[0] - xy2[0] ) + abs( xy1[1] - xy2[1] )
"""
Data structures and functions useful for various course projects
The search project should not need anything below this line.
"""
class Counter(dict):
"""
A counter keeps track of counts for a set of keys.
The counter class is an extension of the standard python
dictionary type. It is specialized to have number values
(integers or floats), and includes a handful of additional
functions to ease the task of counting data. In particular,
all keys are defaulted to have value 0. Using a dictionary:
a = {}
print a['test']
would give an error, while the Counter class analogue:
>>> a = Counter()
>>> print a['test']
0
returns the default 0 value. Note that to reference a key
that you know is contained in the counter,
you can still use the dictionary syntax:
>>> a = Counter()
>>> a['test'] = 2
>>> print a['test']
2
This is very useful for counting things without initializing their counts,
see for example:
>>> a['blah'] += 1
>>> print a['blah']
1
The counter also includes additional functionality useful in implementing
the classifiers for this assignment. Two counters can be added,
subtracted or multiplied together. See below for details. They can
also be normalized and their total count and arg max can be extracted.
"""
def __getitem__(self, idx):
self.setdefault(idx, 0)
return dict.__getitem__(self, idx)
def incrementAll(self, keys, count):
"""
Increments all elements of keys by the same count.
>>> a = Counter()
>>> a.incrementAll(['one','two', 'three'], 1)
>>> a['one']
1
>>> a['two']
1
"""
for key in keys:
self[key] += count
def argMax(self):
"""
Returns the key with the highest value.
"""
if len(self.keys()) == 0: return None
all = self.items()
values = [x[1] for x in all]
maxIndex = values.index(max(values))
return all[maxIndex][0]
def sortedKeys(self):
"""
Returns a list of keys sorted by their values. Keys
with the highest values will appear first.
>>> a = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> a['third'] = 1
>>> a.sortedKeys()
['second', 'third', 'first']
"""
sortedItems = self.items()
compare = lambda x, y: sign(y[1] - x[1])
sortedItems.sort(cmp=compare)
return [x[0] for x in sortedItems]
def totalCount(self):
"""
Returns the sum of counts for all keys.
"""
return sum(self.values())
def normalize(self):
"""
Edits the counter such that the total count of all
keys sums to 1. The ratio of counts for all keys
        will remain the same. Note that normalizing a counter whose
        total count is 0 leaves it unchanged.
"""
total = float(self.totalCount())
if total == 0: return
for key in self.keys():
self[key] = self[key] / total
def divideAll(self, divisor):
"""
Divides all counts by divisor
"""
divisor = float(divisor)
for key in self:
self[key] /= divisor
def copy(self):
"""
Returns a copy of the counter
"""
return Counter(dict.copy(self))
def __mul__(self, y ):
"""
Multiplying two counters gives the dot product of their vectors where
each unique label is a vector element.
>>> a = Counter()
>>> b = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> b['first'] = 3
>>> b['second'] = 5
>>> a['third'] = 1.5
>>> a['fourth'] = 2.5
>>> a * b
14
"""
sum = 0
x = self
if len(x) > len(y):
x,y = y,x
for key in x:
if key not in y:
continue
sum += x[key] * y[key]
return sum
def __radd__(self, y):
"""
Adding another counter to a counter increments the current counter
by the values stored in the second counter.
>>> a = Counter()
>>> b = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> b['first'] = 3
>>> b['third'] = 1
>>> a += b
>>> a['first']
1
"""
for key, value in y.items():
self[key] += value
def __add__( self, y ):
"""
Adding two counters gives a counter with the union of all keys and
counts of the second added to counts of the first.
>>> a = Counter()
>>> b = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> b['first'] = 3
>>> b['third'] = 1
>>> (a + b)['first']
1
"""
addend = Counter()
for key in self:
if key in y:
addend[key] = self[key] + y[key]
else:
addend[key] = self[key]
for key in y:
if key in self:
continue
addend[key] = y[key]
return addend
def __sub__( self, y ):
"""
Subtracting a counter from another gives a counter with the union of all keys and
counts of the second subtracted from counts of the first.
>>> a = Counter()
>>> b = Counter()
>>> a['first'] = -2
>>> a['second'] = 4
>>> b['first'] = 3
>>> b['third'] = 1
>>> (a - b)['first']
-5
"""
addend = Counter()
for key in self:
if key in y:
addend[key] = self[key] - y[key]
else:
addend[key] = self[key]
for key in y:
if key in self:
continue
addend[key] = -1 * y[key]
return addend
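# Illustrative usage sketch (editorial addition, not in the original file):
# argMax, totalCount and normalize, which the doctests above do not cover.
def _demoCounter():
    c = Counter()
    c.incrementAll(['a', 'b'], 2)
    c['b'] += 2
    assert c.argMax() == 'b'          # 'b' has the largest count (4)
    assert c.totalCount() == 6        # 2 + 4
    c.normalize()
    assert c['b'] == 4.0 / 6.0        # counts now sum to 1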
def raiseNotDefined():
fileName = inspect.stack()[1][1]
line = inspect.stack()[1][2]
method = inspect.stack()[1][3]
print "*** Method not implemented: %s at line %s of %s" % (method, line, fileName)
sys.exit(1)
def normalize(vectorOrCounter):
"""
normalize a vector or counter by dividing each value by the sum of all values
"""
normalizedCounter = Counter()
if type(vectorOrCounter) == type(normalizedCounter):
counter = vectorOrCounter
total = float(counter.totalCount())
if total == 0: return counter
for key in counter.keys():
value = counter[key]
normalizedCounter[key] = value / total
return normalizedCounter
else:
vector = vectorOrCounter
s = float(sum(vector))
if s == 0: return vector
return [el / s for el in vector]
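# Illustrative usage sketch (editorial addition, not in the original file):
# normalize accepts plain lists as well as Counters.
def _demoNormalize():
    assert normalize([1, 1, 2]) == [0.25, 0.25, 0.5]
    c = normalize(Counter({'x': 3, 'y': 1}))
    assert c['x'] == 0.75 and c['y'] == 0.25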
def nSample(distribution, values, n):
    """
    Draws n samples from 'values' according to the probability weights in
    'distribution' (the weights are normalized first if they do not sum to 1).
    """
if sum(distribution) != 1:
distribution = normalize(distribution)
rand = [random.random() for i in range(n)]
rand.sort()
samples = []
samplePos, distPos, cdf = 0,0, distribution[0]
while samplePos < n:
if rand[samplePos] < cdf:
samplePos += 1
samples.append(values[distPos])
else:
distPos += 1
cdf += distribution[distPos]
return samples
def sample(distribution, values = None):
    """
    Draws a single sample from 'distribution'. If 'distribution' is a Counter,
    its keys are used as the values and its counts as the weights.
    """
if type(distribution) == Counter:
items = sorted(distribution.items())
distribution = [i[1] for i in items]
values = [i[0] for i in items]
if sum(distribution) != 1:
distribution = normalize(distribution)
choice = random.random()
i, total= 0, distribution[0]
while choice > total:
i += 1
total += distribution[i]
return values[i]
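# Illustrative usage sketch (editorial addition, not in the original file):
# with a degenerate one-value distribution, sample is deterministic.
def _demoSample():
    assert sample([1.0], ['onlyValue']) == 'onlyValue'
    assert sample(Counter({'certain': 1.0})) == 'certain'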
def sampleFromCounter(ctr):
    "Draws a single key from the counter 'ctr' in proportion to its count."
items = sorted(ctr.items())
return sample([v for k,v in items], [k for k,v in items])
def getProbability(value, distribution, values):
"""
Gives the probability of a value under a discrete distribution
defined by (distributions, values).
"""
total = 0.0
for prob, val in zip(distribution, values):
if val == value:
total += prob
return total
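# Illustrative usage sketch (editorial addition, not in the original file):
# probabilities of repeated values are summed.
def _demoGetProbability():
    dist = [0.25, 0.25, 0.5]
    vals = ['a', 'b', 'a']
    assert getProbability('a', dist, vals) == 0.75
    assert getProbability('b', dist, vals) == 0.25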
def flipCoin( p ):
    "Returns True with probability p and False otherwise."
r = random.random()
return r < p
def chooseFromDistribution( distribution ):
"Takes either a counter or a list of (prob, key) pairs and samples"
if type(distribution) == dict or type(distribution) == Counter:
return sample(distribution)
r = random.random()
base = 0.0
for prob, element in distribution:
base += prob
if r <= base: return element
def nearestPoint( pos ):
"""
Finds the nearest grid point to a position (discretizes).
"""
( current_row, current_col ) = pos
grid_row = int( current_row + 0.5 )
grid_col = int( current_col + 0.5 )
return ( grid_row, grid_col )
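# Illustrative usage sketch (editorial addition, not in the original file):
# positions are rounded to the nearest integer grid point.
def _demoNearestPoint():
    assert nearestPoint((3.3, 4.7)) == (3, 5)
    assert nearestPoint((1.5, 2.0)) == (2, 2)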
def sign( x ):
"""
Returns 1 or -1 depending on the sign of x
"""
if( x >= 0 ):
return 1
else:
return -1
def arrayInvert(array):
"""
    Transposes a matrix stored as a list of lists (despite its name, this is
    a transpose, not a matrix inverse).
"""
result = [[] for i in array]
for outer in array:
for inner in range(len(outer)):
result[inner].append(outer[inner])
return result
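# Illustrative usage sketch (editorial addition, not in the original file):
# arrayInvert transposes a square list-of-lists matrix.
def _demoArrayInvert():
    assert arrayInvert([[1, 2], [3, 4]]) == [[1, 3], [2, 4]]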
def matrixAsList( matrix, value = True ):
"""
Turns a matrix into a list of coordinates matching the specified value
"""
rows, cols = len( matrix ), len( matrix[0] )
cells = []
for row in range( rows ):
for col in range( cols ):
if matrix[row][col] == value:
cells.append( ( row, col ) )
return cells
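# Illustrative usage sketch (editorial addition, not in the original file):
# matrixAsList collects the (row, col) coordinates whose entries match value.
def _demoMatrixAsList():
    grid = [[True, False],
            [False, True]]
    assert matrixAsList(grid) == [(0, 0), (1, 1)]
    assert matrixAsList(grid, value=False) == [(0, 1), (1, 0)]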
def lookup(name, namespace):
"""
Get a method or class from any imported module from its name.
Usage: lookup(functionName, globals())
"""
dots = name.count('.')
if dots > 0:
moduleName, objName = '.'.join(name.split('.')[:-1]), name.split('.')[-1]
module = __import__(moduleName)
return getattr(module, objName)
else:
modules = [obj for obj in namespace.values() if str(type(obj)) == "<type 'module'>"]
options = [getattr(module, name) for module in modules if name in dir(module)]
options += [obj[1] for obj in namespace.items() if obj[0] == name ]
if len(options) == 1: return options[0]
        if len(options) > 1: raise Exception, 'Name conflict for %s' % name
raise Exception, '%s not found as a method or class' % name
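# Illustrative usage sketch (editorial addition, not in the original file):
# dotted names are resolved by importing the module; plain names are searched
# in the supplied namespace.
def _demoLookup():
    sqrt = lookup('math.sqrt', globals())     # imports math and returns math.sqrt
    assert sqrt(9) == 3.0
    assert lookup('manhattanDistance', globals()) is manhattanDistance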
def pause():
"""
Pauses the output stream awaiting user feedback.
"""
print "<Press enter/return to continue>"
raw_input()
# code to handle timeouts
#
# FIXME
# NOTE: TimeoutFunction is NOT reentrant. Later timeouts will silently
# disable earlier timeouts. This could be solved by maintaining a global list
# of active timeouts. Currently, the questions whose test cases call this
# already wrap all student code in it.
#
import signal
import time
class TimeoutFunctionException(Exception):
"""Exception to raise on a timeout"""
pass
class TimeoutFunction:
def __init__(self, function, timeout):
self.timeout = timeout
self.function = function
def handle_timeout(self, signum, frame):
raise TimeoutFunctionException()
def __call__(self, *args, **keyArgs):
# If we have SIGALRM signal, use it to cause an exception if and
# when this function runs too long. Otherwise check the time taken
# after the method has returned, and throw an exception then.
if hasattr(signal, 'SIGALRM'):
old = signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.timeout)
try:
result = self.function(*args, **keyArgs)
finally:
signal.signal(signal.SIGALRM, old)
signal.alarm(0)
else:
startTime = time.time()
result = self.function(*args, **keyArgs)
timeElapsed = time.time() - startTime
if timeElapsed >= self.timeout:
self.handle_timeout(None, None)
return result
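# Illustrative usage sketch (editorial addition, not in the original file):
# wrapping a callable in TimeoutFunction raises TimeoutFunctionException if
# the call exceeds its time budget; a quick call simply returns its result.
def _demoTimeoutFunction():
    def quickAdd(a, b):
        return a + b
    guarded = TimeoutFunction(quickAdd, 2)    # generous two-second budget
    assert guarded(1, 2) == 3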
_ORIGINAL_STDOUT = None
_ORIGINAL_STDERR = None
_MUTED = False
class WritableNull:
def write(self, string):
pass
def mutePrint():
global _ORIGINAL_STDOUT, _ORIGINAL_STDERR, _MUTED
if _MUTED:
return
_MUTED = True
_ORIGINAL_STDOUT = sys.stdout
#_ORIGINAL_STDERR = sys.stderr
sys.stdout = WritableNull()
#sys.stderr = WritableNull()
def unmutePrint():
global _ORIGINAL_STDOUT, _ORIGINAL_STDERR, _MUTED
if not _MUTED:
return
_MUTED = False
sys.stdout = _ORIGINAL_STDOUT
#sys.stderr = _ORIGINAL_STDERR
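# Illustrative usage sketch (editorial addition, not in the original file):
# anything printed between mutePrint() and unmutePrint() is discarded.
def _demoMutePrint():
    mutePrint()
    print "this line is swallowed while output is muted"
    unmutePrint()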

2
programming.txt Normal file
View File

@ -0,0 +1,2 @@
http://ai.berkeley.edu/projects/release/tutorial/v1/001/tutorial.zip