RachelPerceptronPython: Difference between revisions
Jump to navigation
Jump to search
New page: <pre> #!/usr/bin/python down = False up = True def dp (inputs, weights) : sum = 0.0 i = 0 while(i < len(inputs)) : sum += inputs[i]*weights[i] i += 1 return sum learnin... |
No edit summary |
||
| Line 6: | Line 6: | ||
def dp (inputs, weights) : | def dp (inputs, weights) : | ||
sum | return sum(i*w for i,w in zip(inputs, weights)) | ||
learning_rate = 0.1 | learning_rate = 0.1 | ||
| Line 23: | Line 17: | ||
prod = dp( inputs, weights ) | prod = dp( inputs, weights ) | ||
print "dot_product: " + str(prod) | print "dot_product: " + str(prod) | ||
if | if prod > threshhold : | ||
return 1 | return 1 | ||
else : | else : | ||
| Line 31: | Line 25: | ||
def bump_weights( inputs, up_or_down ) : | def bump_weights( inputs, up_or_down ) : | ||
print "bump_weights" | print "bump_weights" | ||
x | for x, val in enumerate( inputs ) : | ||
val = inputs[x] | val = inputs[x] | ||
if | if val == 1 : | ||
if | if up_or_down : | ||
weights[x] += learning_rate | weights[x] += learning_rate | ||
else : | else : | ||
weights[x] -= learning_rate | weights[x] -= learning_rate | ||
| Line 69: | Line 61: | ||
bump_weights( inputs, up ) | bump_weights( inputs, up ) | ||
count += 1 | count += 1 | ||
if | if count == len(datasets) and correct != [True,True,True,True]: | ||
count = 0 | count = 0 | ||
print "\n" | print "\n" | ||
</pre> | </pre> | ||
Latest revision as of 12:21, 5 March 2010
#!/usr/bin/python
# Direction flags passed to bump_weights(): `up` (True) means add the
# learning rate to a weight, `down` (False) means subtract it.
down = False
up = True
def dp (inputs, weights) :
    """Return the dot product of two equal-length numeric sequences."""
    total = 0
    for a, b in zip(inputs, weights):
        total += a * b
    return total
# Step size applied to a weight on every correction.
learning_rate = 0.1
# Activation cutoff used by co(); NOTE: "threshhold" is a historical
# misspelling of "threshold" — keep it, co() references this exact name.
threshhold = 0.5
# One weight per input value; trained in place by bump_weights().
weights = [0,0,0]
def co(inputs) :
    """Classifier output for the perceptron.

    Dot the `inputs` with the global `weights` and return 1 if the
    product exceeds the global `threshhold`, else 0.
    """
    print("co")
    prod = dp( inputs, weights )
    # Single-argument parenthesized print() behaves identically under
    # Python 2 (parenthesized expression) and Python 3 (function call).
    print("dot_product: " + str(prod))
    # Strict > : a product exactly at the threshold yields 0.
    return 1 if prod > threshhold else 0
# iterate over inputs and bump the corresponding weights if the input was 1
def bump_weights( inputs, up_or_down ) :
    """Adjust the global `weights` in place (perceptron update rule).

    For every input that equals 1, add `learning_rate` to the matching
    weight when `up_or_down` is truthy, otherwise subtract it.  Inputs
    of 0 leave their weight untouched.
    """
    print("bump_weights")
    # enumerate() already yields each value; the original's redundant
    # `val = inputs[x]` re-lookup is dropped.
    for x, val in enumerate( inputs ):
        if val == 1:
            if up_or_down:
                weights[x] += learning_rate
            else:
                weights[x] -= learning_rate
# Training rows: [in0, in1, in2, expected].  in0 is always 1 and acts as
# a bias input; the expected output is 0 only when both remaining inputs
# are 1 (a NAND-like truth table over in1/in2).
datasets = [[1,0,0,1],[1,0,1,1],[1,1,0,1],[1,1,1,0]]
#learn([1,1,0],1)
#print weights
# weights remains a global variable
# Index of the row currently being trained on.
count = 0
# Per-row flags: whether each row was classified correctly this pass.
correct = [False,False,False,False]
# Training loop: run each row through the perceptron, nudging the
# weights after every miss.  After a full pass, restart from row 0
# unless every row was classified correctly — then the loop ends.
while count < len(datasets) :
    dataset = datasets[count]
    inputs = dataset[0:3]
    expected = dataset[3]
    print("inputs: " + ', '.join(str(x) for x in inputs ))
    print("weights: " + ', '.join(str(x) for x in weights ))
    result = co( inputs )
    print("expected: " + str(expected))
    print("and got : " + str(result))
    # Assume this row is correct; flip to False below on a miss.
    correct[count] = True
    if result > expected :
        print("too big")
        correct[count] = False
        bump_weights( inputs, down )
    if result < expected :
        print("too small")
        correct[count] = False
        bump_weights( inputs, up )
    count += 1
    # Epoch just ended with at least one miss: start another pass.
    if count == len(datasets) and correct != [True,True,True,True]:
        count = 0
        print("\n")