1 parent 80190fd · commit 3ff5f40
learning.py
@@ -984,8 +984,8 @@ def flatten(seqs): return sum(seqs, [])
 
 
 def err_ratio(predict, dataset, examples=None, verbose=0):
-    """Return the proportion of the examples that are NOT correctly predicted."""
-    """verbose - 0: No output; 1: Output wrong; 2 (or greater): Output correct"""
+    """Return the proportion of the examples that are NOT correctly predicted.
+    verbose - 0: No output; 1: Output wrong; 2 (or greater): Output correct"""
     if examples is None:
         examples = dataset.examples
     if len(examples) == 0:
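For context (not part of the diff): the merged docstring documents the verbose levels of err_ratio. A minimal usage sketch, assuming the DataSet and PluralityLearner helpers that already exist in learning.py:

# Sketch only: DataSet and PluralityLearner are assumed from learning.py.
iris = DataSet(name="iris")
majority = PluralityLearner(iris)     # always predicts the most common class
error = err_ratio(majority, iris)     # fraction of examples predicted incorrectly
# verbose=1 would also print the wrongly predicted examples; verbose=2 the correct ones too.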
tests/test_csp.py
@@ -1,5 +1,10 @@
 import pytest
+from utils import failure_test
 from csp import *
+import random
+
+
+random.seed("aima-python")
 
 
 def test_csp_assign():
@@ -331,10 +336,12 @@ def test_backtracking_search():
 
 
 def test_min_conflicts():
-    random.seed("aima-python")
     assert min_conflicts(australia)
-    assert min_conflicts(usa)
     assert min_conflicts(france)
+
+    tests = [(usa, None)] * 3
+    assert failure_test(min_conflicts, tests) > 1/3
+
 
     australia_impossible = MapColoringCSP(list('RG'), 'SA: WA NT Q NSW V; NT: WA Q; NSW: Q V; T: ')
     assert min_conflicts(australia_impossible, 1000) is None
@@ -351,7 +358,7 @@ def test_parse_neighbours():
 def test_topological_sort():
     root = 'NT'
     Sort, Parents = topological_sort(australia,root)
-
+
     assert Sort == ['NT','SA','Q','NSW','V','WA']
     assert Parents['NT'] == None
     assert Parents['SA'] == 'NT'
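A note on the intent of the min_conflicts change, as I read it (not stated in the diff): min_conflicts returns None when it cannot find an assignment, so pairing each run with None as the failure output makes failure_test count the successful runs. Spelled out by hand, the new assertion is roughly equivalent to:

# Rough hand-expansion of the failure_test call above; usa and min_conflicts
# come from csp.py, and the seeded RNG keeps the three runs reproducible.
results = [min_conflicts(usa) for _ in range(3)]
success_rate = sum(solution is not None for solution in results) / 3
assert success_rate > 1/3   # i.e. at least two of the three runs must succeed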
tests/test_learning.py
@@ -168,9 +168,13 @@ def test_decision_tree_learner():
 def test_random_forest():
     iris = DataSet(name="iris")
     rF = RandomForest(iris)
-    assert rF([5, 3, 1, 0.1]) == "setosa"
-    assert rF([6, 5, 3, 1]) == "versicolor"
-    assert rF([7.5, 4, 6, 2]) == "virginica"
+    tests = [([5.0, 3.0, 1.0, 0.1], "setosa"),
+             ([5.1, 3.3, 1.1, 0.1], "setosa"),
+             ([6.0, 5.0, 3.0, 1.0], "versicolor"),
+             ([6.1, 2.2, 3.5, 1.0], "versicolor"),
+             ([7.5, 4.1, 6.2, 2.3], "virginica"),
+             ([7.3, 3.7, 6.1, 2.5], "virginica")]
+    assert grade_learner(rF, tests) >= 1/3
 
 
 def test_neural_network_learner():
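For reference, grade_learner already lives in learning.py and is not shown in this diff; the new assertion reads as if it returns the fraction of (input, label) pairs the learner gets right, so the forest only has to classify two of the six hand-picked iris points correctly. A sketch under that assumption:

# Assumption: grade_learner(predict, tests) returns the fraction of correct predictions.
def grade_learner_sketch(predict, tests):
    return sum(predict(x) == y for x, y in tests) / len(tests)

assert grade_learner_sketch(rF, tests) >= 1/3   # two of the six points suffice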
utils.py
@@ -416,6 +416,16 @@ def open_data(name, mode='r'):
     return open(aima_file)
 
 
+def failure_test(algorithm, tests):
+    """Grades the given algorithm based on how many tests it passes.
+    Most algorithms have arbitrary output on correct execution, which is difficult
+    to check for correctness. On the other hand, a lot of algorithms output something
+    particular on failure (for example, False or None).
+    tests is a list with each element in the form: (values, failure_output)."""
+    from statistics import mean
+    return mean(int(algorithm(x) != y) for x, y in tests)
+
+
 # ______________________________________________________________________________
 # Expressions
 
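A small usage sketch of the new helper, with a made-up algorithm (not part of the commit) that signals failure by returning None:

# Hypothetical example: toy_search "fails" (returns None) on odd inputs.
def toy_search(n):
    return n * 2 if n % 2 == 0 else None

tests = [(2, None), (3, None), (4, None), (5, None)]
assert failure_test(toy_search, tests) == 0.5   # two of four runs avoided the failure output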