from Agenda
+            self.agenda.remove((G, act1))
+
+            # For actions with variable number of arguments, use least commitment principle
+            # act0_temp, bindings = self.find_action_for_precondition(G)
+            # act0 = self.generate_action_object(act0_temp, bindings)
+
+            # Actions = Actions U {act0}
+            self.actions.add(act0)
+
+            # Constraints = add_const(start < act0, Constraints)
+            self.constraints = self.add_const((self.start, act0), self.constraints)
+
+            # for each CL E CausalLinks do
+            #   Constraints = protect(CL, act0, Constraints)
+            for causal_link in self.causal_links:
+                self.constraints = self.protect(causal_link, act0, self.constraints)
+
+            # Agenda = Agenda U {<P, act0>: P is a precondition of act0}
+            for precondition in act0.precond:
+                self.agenda.add((precondition, act0))
+
+            # Constraints = add_const(act0 < act1, Constraints)
+            self.constraints = self.add_const((act0, act1), self.constraints)
+
+            # CausalLinks U {<act0, G, act1>}
+            if (act0, G, act1) not in self.causal_links:
+                self.causal_links.append((act0, G, act1))
+
+            # for each A E Actions do
+            #   Constraints = protect(<act0, G, act1>, A, Constraints)
+            for action in self.actions:
+                self.constraints = self.protect((act0, G, act1), action, self.constraints)
+
+            if step > 200:
+                print("Couldn't find a solution")
+                return None, None
+
+        if display:
+            self.display_plan()
+        else:
+            return self.constraints, self.causal_links
+
+
+def spare_tire_graphPlan():
+    """Solves the spare tire problem using GraphPlan"""
+    return GraphPlan(spare_tire()).execute()
+
+
+def three_block_tower_graphPlan():
+    """Solves the Sussman Anomaly problem using GraphPlan"""
+    return GraphPlan(three_block_tower()).execute()
+
+
+def air_cargo_graphPlan():
+    """Solves the air cargo problem using GraphPlan"""
+    return GraphPlan(air_cargo()).execute()
+
+
+def have_cake_and_eat_cake_too_graphPlan():
+    """Solves the cake problem using GraphPlan"""
+    return [GraphPlan(have_cake_and_eat_cake_too()).execute()[1]]
+
+
+def shopping_graphPlan():
+    """Solves the shopping problem using GraphPlan"""
+    return GraphPlan(shopping_problem()).execute()
+
+
+def socks_and_shoes_graphPlan():
+    """Solves the socks and shoes problem using GraphPlan"""
+    return GraphPlan(socks_and_shoes()).execute()
+
+
+def simple_blocks_world_graphPlan():
+    """Solves the simple blocks world problem"""
+    return GraphPlan(simple_blocks_world()).execute()
 
 
 class HLA(Action):
@@ -565,18 +1422,19 @@ class HLA(Action):
     """
     unique_group = 1
 
-    def __init__(self, action, precond=[None, None], effect=[None, None], duration=0,
-                 consume={}, use={}):
+    def __init__(self, action, precond=None, effect=None, duration=0, consume=None, use=None):
         """
         As opposed to actions, to define HLA, we have added constraints.
         duration holds the amount of time required to execute the task
         consumes holds a dictionary representing the resources the task consumes
         uses holds a dictionary representing the resources the task uses
         """
+        precond = precond or [None]
+        effect = effect or [None]
         super().__init__(action, precond, effect)
         self.duration = duration
-        self.consumes = consume
-        self.uses = use
+        self.consumes = consume or {}
+        self.uses = use or {}
         self.completed = False
         # self.priority = -1 #  must be assigned in relation to other HLAs
         # self.job_group = -1 #  must be assigned in relation to other HLAs
@@ -586,7 +1444,6 @@ def do_action(self, job_order, available_resources, kb, args):
         An HLA based version of act - along with knowledge base updation, it handles
         resource checks, and ensures the actions are executed in the correct order.
         """
-        # print(self.name)
         if not self.has_usable_resource(available_resources):
             raise Exception('Not enough usable resources to execute {}'.format(self.name))
         if not self.has_consumable_resource(available_resources):
@@ -594,10 +1451,11 @@ def do_action(self, job_order, available_resources, kb, args):
         if not self.inorder(job_order):
             raise Exception("Can't execute {} - execute prerequisite actions first".
                             format(self.name))
-        super().act(kb, args)  # update knowledge base
+        kb = super().act(kb, args)  # update knowledge base
         for resource in self.consumes:  # remove consumed resources
             available_resources[resource] -= self.consumes[resource]
         self.completed = True  # set the task status to complete
+        return kb
 
     def has_consumable_resource(self, available_resources):
         """
@@ -636,18 +1494,19 @@ def inorder(self, job_order):
         return True
 
 
-class Problem(PDDL):
+class RealWorldPlanningProblem(PlanningProblem):
     """
     Define real-world problems by aggregating resources as numerical quantities instead of
     named entities.
 
-    This class is identical to PDLL, except that it overloads the act function to handle
+    This class is identical to PlanningProblem, except that it overloads the act function to handle
     resource and ordering conditions imposed by HLA as opposed to Action.
     """
-    def __init__(self, initial_state, actions, goal_test, jobs=None, resources={}):
-        super().__init__(initial_state, actions, goal_test)
+
+    def __init__(self, initial, goals, actions, jobs=None, resources=None):
+        super().__init__(initial, goals, actions)
         self.jobs = jobs
-        self.resources = resources
+        self.resources = resources or {}
 
     def act(self, action):
         """
@@ -662,106 +1521,249 @@ def act(self, action):
         list_action = first(a for a in self.actions if a.name == action.name)
         if list_action is None:
             raise Exception("Action '{}' not found".format(action.name))
-        list_action.do_action(self.jobs, self.resources, self.kb, args)
+        self.initial = list_action.do_action(self.jobs, self.resources, self.initial, args).clauses
 
-    def refinements(hla, state, library):  # TODO - refinements may be (multiple) HLA themselves ...
+    def refinements(self, library):  # refinements may be (multiple) HLA themselves ...
         """
-        state is a Problem, containing the current state kb
-        library is a dictionary containing details for every possible refinement. eg:
+        State is a Problem, containing the current state kb. Library is a
+        dictionary containing details for every possible refinement. e.g.:
         {
-        "HLA": [
-            "Go(Home,SFO)",
-            "Go(Home,SFO)",
-            "Drive(Home, SFOLongTermParking)",
-            "Shuttle(SFOLongTermParking, SFO)",
-            "Taxi(Home, SFO)"
-               ],
-        "steps": [
-            ["Drive(Home, SFOLongTermParking)", "Shuttle(SFOLongTermParking, SFO)"],
-            ["Taxi(Home, SFO)"],
-            [], # empty refinements ie primitive action
+        'HLA': [
+            'Go(Home, SFO)',
+            'Go(Home, SFO)',
+            'Drive(Home, SFOLongTermParking)',
+            'Shuttle(SFOLongTermParking, SFO)',
+            'Taxi(Home, SFO)'
+            ],
+        'steps': [
+            ['Drive(Home, SFOLongTermParking)', 'Shuttle(SFOLongTermParking, SFO)'],
+            ['Taxi(Home, SFO)'],
+            [],
             [],
             []
-               ],
-        "precond_pos": [
-            ["At(Home), Have(Car)"],
-            ["At(Home)"],
-            ["At(Home)", "Have(Car)"]
-            ["At(SFOLongTermParking)"]
-            ["At(Home)"]
-                       ],
-        "precond_neg": [[],[],[],[],[]],
-        "effect_pos": [
-            ["At(SFO)"],
-            ["At(SFO)"],
-            ["At(SFOLongTermParking)"],
-            ["At(SFO)"],
-            ["At(SFO)"]
-                      ],
-        "effect_neg": [
-            ["At(Home)"],
-            ["At(Home)"],
-            ["At(Home)"],
-            ["At(SFOLongTermParking)"],
-            ["At(Home)"]
-                      ]
-        }
-        """
-        e = Expr(hla.name, hla.args)
-        indices = [i for i, x in enumerate(library["HLA"]) if expr(x).op == hla.name]
+            ],
+        # empty refinements indicate a primitive action
+        'precond': [
+            ['At(Home) & Have(Car)'],
+            ['At(Home)'],
+            ['At(Home) & Have(Car)'],
+            ['At(SFOLongTermParking)'],
+            ['At(Home)']
+            ],
+        'effect': [
+            ['At(SFO) & ~At(Home)'],
+            ['At(SFO) & ~At(Home)'],
+            ['At(SFOLongTermParking) & ~At(Home)'],
+            ['At(SFO) & ~At(SFOLongTermParking)'],
+            ['At(SFO) & ~At(Home)']
+            ]}
+        """
+        indices = [i for i, x in enumerate(library['HLA']) if expr(x).op == self.name]
         for i in indices:
-            action = HLA(expr(library["steps"][i][0]), [  # TODO multiple refinements
-                    [expr(x) for x in library["precond_pos"][i]],
-                    [expr(x) for x in library["precond_neg"][i]]
-                ],
-                [
-                    [expr(x) for x in library["effect_pos"][i]],
-                    [expr(x) for x in library["effect_neg"][i]]
-                ])
-            if action.check_precond(state.kb, action.args):
-                yield action
-
-    def hierarchical_search(problem, hierarchy):
-        """
-        [Figure 11.5] 'Hierarchical Search, a Breadth First Search implementation of Hierarchical
+            actions = []
+            for j in range(len(library['steps'][i])):
+                # find the index of the step [j]  of the HLA
+                index_step = [k for k, x in enumerate(library['HLA']) if x == library['steps'][i][j]][0]
+                precond = library['precond'][index_step][0]  # preconditions of step [j]
+                effect = library['effect'][index_step][0]  # effect of step [j]
+                actions.append(HLA(library['steps'][i][j], precond, effect))
+            yield actions
+
+    def hierarchical_search(self, hierarchy):
+        """
+        [Figure 11.5]
+        'Hierarchical Search, a Breadth First Search implementation of Hierarchical
         Forward Planning Search'
-        The problem is a real-world prodlem defined by the problem class, and the hierarchy is
+        The problem is a real-world problem defined by the problem class, and the hierarchy is
         a dictionary of HLA - refinements (see refinements generator for details)
         """
-        act = Node(problem.actions[0])
-        frontier = FIFOQueue()
+        act = Node(self.initial, None, [self.actions[0]])
+        frontier = deque()
         frontier.append(act)
-        while(True):
+        while True:
             if not frontier:
                 return None
-            plan = frontier.pop()
-            print(plan.state.name)
-            hla = plan.state  # first_or_null(plan)
-            prefix = None
-            if plan.parent:
-                prefix = plan.parent.state.action  # prefix, suffix = subseq(plan.state, hla)
-            outcome = Problem.result(problem, prefix)
-            if hla is None:
+            plan = frontier.popleft()
+            # finds the first non primitive hla in plan actions
+            (hla, index) = RealWorldPlanningProblem.find_hla(plan, hierarchy)
+            prefix = plan.action[:index]
+            outcome = RealWorldPlanningProblem(
+                RealWorldPlanningProblem.result(self.initial, prefix), self.goals, self.actions)
+            suffix = plan.action[index + 1:]
+            if not hla:  # hla is None and plan is primitive
                 if outcome.goal_test():
-                    return plan.path()
+                    return plan.action
             else:
-                print("else")
-                for sequence in Problem.refinements(hla, outcome, hierarchy):
-                    print("...")
-                    frontier.append(Node(plan.state, plan.parent, sequence))
+                for sequence in RealWorldPlanningProblem.refinements(hla, hierarchy):  # find refinements
+                    frontier.append(Node(outcome.initial, plan, prefix + sequence + suffix))
 
-    def result(problem, action):
+    def result(state, actions):
         """The outcome of applying an action to the current problem"""
-        if action is not None:
-            problem.act(action)
-            return problem
-        else:
-            return problem
+        for a in actions:
+            if a.check_precond(state, a.args):
+                state = a(state, a.args).clauses
+        return state
+
+    def angelic_search(self, hierarchy, initial_plan):
+        """
+        [Figure 11.8]
+        A hierarchical planning algorithm that uses angelic semantics to identify and
+        commit to high-level plans that work while avoiding high-level plans that don’t.
+        The predicate MAKING-PROGRESS checks to make sure that we aren’t stuck in an infinite regression
+        of refinements.
+        At top level, call ANGELIC-SEARCH with [Act] as the initialPlan.
+
+        InitialPlan contains a sequence of HLA's with angelic semantics
+
+        The possible effects of an angelic HLA in initialPlan are:
+        ~ : effect remove
+        $+: effect possibly add
+        $-: effect possibly remove
+        $$: possibly add or remove
+        """
+        frontier = deque(initial_plan)
+        while True:
+            if not frontier:
+                return None
+            plan = frontier.popleft()  # sequence of HLA/Angelic HLA's
+            opt_reachable_set = RealWorldPlanningProblem.reach_opt(self.initial, plan)
+            pes_reachable_set = RealWorldPlanningProblem.reach_pes(self.initial, plan)
+            if self.intersects_goal(opt_reachable_set):
+                if RealWorldPlanningProblem.is_primitive(plan, hierarchy):
+                    return [x for x in plan.action]
+                guaranteed = self.intersects_goal(pes_reachable_set)
+                if guaranteed and RealWorldPlanningProblem.making_progress(plan, initial_plan):
+                    final_state = guaranteed[0]  # any element of guaranteed
+                    return RealWorldPlanningProblem.decompose(hierarchy, final_state, pes_reachable_set)
+                # there should be at least one HLA/AngelicHLA, otherwise plan would be primitive
+                hla, index = RealWorldPlanningProblem.find_hla(plan, hierarchy)
+                prefix = plan.action[:index]
+                suffix = plan.action[index + 1:]
+                outcome = RealWorldPlanningProblem(
+                    RealWorldPlanningProblem.result(self.initial, prefix), self.goals, self.actions)
+                for sequence in RealWorldPlanningProblem.refinements(hla, hierarchy):  # find refinements
+                    frontier.append(
+                        AngelicNode(outcome.initial, plan, prefix + sequence + suffix, prefix + sequence + suffix))
+
+    def intersects_goal(self, reachable_set):
+        """
+        Find the intersection of the reachable states and the goal
+        """
+        return [y for x in list(reachable_set.keys())
+                for y in reachable_set[x]
+                if all(goal in y for goal in self.goals)]
+
+    def is_primitive(plan, library):
+        """
+        checks if the hla is primitive action
+        """
+        for hla in plan.action:
+            indices = [i for i, x in enumerate(library['HLA']) if expr(x).op == hla.name]
+            for i in indices:
+                if library["steps"][i]:
+                    return False
+        return True
+
+    def reach_opt(init, plan):
+        """
+        Finds the optimistic reachable set of the sequence of actions in plan
+        """
+        reachable_set = {0: [init]}
+        optimistic_description = plan.action  # list of angelic actions with optimistic description
+        return RealWorldPlanningProblem.find_reachable_set(reachable_set, optimistic_description)
+
+    def reach_pes(init, plan):
+        """
+        Finds the pessimistic reachable set of the sequence of actions in plan
+        """
+        reachable_set = {0: [init]}
+        pessimistic_description = plan.action_pes  # list of angelic actions with pessimistic description
+        return RealWorldPlanningProblem.find_reachable_set(reachable_set, pessimistic_description)
+
+    def find_reachable_set(reachable_set, action_description):
+        """
+        Finds the reachable states of the action_description when applied in each state of reachable set.
+        """
+        for i in range(len(action_description)):
+            reachable_set[i + 1] = []
+            if type(action_description[i]) is AngelicHLA:
+                possible_actions = action_description[i].angelic_action()
+            else:
+                possible_actions = action_description
+            for action in possible_actions:
+                for state in reachable_set[i]:
+                    if action.check_precond(state, action.args):
+                        if action.effect[0]:
+                            new_state = action(state, action.args).clauses
+                            reachable_set[i + 1].append(new_state)
+                        else:
+                            reachable_set[i + 1].append(state)
+        return reachable_set
+
+    def find_hla(plan, hierarchy):
+        """
+        Finds the first HLA action in plan.action, which is not primitive
+        and its corresponding index in plan.action
+        """
+        hla = None
+        index = len(plan.action)
+        for i in range(len(plan.action)):  # find the first HLA in plan, that is not primitive
+            if not RealWorldPlanningProblem.is_primitive(Node(plan.state, plan.parent, [plan.action[i]]), hierarchy):
+                hla = plan.action[i]
+                index = i
+                break
+        return hla, index
+
+    def making_progress(plan, initial_plan):
+        """
+        Prevents infinite regression of refinements
+
+        (infinite regression of refinements happens when the algorithm finds a plan that
+        its pessimistic reachable set intersects the goal inside a call to decompose on
+        the same plan, in the same circumstances)
+        """
+        for i in range(len(initial_plan)):
+            if plan == initial_plan[i]:
+                return False
+        return True
+
+    def decompose(hierarchy, plan, s_f, reachable_set):
+        solution = []
+        i = max(reachable_set.keys())
+        while plan.action_pes:
+            action = plan.action_pes.pop()
+            if i == 0:
+                return solution
+            s_i = RealWorldPlanningProblem.find_previous_state(s_f, reachable_set, i, action)
+            problem = RealWorldPlanningProblem(s_i, s_f, plan.action)
+            angelic_call = RealWorldPlanningProblem.angelic_search(problem, hierarchy,
+                                                                   [AngelicNode(s_i, Node(None), [action], [action])])
+            if angelic_call:
+                for x in angelic_call:
+                    solution.insert(0, x)
+            else:
+                return None
+            s_f = s_i
+            i -= 1
+        return solution
+
+    def find_previous_state(s_f, reachable_set, i, action):
+        """
+        Given a final state s_f and an action finds a state s_i in reachable_set
+        such that when action is applied to state s_i returns s_f.
+        """
+        s_i = reachable_set[i - 1][0]
+        for state in reachable_set[i - 1]:
+            if s_f in [x for x in RealWorldPlanningProblem.reach_pes(
+                    state, AngelicNode(state, None, [action], [action]))[1]]:
+                s_i = state
+                break
+        return s_i
 
 
 def job_shop_problem():
     """
-    [figure 11.1] JOB-SHOP-PROBLEM
+    [Figure 11.1] JOB-SHOP-PROBLEM
 
     A job-shop scheduling problem for assembling two cars,
     with resource and ordering constraints.
@@ -783,82 +1785,226 @@ def job_shop_problem():
     True
     >>>
     """
-    init = [expr('Car(C1)'),
-            expr('Car(C2)'),
-            expr('Wheels(W1)'),
-            expr('Wheels(W2)'),
-            expr('Engine(E2)'),
-            expr('Engine(E2)')]
-
-    def goal_test(kb):
-        # print(kb.clauses)
-        required = [expr('Has(C1, W1)'), expr('Has(C1, E1)'), expr('Inspected(C1)'),
-                    expr('Has(C2, W2)'), expr('Has(C2, E2)'), expr('Inspected(C2)')]
-        for q in required:
-            # print(q)
-            # print(kb.ask(q))
-            if kb.ask(q) is False:
-                return False
-        return True
-
     resources = {'EngineHoists': 1, 'WheelStations': 2, 'Inspectors': 2, 'LugNuts': 500}
 
-    # AddEngine1
-    precond_pos = []
-    precond_neg = [expr("Has(C1,E1)")]
-    effect_add = [expr("Has(C1,E1)")]
-    effect_rem = []
-    add_engine1 = HLA(expr("AddEngine1"),
-                      [precond_pos, precond_neg], [effect_add, effect_rem],
-                      duration=30, use={'EngineHoists': 1})
-
-    # AddEngine2
-    precond_pos = []
-    precond_neg = [expr("Has(C2,E2)")]
-    effect_add = [expr("Has(C2,E2)")]
-    effect_rem = []
-    add_engine2 = HLA(expr("AddEngine2"),
-                      [precond_pos, precond_neg], [effect_add, effect_rem],
-                      duration=60, use={'EngineHoists': 1})
-
-    # AddWheels1
-    precond_pos = []
-    precond_neg = [expr("Has(C1,W1)")]
-    effect_add = [expr("Has(C1,W1)")]
-    effect_rem = []
-    add_wheels1 = HLA(expr("AddWheels1"),
-                      [precond_pos, precond_neg], [effect_add, effect_rem],
-                      duration=30, consume={'LugNuts': 20}, use={'WheelStations': 1})
-
-    # AddWheels2
-    precond_pos = []
-    precond_neg = [expr("Has(C2,W2)")]
-    effect_add = [expr("Has(C2,W2)")]
-    effect_rem = []
-    add_wheels2 = HLA(expr("AddWheels2"),
-                      [precond_pos, precond_neg], [effect_add, effect_rem],
-                      duration=15, consume={'LugNuts': 20}, use={'WheelStations': 1})
-
-    # Inspect1
-    precond_pos = []
-    precond_neg = [expr("Inspected(C1)")]
-    effect_add = [expr("Inspected(C1)")]
-    effect_rem = []
-    inspect1 = HLA(expr("Inspect1"),
-                   [precond_pos, precond_neg], [effect_add, effect_rem],
-                   duration=10, use={'Inspectors': 1})
-
-    # Inspect2
-    precond_pos = []
-    precond_neg = [expr("Inspected(C2)")]
-    effect_add = [expr("Inspected(C2)")]
-    effect_rem = []
-    inspect2 = HLA(expr("Inspect2"),
-                   [precond_pos, precond_neg], [effect_add, effect_rem],
-                   duration=10, use={'Inspectors': 1})
+    add_engine1 = HLA('AddEngine1', precond='~Has(C1, E1)', effect='Has(C1, E1)', duration=30, use={'EngineHoists': 1})
+    add_engine2 = HLA('AddEngine2', precond='~Has(C2, E2)', effect='Has(C2, E2)', duration=60, use={'EngineHoists': 1})
+    add_wheels1 = HLA('AddWheels1', precond='~Has(C1, W1)', effect='Has(C1, W1)', duration=30, use={'WheelStations': 1},
+                      consume={'LugNuts': 20})
+    add_wheels2 = HLA('AddWheels2', precond='~Has(C2, W2)', effect='Has(C2, W2)', duration=15, use={'WheelStations': 1},
+                      consume={'LugNuts': 20})
+    inspect1 = HLA('Inspect1', precond='~Inspected(C1)', effect='Inspected(C1)', duration=10, use={'Inspectors': 1})
+    inspect2 = HLA('Inspect2', precond='~Inspected(C2)', effect='Inspected(C2)', duration=10, use={'Inspectors': 1})
+
+    actions = [add_engine1, add_engine2, add_wheels1, add_wheels2, inspect1, inspect2]
 
     job_group1 = [add_engine1, add_wheels1, inspect1]
     job_group2 = [add_engine2, add_wheels2, inspect2]
 
-    return Problem(init, [add_engine1, add_engine2, add_wheels1, add_wheels2, inspect1, inspect2],
-                   goal_test, [job_group1, job_group2], resources)
+    return RealWorldPlanningProblem(
+        initial='Car(C1) & Car(C2) & Wheels(W1) & Wheels(W2) & Engine(E2) & Engine(E2) & ~Has(C1, E1) & ~Has(C2, '
+                'E2) & ~Has(C1, W1) & ~Has(C2, W2) & ~Inspected(C1) & ~Inspected(C2)',
+        goals='Has(C1, W1) & Has(C1, E1) & Inspected(C1) & Has(C2, W2) & Has(C2, E2) & Inspected(C2)',
+        actions=actions,
+        jobs=[job_group1, job_group2],
+        resources=resources)
+
+
+def go_to_sfo():
+    """Go to SFO Problem"""
+
+    go_home_sfo1 = HLA('Go(Home, SFO)', precond='At(Home) & Have(Car)', effect='At(SFO) & ~At(Home)')
+    go_home_sfo2 = HLA('Go(Home, SFO)', precond='At(Home)', effect='At(SFO) & ~At(Home)')
+    drive_home_sfoltp = HLA('Drive(Home, SFOLongTermParking)', precond='At(Home) & Have(Car)',
+                            effect='At(SFOLongTermParking) & ~At(Home)')
+    shuttle_sfoltp_sfo = HLA('Shuttle(SFOLongTermParking, SFO)', precond='At(SFOLongTermParking)',
+                             effect='At(SFO) & ~At(SFOLongTermParking)')
+    taxi_home_sfo = HLA('Taxi(Home, SFO)', precond='At(Home)', effect='At(SFO) & ~At(Home)')
+
+    actions = [go_home_sfo1, go_home_sfo2, drive_home_sfoltp, shuttle_sfoltp_sfo, taxi_home_sfo]
+
+    library = {
+        'HLA': [
+            'Go(Home, SFO)',
+            'Go(Home, SFO)',
+            'Drive(Home, SFOLongTermParking)',
+            'Shuttle(SFOLongTermParking, SFO)',
+            'Taxi(Home, SFO)'
+        ],
+        'steps': [
+            ['Drive(Home, SFOLongTermParking)', 'Shuttle(SFOLongTermParking, SFO)'],
+            ['Taxi(Home, SFO)'],
+            [],
+            [],
+            []
+        ],
+        'precond': [
+            ['At(Home) & Have(Car)'],
+            ['At(Home)'],
+            ['At(Home) & Have(Car)'],
+            ['At(SFOLongTermParking)'],
+            ['At(Home)']
+        ],
+        'effect': [
+            ['At(SFO) & ~At(Home)'],
+            ['At(SFO) & ~At(Home)'],
+            ['At(SFOLongTermParking) & ~At(Home)'],
+            ['At(SFO) & ~At(SFOLongTermParking)'],
+            ['At(SFO) & ~At(Home)']]}
+
+    return RealWorldPlanningProblem(initial='At(Home)', goals='At(SFO)', actions=actions), library
+
+
+class AngelicHLA(HLA):
+    """
+    Define Actions for the real-world (that may be refined further), under angelic semantics
+    """
+
+    def __init__(self, action, precond, effect, duration=0, consume=None, use=None):
+        super().__init__(action, precond, effect, duration, consume, use)
+
+    def convert(self, clauses):
+        """
+        Converts strings into Exprs
+        An HLA with angelic semantics can achieve the effects of simple HLA's (add / remove a variable)
+        and furthermore can have following effects on the variables:
+            Possibly add variable    ( $+ )
+            Possibly remove variable ( $- )
+            Possibly add or remove a variable ( $$ )
+
+        Overrides HLA.convert function
+        """
+        lib = {'~': 'Not',
+               '$+': 'PosYes',
+               '$-': 'PosNot',
+               '$$': 'PosYesNot'}
+
+        if isinstance(clauses, Expr):
+            clauses = conjuncts(clauses)
+            for i in range(len(clauses)):
+                for ch in lib.keys():
+                    if clauses[i].op == ch:
+                        clauses[i] = expr(lib[ch] + str(clauses[i].args[0]))
+
+        elif isinstance(clauses, str):
+            for ch in lib.keys():
+                clauses = clauses.replace(ch, lib[ch])
+            if len(clauses) > 0:
+                clauses = expr(clauses)
+
+            try:
+                clauses = conjuncts(clauses)
+            except AttributeError:
+                pass
+
+        return clauses
+
+    def angelic_action(self):
+        """
+        Converts a high level action (HLA) with angelic semantics into all of its corresponding high level actions (HLA).
+        An HLA with angelic semantics can achieve the effects of simple HLA's (add / remove a variable)
+        and furthermore can have following effects for each variable:
+
+            Possibly add variable ( $+: 'PosYes' )        --> corresponds to two HLAs:
+                                                                HLA_1: add variable
+                                                                HLA_2: leave variable unchanged
+
+            Possibly remove variable ( $-: 'PosNot' )     --> corresponds to two HLAs:
+                                                                HLA_1: remove variable
+                                                                HLA_2: leave variable unchanged
+
+            Possibly add / remove a variable ( $$: 'PosYesNot' )  --> corresponds to three HLAs:
+                                                                        HLA_1: add variable
+                                                                        HLA_2: remove variable
+                                                                        HLA_3: leave variable unchanged
+
+
+            example: the angelic action with effects possibly add A and possibly add or remove B corresponds to the
+            following 6 effects of HLAs:
+
+
+            '$+A & $$B':    HLA_1: 'A & B'   (add A and add B)
+                            HLA_2: 'A & ~B'  (add A and remove B)
+                            HLA_3: 'A'       (add A)
+                            HLA_4: 'B'       (add B)
+                            HLA_5: '~B'      (remove B)
+                            HLA_6: ' '       (no effect)
+
+        """
+
+        effects = [[]]
+        for clause in self.effect:
+            (n, w) = AngelicHLA.compute_parameters(clause)
+            effects = effects * n  # create n copies of effects
+            it = range(1)
+            if len(effects) != 0:
+                # split effects into n sublists (separate n copies created in compute_parameters)
+                it = range(len(effects) // n)
+            for i in it:
+                if effects[i]:
+                    if clause.args:
+                        effects[i] = expr(str(effects[i]) + '&' + str(
+                            Expr(clause.op[w:], clause.args[0])))  # make changes in the ith part of effects
+                        if n == 3:
+                            effects[i + len(effects) // 3] = expr(
+                                str(effects[i + len(effects) // 3]) + '&' + str(Expr(clause.op[6:], clause.args[0])))
+                    else:
+                        effects[i] = expr(
+                            str(effects[i]) + '&' + str(expr(clause.op[w:])))  # make changes in the ith part of effects
+                        if n == 3:
+                            effects[i + len(effects) // 3] = expr(
+                                str(effects[i + len(effects) // 3]) + '&' + str(expr(clause.op[6:])))
+
+                else:
+                    if clause.args:
+                        effects[i] = Expr(clause.op[w:], clause.args[0])  # make changes in the ith part of effects
+                        if n == 3:
+                            effects[i + len(effects) // 3] = Expr(clause.op[6:], clause.args[0])
+
+                    else:
+                        effects[i] = expr(clause.op[w:])  # make changes in the ith part of effects
+                        if n == 3:
+                            effects[i + len(effects) // 3] = expr(clause.op[6:])
+
+        return [HLA(Expr(self.name, self.args), self.precond, effects[i]) for i in range(len(effects))]
+
+    def compute_parameters(clause):
+        """
+        computes n,w
+
+        n = number of HLA effects that the angelic HLA corresponds to
+        w = length of representation of angelic HLA effect
+
+                    n = 1, if effect is add
+                    n = 1, if effect is remove
+                    n = 2, if effect is possibly add
+                    n = 2, if effect is possibly remove
+                    n = 3, if effect is possibly add or remove
+
+        """
+        if clause.op[:9] == 'PosYesNot':
+            # possibly add/remove variable: three possible effects for the variable
+            n = 3
+            w = 9
+        elif clause.op[:6] == 'PosYes':  # possibly add variable: two possible effects for the variable
+            n = 2
+            w = 6
+        elif clause.op[:6] == 'PosNot':  # possibly remove variable: two possible effects for the variable
+            n = 2
+            w = 3  # We want to keep 'Not' from 'PosNot' when adding action
+        else:  # variable or ~variable
+            n = 1
+            w = 0
+        return n, w
+
+
+class AngelicNode(Node):
+    """
+    Extends the class Node.
+    self.action:     contains the optimistic description of an angelic HLA
+    self.action_pes: contains the pessimistic description of an angelic HLA
+    """
+
+    def __init__(self, state, parent=None, action_opt=None, action_pes=None, path_cost=0):
+        super().__init__(state, parent, action_opt, path_cost)
+        self.action_pes = action_pes
diff --git a/planning_angelic_search.ipynb b/planning_angelic_search.ipynb
new file mode 100644
index 000000000..71408e1d9
--- /dev/null
+++ b/planning_angelic_search.ipynb
@@ -0,0 +1,638 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Angelic Search \n",
+    "\n",
+    "Search using angelic semantics (is a hierarchical search), where the agent chooses the implementation of the HLA's. def  angelic_search ( problem ,  hierarchy ,  initialPlan ): \n",
+       "        """ \n",
+       "\t[Figure 11.8] A hierarchical planning algorithm that uses angelic semantics to identify and \n",
+       "\tcommit to high-level plans that work while avoiding high-level plans that don’t.  \n",
+       "\tThe predicate MAKING-PROGRESS checks to make sure that we aren’t stuck in an infinite regression \n",
+       "\tof refinements.  \n",
+       "\tAt top level, call ANGELIC -SEARCH with [Act ] as the initialPlan . \n",
+       "\n",
+       "        initialPlan contains a sequence of HLA's with angelic semantics  \n",
+       "\n",
+       "        The possible effects of an angelic HLA in initialPlan are :  \n",
+       "        ~ : effect remove \n",
+       "        $+: effect possibly add \n",
+       "        $-: effect possibly remove \n",
+       "        $$: possibly add or remove \n",
+       "\t""" \n",
+       "        frontier  =  deque ( initialPlan ) \n",
+       "        while  True :  \n",
+       "            if  not  frontier : \n",
+       "                return  None \n",
+       "            plan  =  frontier . popleft ()  # sequence of HLA/Angelic HLA's  \n",
+       "            opt_reachable_set  =  Problem . reach_opt ( problem . init ,  plan ) \n",
+       "            pes_reachable_set  =  Problem . reach_pes ( problem . init ,  plan ) \n",
+       "            if  problem . intersects_goal ( opt_reachable_set ):  \n",
+       "                if  Problem . is_primitive (  plan ,  hierarchy  ):  \n",
+       "                    return  ([ x  for  x  in  plan . action ]) \n",
+       "                guaranteed  =  problem . intersects_goal ( pes_reachable_set )  \n",
+       "                if  guaranteed  and  Problem . making_progress ( plan ,  initialPlan ): \n",
+       "                    final_state  =  guaranteed [ 0 ]  # any element of guaranteed  \n",
+       "                    #print('decompose') \n",
+       "                    return  Problem . decompose ( hierarchy ,  problem ,  plan ,  final_state ,  pes_reachable_set ) \n",
+       "                ( hla ,  index )  =  Problem . find_hla ( plan ,  hierarchy )  # there should be at least one HLA/Angelic_HLA, otherwise plan would be primitive. \n",
+       "                prefix  =  plan . action [: index ] \n",
+       "                suffix  =  plan . action [ index + 1 :] \n",
+       "                outcome  =  Problem ( Problem . result ( problem . init ,  prefix ),  problem . goals  ,  problem . actions  ) \n",
+       "                for  sequence  in  Problem . refinements ( hla ,  outcome ,  hierarchy ):  # find refinements \n",
+       "                    frontier . append ( Angelic_Node ( outcome . init ,  plan ,  prefix  +  sequence +  suffix ,  prefix + sequence + suffix )) \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "psource(Problem.angelic_search)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "\n",
+    "### Decompose \n",
+    "-  Finds recursively the sequence of states and actions that lead us from initial state to goal.\n",
+    "-  For each of the above actions we find their refinements, if they are not primitive, by calling the angelic_search function. \n",
+    "   If there are no refinements, return None.\n",
+    "   \n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "\n",
+       "\n",
+       "\n",
+       "\n",
+       "  def  decompose ( hierarchy ,  s_0 ,  plan ,  s_f ,  reachable_set ): \n",
+       "        solution  =  []  \n",
+       "        i  =  max ( reachable_set . keys ()) \n",
+       "        while  plan . action_pes :  \n",
+       "            action  =  plan . action_pes . pop () \n",
+       "            if  ( i == 0 ):  \n",
+       "                return  solution \n",
+       "            s_i  =  Problem . find_previous_state ( s_f ,  reachable_set , i ,  action )  \n",
+       "            problem  =  Problem ( s_i ,  s_f  ,  plan . action ) \n",
+       "            angelic_call  =  Problem . angelic_search ( problem ,  hierarchy ,  [ Angelic_Node ( s_i ,  Node ( None ),  [ action ],[ action ])]) \n",
+       "            if  angelic_call : \n",
+       "                for  x  in  angelic_call :  \n",
+       "                    solution . insert ( 0 , x ) \n",
+       "            else :  \n",
+       "                return  None \n",
+       "            s_f  =  s_i \n",
+       "            i -= 1 \n",
+       "        return  solution \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "psource(Problem.decompose)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Example\n",
+    "\n",
+    "Suppose that somebody wants to get to the airport. \n",
+    "The possible ways to do so are either to get a taxi or to drive to the airport. class  Level : \n",
+       "    """ \n",
+       "    Contains the state of the planning problem \n",
+       "    and exhaustive list of actions which use the \n",
+       "    states as pre-condition. \n",
+       "    """ \n",
+       "\n",
+       "    def  __init__ ( self ,  kb ): \n",
+       "        """Initializes variables to hold state and action details of a level""" \n",
+       "\n",
+       "        self . kb  =  kb \n",
+       "        # current state \n",
+       "        self . current_state  =  kb . clauses \n",
+       "        # current action to state link \n",
+       "        self . current_action_links  =  {} \n",
+       "        # current state to action link \n",
+       "        self . current_state_links  =  {} \n",
+       "        # current action to next state link \n",
+       "        self . next_action_links  =  {} \n",
+       "        # next state to current action link \n",
+       "        self . next_state_links  =  {} \n",
+       "        # mutually exclusive actions \n",
+       "        self . mutex  =  [] \n",
+       "\n",
+       "    def  __call__ ( self ,  actions ,  objects ): \n",
+       "        self . build ( actions ,  objects ) \n",
+       "        self . find_mutex () \n",
+       "\n",
+       "    def  separate ( self ,  e ): \n",
+       "        """Separates an iterable of elements into positive and negative parts""" \n",
+       "\n",
+       "        positive  =  [] \n",
+       "        negative  =  [] \n",
+       "        for  clause  in  e : \n",
+       "            if  clause . op [: 3 ]  ==  'Not' : \n",
+       "                negative . append ( clause ) \n",
+       "            else : \n",
+       "                positive . append ( clause ) \n",
+       "        return  positive ,  negative \n",
+       "\n",
+       "    def  find_mutex ( self ): \n",
+       "        """Finds mutually exclusive actions""" \n",
+       "\n",
+       "        # Inconsistent effects \n",
+       "        pos_nsl ,  neg_nsl  =  self . separate ( self . next_state_links ) \n",
+       "\n",
+       "        for  negeff  in  neg_nsl : \n",
+       "            new_negeff  =  Expr ( negeff . op [ 3 :],  * negeff . args ) \n",
+       "            for  poseff  in  pos_nsl : \n",
+       "                if  new_negeff  ==  poseff : \n",
+       "                    for  a  in  self . next_state_links [ poseff ]: \n",
+       "                        for  b  in  self . next_state_links [ negeff ]: \n",
+       "                            if  { a ,  b }  not  in  self . mutex : \n",
+       "                                self . mutex . append ({ a ,  b }) \n",
+       "\n",
+       "        # Interference will be calculated with the last step \n",
+       "        pos_csl ,  neg_csl  =  self . separate ( self . current_state_links ) \n",
+       "\n",
+       "        # Competing needs \n",
+       "        for  posprecond  in  pos_csl : \n",
+       "            for  negprecond  in  neg_csl : \n",
+       "                new_negprecond  =  Expr ( negprecond . op [ 3 :],  * negprecond . args ) \n",
+       "                if  new_negprecond  ==  posprecond : \n",
+       "                    for  a  in  self . current_state_links [ posprecond ]: \n",
+       "                        for  b  in  self . current_state_links [ negprecond ]: \n",
+       "                            if  { a ,  b }  not  in  self . mutex : \n",
+       "                                self . mutex . append ({ a ,  b }) \n",
+       "\n",
+       "        # Inconsistent support \n",
+       "        state_mutex  =  [] \n",
+       "        for  pair  in  self . mutex : \n",
+       "            next_state_0  =  self . next_action_links [ list ( pair )[ 0 ]] \n",
+       "            if  len ( pair )  ==  2 : \n",
+       "                next_state_1  =  self . next_action_links [ list ( pair )[ 1 ]] \n",
+       "            else : \n",
+       "                next_state_1  =  self . next_action_links [ list ( pair )[ 0 ]] \n",
+       "            if  ( len ( next_state_0 )  ==  1 )  and  ( len ( next_state_1 )  ==  1 ): \n",
+       "                state_mutex . append ({ next_state_0 [ 0 ],  next_state_1 [ 0 ]}) \n",
+       "        \n",
+       "        self . mutex  =  self . mutex  +  state_mutex \n",
+       "\n",
+       "    def  build ( self ,  actions ,  objects ): \n",
+       "        """Populates the lists and dictionaries containing the state action dependencies""" \n",
+       "\n",
+       "        for  clause  in  self . current_state : \n",
+       "            p_expr  =  Expr ( 'P'  +  clause . op ,  * clause . args ) \n",
+       "            self . current_action_links [ p_expr ]  =  [ clause ] \n",
+       "            self . next_action_links [ p_expr ]  =  [ clause ] \n",
+       "            self . current_state_links [ clause ]  =  [ p_expr ] \n",
+       "            self . next_state_links [ clause ]  =  [ p_expr ] \n",
+       "\n",
+       "        for  a  in  actions : \n",
+       "            num_args  =  len ( a . args ) \n",
+       "            possible_args  =  tuple ( itertools . permutations ( objects ,  num_args )) \n",
+       "\n",
+       "            for  arg  in  possible_args : \n",
+       "                if  a . check_precond ( self . kb ,  arg ): \n",
+       "                    for  num ,  symbol  in  enumerate ( a . args ): \n",
+       "                        if  not  symbol . op . islower (): \n",
+       "                            arg  =  list ( arg ) \n",
+       "                            arg [ num ]  =  symbol \n",
+       "                            arg  =  tuple ( arg ) \n",
+       "\n",
+       "                    new_action  =  a . substitute ( Expr ( a . name ,  * a . args ),  arg ) \n",
+       "                    self . current_action_links [ new_action ]  =  [] \n",
+       "\n",
+       "                    for  clause  in  a . precond : \n",
+       "                        new_clause  =  a . substitute ( clause ,  arg ) \n",
+       "                        self . current_action_links [ new_action ] . append ( new_clause ) \n",
+       "                        if  new_clause  in  self . current_state_links : \n",
+       "                            self . current_state_links [ new_clause ] . append ( new_action ) \n",
+       "                        else : \n",
+       "                            self . current_state_links [ new_clause ]  =  [ new_action ] \n",
+       "                   \n",
+       "                    self . next_action_links [ new_action ]  =  [] \n",
+       "                    for  clause  in  a . effect : \n",
+       "                        new_clause  =  a . substitute ( clause ,  arg ) \n",
+       "\n",
+       "                        self . next_action_links [ new_action ] . append ( new_clause ) \n",
+       "                        if  new_clause  in  self . next_state_links : \n",
+       "                            self . next_state_links [ new_clause ] . append ( new_action ) \n",
+       "                        else : \n",
+       "                            self . next_state_links [ new_clause ]  =  [ new_action ] \n",
+       "\n",
+       "    def  perform_actions ( self ): \n",
+       "        """Performs the necessary actions and returns a new Level""" \n",
+       "\n",
+       "        new_kb  =  FolKB ( list ( set ( self . next_state_links . keys ()))) \n",
+       "        return  Level ( new_kb ) \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "psource(Level)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Each level stores the following data\n",
+    "1. The current state of the level in `current_state`\n",
+    "2. Links from an action to its preconditions in `current_action_links`\n",
+    "3. Links from a state to the possible actions in that state in `current_state_links`\n",
+    "4. Links from each action to its effects in `next_action_links`\n",
+    "5. Links from each possible next state from each action in `next_state_links`. This stores the same information as the `current_action_links` of the next level.\n",
+    "6. Mutex links in `mutex`.\n",
+    "class  Graph : \n",
+       "    """ \n",
+       "    Contains levels of state and actions \n",
+       "    Used in graph planning algorithm to extract a solution \n",
+       "    """ \n",
+       "\n",
+       "    def  __init__ ( self ,  planningproblem ): \n",
+       "        self . planningproblem  =  planningproblem \n",
+       "        self . kb  =  FolKB ( planningproblem . init ) \n",
+       "        self . levels  =  [ Level ( self . kb )] \n",
+       "        self . objects  =  set ( arg  for  clause  in  self . kb . clauses  for  arg  in  clause . args ) \n",
+       "\n",
+       "    def  __call__ ( self ): \n",
+       "        self . expand_graph () \n",
+       "\n",
+       "    def  expand_graph ( self ): \n",
+       "        """Expands the graph by a level""" \n",
+       "\n",
+       "        last_level  =  self . levels [ - 1 ] \n",
+       "        last_level ( self . planningproblem . actions ,  self . objects ) \n",
+       "        self . levels . append ( last_level . perform_actions ()) \n",
+       "\n",
+       "    def  non_mutex_goals ( self ,  goals ,  index ): \n",
+       "        """Checks whether the goals are mutually exclusive""" \n",
+       "\n",
+       "        goal_perm  =  itertools . combinations ( goals ,  2 ) \n",
+       "        for  g  in  goal_perm : \n",
+       "            if  set ( g )  in  self . levels [ index ] . mutex : \n",
+       "                return  False \n",
+       "        return  True \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "psource(Graph)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The class stores a problem definition in `planningproblem`, \n",
+    "a knowledge base in `kb`, \n",
+    "a list of `Level` objects in `levels` and \n",
+    "all the possible arguments found in the initial state of the problem in `objects`.\n",
+    "class  GraphPlan : \n",
+       "    """ \n",
+       "    Class for formulation GraphPlan algorithm \n",
+       "    Constructs a graph of state and action space \n",
+       "    Returns solution for the planning problem \n",
+       "    """ \n",
+       "\n",
+       "    def  __init__ ( self ,  planningproblem ): \n",
+       "        self . graph  =  Graph ( planningproblem ) \n",
+       "        self . nogoods  =  [] \n",
+       "        self . solution  =  [] \n",
+       "\n",
+       "    def  check_leveloff ( self ): \n",
+       "        """Checks if the graph has levelled off""" \n",
+       "\n",
+       "        check  =  ( set ( self . graph . levels [ - 1 ] . current_state )  ==  set ( self . graph . levels [ - 2 ] . current_state )) \n",
+       "\n",
+       "        if  check : \n",
+       "            return  True \n",
+       "\n",
+       "    def  extract_solution ( self ,  goals ,  index ): \n",
+       "        """Extracts the solution""" \n",
+       "\n",
+       "        level  =  self . graph . levels [ index ]     \n",
+       "        if  not  self . graph . non_mutex_goals ( goals ,  index ): \n",
+       "            self . nogoods . append (( level ,  goals )) \n",
+       "            return \n",
+       "\n",
+       "        level  =  self . graph . levels [ index  -  1 ]     \n",
+       "\n",
+       "        # Create all combinations of actions that satisfy the goal     \n",
+       "        actions  =  [] \n",
+       "        for  goal  in  goals : \n",
+       "            actions . append ( level . next_state_links [ goal ])     \n",
+       "\n",
+       "        all_actions  =  list ( itertools . product ( * actions ))     \n",
+       "\n",
+       "        # Filter out non-mutex actions \n",
+       "        non_mutex_actions  =  []     \n",
+       "        for  action_tuple  in  all_actions : \n",
+       "            action_pairs  =  itertools . combinations ( list ( set ( action_tuple )),  2 )         \n",
+       "            non_mutex_actions . append ( list ( set ( action_tuple )))         \n",
+       "            for  pair  in  action_pairs :             \n",
+       "                if  set ( pair )  in  level . mutex : \n",
+       "                    non_mutex_actions . pop ( - 1 ) \n",
+       "                    break \n",
+       "    \n",
+       "\n",
+       "        # Recursion \n",
+       "        for  action_list  in  non_mutex_actions :         \n",
+       "            if  [ action_list ,  index ]  not  in  self . solution : \n",
+       "                self . solution . append ([ action_list ,  index ]) \n",
+       "\n",
+       "                new_goals  =  [] \n",
+       "                for  act  in  set ( action_list ):                 \n",
+       "                    if  act  in  level . current_action_links : \n",
+       "                        new_goals  =  new_goals  +  level . current_action_links [ act ] \n",
+       "\n",
+       "                if  abs ( index )  +  1  ==  len ( self . graph . levels ): \n",
+       "                    return \n",
+       "                elif  ( level ,  new_goals )  in  self . nogoods : \n",
+       "                    return \n",
+       "                else : \n",
+       "                    self . extract_solution ( new_goals ,  index  -  1 ) \n",
+       "\n",
+       "        # Level-Order multiple solutions \n",
+       "        solution  =  [] \n",
+       "        for  item  in  self . solution : \n",
+       "            if  item [ 1 ]  ==  - 1 : \n",
+       "                solution . append ([]) \n",
+       "                solution [ - 1 ] . append ( item [ 0 ]) \n",
+       "            else : \n",
+       "                solution [ - 1 ] . append ( item [ 0 ]) \n",
+       "\n",
+       "        for  num ,  item  in  enumerate ( solution ): \n",
+       "            item . reverse () \n",
+       "            solution [ num ]  =  item \n",
+       "\n",
+       "        return  solution \n",
+       "\n",
+       "    def  goal_test ( self ,  kb ): \n",
+       "        return  all ( kb . ask ( q )  is  not  False  for  q  in  self . graph . planningproblem . goals ) \n",
+       "\n",
+       "    def  execute ( self ): \n",
+       "        """Executes the GraphPlan algorithm for the given problem""" \n",
+       "\n",
+       "        while  True : \n",
+       "            self . graph . expand_graph () \n",
+       "            if  ( self . goal_test ( self . graph . levels [ - 1 ] . kb )  and  self . graph . non_mutex_goals ( self . graph . planningproblem . goals ,  - 1 )): \n",
+       "                solution  =  self . extract_solution ( self . graph . planningproblem . goals ,  - 1 ) \n",
+       "                if  solution : \n",
+       "                    return  solution \n",
+       "            \n",
+       "            if  len ( self . graph . levels )  >=  2  and  self . check_leveloff (): \n",
+       "                return  None \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "psource(GraphPlan)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Given a planning problem defined as a PlanningProblem, `GraphPlan` creates a planning graph stored in `graph` and expands it till it reaches a state where all its required goals are present simultaneously without mutual exclusivity.\n",
+    "def  air_cargo_graphplan (): \n",
+       "    """Solves the air cargo problem using GraphPlan""" \n",
+       "    return  GraphPlan ( air_cargo ()) . execute () \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "psource(air_cargo_graphplan)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let's instantiate the problem and find a solution using this helper function."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 19,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[[[Load(C2, P2, JFK),\n",
+       "   PAirport(SFO),\n",
+       "   PAirport(JFK),\n",
+       "   PPlane(P2),\n",
+       "   PPlane(P1),\n",
+       "   Fly(P2, JFK, SFO),\n",
+       "   PCargo(C2),\n",
+       "   Load(C1, P1, SFO),\n",
+       "   Fly(P1, SFO, JFK),\n",
+       "   PCargo(C1)],\n",
+       "  [Unload(C2, P2, SFO), Unload(C1, P1, JFK)]]]"
+      ]
+     },
+     "execution_count": 19,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "airCargoG = air_cargo_graphplan()\n",
+    "airCargoG"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Each element in the solution is a valid action.\n",
+    "The solution is separated into lists for each level.\n",
+    "The actions prefixed with a 'P' are persistence actions and can be ignored.\n",
+    "They simply carry certain states forward.\n",
+    "We have another helper function `linearize` that presents the solution in a more readable format, much like a total-order planner, but it is _not_ a total-order planner."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 20,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[Load(C2, P2, JFK),\n",
+       " Fly(P2, JFK, SFO),\n",
+       " Load(C1, P1, SFO),\n",
+       " Fly(P1, SFO, JFK),\n",
+       " Unload(C2, P2, SFO),\n",
+       " Unload(C1, P1, JFK)]"
+      ]
+     },
+     "execution_count": 20,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "linearize(airCargoG)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Indeed, this is a correct solution.\n",
+    "def  refinements ( hla ,  state ,  library ):   # refinements may be (multiple) HLA themselves ... \n",
+       "        """ \n",
+       "        state is a Problem, containing the current state kb \n",
+       "        library is a dictionary containing details for every possible refinement. eg: \n",
+       "        { \n",
+       "        'HLA': [ \n",
+       "            'Go(Home, SFO)', \n",
+       "            'Go(Home, SFO)', \n",
+       "            'Drive(Home, SFOLongTermParking)', \n",
+       "            'Shuttle(SFOLongTermParking, SFO)', \n",
+       "            'Taxi(Home, SFO)' \n",
+       "            ], \n",
+       "        'steps': [ \n",
+       "            ['Drive(Home, SFOLongTermParking)', 'Shuttle(SFOLongTermParking, SFO)'], \n",
+       "            ['Taxi(Home, SFO)'], \n",
+       "            [], \n",
+       "            [], \n",
+       "            [] \n",
+       "            ], \n",
+       "        # empty refinements indicate a primitive action \n",
+       "        'precond': [ \n",
+       "            ['At(Home) & Have(Car)'], \n",
+       "            ['At(Home)'], \n",
+       "            ['At(Home) & Have(Car)'], \n",
+       "            ['At(SFOLongTermParking)'], \n",
+       "            ['At(Home)'] \n",
+       "            ], \n",
+       "        'effect': [ \n",
+       "            ['At(SFO) & ~At(Home)'], \n",
+       "            ['At(SFO) & ~At(Home)'], \n",
+       "            ['At(SFOLongTermParking) & ~At(Home)'], \n",
+       "            ['At(SFO) & ~At(SFOLongTermParking)'], \n",
+       "            ['At(SFO) & ~At(Home)'] \n",
+       "            ] \n",
+       "        } \n",
+       "        """ \n",
+       "        e  =  Expr ( hla . name ,  hla . args ) \n",
+       "        indices  =  [ i  for  i ,  x  in  enumerate ( library [ 'HLA' ])  if  expr ( x ) . op  ==  hla . name ] \n",
+       "        for  i  in  indices : \n",
+       "            actions  =  [] \n",
+       "            for  j  in  range ( len ( library [ 'steps' ][ i ])): \n",
+       "                # find the index of the step [j]  of the HLA  \n",
+       "                index_step  =  [ k  for  k , x  in  enumerate ( library [ 'HLA' ])  if  x  ==  library [ 'steps' ][ i ][ j ]][ 0 ] \n",
+       "                precond  =  library [ 'precond' ][ index_step ][ 0 ]  # preconditions of step [j] \n",
+       "                effect  =  library [ 'effect' ][ index_step ][ 0 ]  # effect of step [j] \n",
+       "                actions . append ( HLA ( library [ 'steps' ][ i ][ j ],  precond ,  effect )) \n",
+       "            yield  actions \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "psource(Problem.refinements)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Hierarchical search \n",
+    "\n",
+    "Hierarchical search is a breadth-first implementation of hierarchical forward planning search in the space of refinements. (i.e. repeatedly choose an HLA in the current plan and replace it with one of its refinements, until the plan achieves the goal.) \n",
+    "\n",
+    "def  hierarchical_search ( problem ,  hierarchy ): \n",
+       "        """ \n",
+       "        [Figure 11.5] 'Hierarchical Search, a Breadth First Search implementation of Hierarchical \n",
+       "        Forward Planning Search' \n",
+       "        The problem is a real-world problem defined by the problem class, and the hierarchy is \n",
+       "        a dictionary of HLA - refinements (see refinements generator for details) \n",
+       "        """ \n",
+       "        act  =  Node ( problem . init ,  None ,  [ problem . actions [ 0 ]]) \n",
+       "        frontier  =  deque () \n",
+       "        frontier . append ( act ) \n",
+       "        while  True : \n",
+       "            if  not  frontier : \n",
+       "                return  None \n",
+       "            plan  =  frontier . popleft () \n",
+       "            ( hla ,  index )  =  Problem . find_hla ( plan ,  hierarchy )  # finds the first non primitive hla in plan actions \n",
+       "            prefix  =  plan . action [: index ] \n",
+       "            outcome  =  Problem ( Problem . result ( problem . init ,  prefix ),  problem . goals  ,  problem . actions  ) \n",
+       "            suffix  =  plan . action [ index + 1 :] \n",
+       "            if  not  hla :  # hla is None and plan is primitive \n",
+       "                if  outcome . goal_test (): \n",
+       "                    return  plan . action \n",
+       "            else : \n",
+       "                for  sequence  in  Problem . refinements ( hla ,  outcome ,  hierarchy ):  # find refinements \n",
+       "                    frontier . append ( Node ( outcome . init ,  plan ,  prefix  +  sequence +  suffix )) \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "psource(Problem.hierarchical_search)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Example\n",
+    "\n",
+    "Suppose that somebody wants to get to the airport. \n",
+    "The possible ways to do so are either to get a taxi or to drive to the airport. class  PartialOrderPlanner : \n",
+       "\n",
+       "    def  __init__ ( self ,  planningproblem ): \n",
+       "        self . planningproblem  =  planningproblem \n",
+       "        self . initialize () \n",
+       "\n",
+       "    def  initialize ( self ): \n",
+       "        """Initialize all variables""" \n",
+       "        self . causal_links  =  [] \n",
+       "        self . start  =  Action ( 'Start' ,  [],  self . planningproblem . init ) \n",
+       "        self . finish  =  Action ( 'Finish' ,  self . planningproblem . goals ,  []) \n",
+       "        self . actions  =  set () \n",
+       "        self . actions . add ( self . start ) \n",
+       "        self . actions . add ( self . finish ) \n",
+       "        self . constraints  =  set () \n",
+       "        self . constraints . add (( self . start ,  self . finish )) \n",
+       "        self . agenda  =  set () \n",
+       "        for  precond  in  self . finish . precond : \n",
+       "            self . agenda . add (( precond ,  self . finish )) \n",
+       "        self . expanded_actions  =  self . expand_actions () \n",
+       "\n",
+       "    def  expand_actions ( self ,  name = None ): \n",
+       "        """Generate all possible actions with variable bindings for precondition selection heuristic""" \n",
+       "\n",
+       "        objects  =  set ( arg  for  clause  in  self . planningproblem . init  for  arg  in  clause . args ) \n",
+       "        expansions  =  [] \n",
+       "        action_list  =  [] \n",
+       "        if  name  is  not  None : \n",
+       "            for  action  in  self . planningproblem . actions : \n",
+       "                if  str ( action . name )  ==  name : \n",
+       "                    action_list . append ( action ) \n",
+       "        else : \n",
+       "            action_list  =  self . planningproblem . actions \n",
+       "\n",
+       "        for  action  in  action_list : \n",
+       "            for  permutation  in  itertools . permutations ( objects ,  len ( action . args )): \n",
+       "                bindings  =  unify ( Expr ( action . name ,  * action . args ),  Expr ( action . name ,  * permutation )) \n",
+       "                if  bindings  is  not  None : \n",
+       "                    new_args  =  [] \n",
+       "                    for  arg  in  action . args : \n",
+       "                        if  arg  in  bindings : \n",
+       "                            new_args . append ( bindings [ arg ]) \n",
+       "                        else : \n",
+       "                            new_args . append ( arg ) \n",
+       "                    new_expr  =  Expr ( str ( action . name ),  * new_args ) \n",
+       "                    new_preconds  =  [] \n",
+       "                    for  precond  in  action . precond : \n",
+       "                        new_precond_args  =  [] \n",
+       "                        for  arg  in  precond . args : \n",
+       "                            if  arg  in  bindings : \n",
+       "                                new_precond_args . append ( bindings [ arg ]) \n",
+       "                            else : \n",
+       "                                new_precond_args . append ( arg ) \n",
+       "                        new_precond  =  Expr ( str ( precond . op ),  * new_precond_args ) \n",
+       "                        new_preconds . append ( new_precond ) \n",
+       "                    new_effects  =  [] \n",
+       "                    for  effect  in  action . effect : \n",
+       "                        new_effect_args  =  [] \n",
+       "                        for  arg  in  effect . args : \n",
+       "                            if  arg  in  bindings : \n",
+       "                                new_effect_args . append ( bindings [ arg ]) \n",
+       "                            else : \n",
+       "                                new_effect_args . append ( arg ) \n",
+       "                        new_effect  =  Expr ( str ( effect . op ),  * new_effect_args ) \n",
+       "                        new_effects . append ( new_effect ) \n",
+       "                    expansions . append ( Action ( new_expr ,  new_preconds ,  new_effects )) \n",
+       "\n",
+       "        return  expansions \n",
+       "\n",
+       "    def  find_open_precondition ( self ): \n",
+       "        """Find open precondition with the least number of possible actions""" \n",
+       "\n",
+       "        number_of_ways  =  dict () \n",
+       "        actions_for_precondition  =  dict () \n",
+       "        for  element  in  self . agenda : \n",
+       "            open_precondition  =  element [ 0 ] \n",
+       "            possible_actions  =  list ( self . actions )  +  self . expanded_actions \n",
+       "            for  action  in  possible_actions : \n",
+       "                for  effect  in  action . effect : \n",
+       "                    if  effect  ==  open_precondition : \n",
+       "                        if  open_precondition  in  number_of_ways : \n",
+       "                            number_of_ways [ open_precondition ]  +=  1 \n",
+       "                            actions_for_precondition [ open_precondition ] . append ( action ) \n",
+       "                        else : \n",
+       "                            number_of_ways [ open_precondition ]  =  1 \n",
+       "                            actions_for_precondition [ open_precondition ]  =  [ action ] \n",
+       "\n",
+       "        number  =  sorted ( number_of_ways ,  key = number_of_ways . __getitem__ ) \n",
+       "        \n",
+       "        for  k ,  v  in  number_of_ways . items (): \n",
+       "            if  v  ==  0 : \n",
+       "                return  None ,  None ,  None \n",
+       "\n",
+       "        act1  =  None \n",
+       "        for  element  in  self . agenda : \n",
+       "            if  element [ 0 ]  ==  number [ 0 ]: \n",
+       "                act1  =  element [ 1 ] \n",
+       "                break \n",
+       "\n",
+       "        if  number [ 0 ]  in  self . expanded_actions : \n",
+       "            self . expanded_actions . remove ( number [ 0 ]) \n",
+       "\n",
+       "        return  number [ 0 ],  act1 ,  actions_for_precondition [ number [ 0 ]] \n",
+       "\n",
+       "    def  find_action_for_precondition ( self ,  oprec ): \n",
+       "        """Find action for a given precondition""" \n",
+       "\n",
+       "        # either \n",
+       "        #   choose act0 E Actions such that act0 achieves G \n",
+       "        for  action  in  self . actions : \n",
+       "            for  effect  in  action . effect : \n",
+       "                if  effect  ==  oprec : \n",
+       "                    return  action ,  0 \n",
+       "\n",
+       "        # or \n",
+       "        #   choose act0 E Actions such that act0 achieves G \n",
+       "        for  action  in  self . planningproblem . actions : \n",
+       "            for  effect  in  action . effect : \n",
+       "                if  effect . op  ==  oprec . op : \n",
+       "                    bindings  =  unify ( effect ,  oprec ) \n",
+       "                    if  bindings  is  None : \n",
+       "                        break \n",
+       "                    return  action ,  bindings \n",
+       "\n",
+       "    def  generate_expr ( self ,  clause ,  bindings ): \n",
+       "        """Generate atomic expression from generic expression given variable bindings""" \n",
+       "\n",
+       "        new_args  =  [] \n",
+       "        for  arg  in  clause . args : \n",
+       "            if  arg  in  bindings : \n",
+       "                new_args . append ( bindings [ arg ]) \n",
+       "            else : \n",
+       "                new_args . append ( arg ) \n",
+       "\n",
+       "        try : \n",
+       "            return  Expr ( str ( clause . name ),  * new_args ) \n",
+       "        except : \n",
+       "            return  Expr ( str ( clause . op ),  * new_args ) \n",
+       "        \n",
+       "    def  generate_action_object ( self ,  action ,  bindings ): \n",
+       "        """Generate action object given a generic action andvariable bindings""" \n",
+       "\n",
+       "        # if bindings is 0, it means the action already exists in self.actions \n",
+       "        if  bindings  ==  0 : \n",
+       "            return  action \n",
+       "\n",
+       "        # bindings cannot be None \n",
+       "        else : \n",
+       "            new_expr  =  self . generate_expr ( action ,  bindings ) \n",
+       "            new_preconds  =  [] \n",
+       "            for  precond  in  action . precond : \n",
+       "                new_precond  =  self . generate_expr ( precond ,  bindings ) \n",
+       "                new_preconds . append ( new_precond ) \n",
+       "            new_effects  =  [] \n",
+       "            for  effect  in  action . effect : \n",
+       "                new_effect  =  self . generate_expr ( effect ,  bindings ) \n",
+       "                new_effects . append ( new_effect ) \n",
+       "            return  Action ( new_expr ,  new_preconds ,  new_effects ) \n",
+       "\n",
+       "    def  cyclic ( self ,  graph ): \n",
+       "        """Check cyclicity of a directed graph""" \n",
+       "\n",
+       "        new_graph  =  dict () \n",
+       "        for  element  in  graph : \n",
+       "            if  element [ 0 ]  in  new_graph : \n",
+       "                new_graph [ element [ 0 ]] . append ( element [ 1 ]) \n",
+       "            else : \n",
+       "                new_graph [ element [ 0 ]]  =  [ element [ 1 ]] \n",
+       "\n",
+       "        path  =  set () \n",
+       "\n",
+       "        def  visit ( vertex ): \n",
+       "            path . add ( vertex ) \n",
+       "            for  neighbor  in  new_graph . get ( vertex ,  ()): \n",
+       "                if  neighbor  in  path  or  visit ( neighbor ): \n",
+       "                    return  True \n",
+       "            path . remove ( vertex ) \n",
+       "            return  False \n",
+       "\n",
+       "        value  =  any ( visit ( v )  for  v  in  new_graph ) \n",
+       "        return  value \n",
+       "\n",
+       "    def  add_const ( self ,  constraint ,  constraints ): \n",
+       "        """Add the constraint to constraints if the resulting graph is acyclic""" \n",
+       "\n",
+       "        if  constraint [ 0 ]  ==  self . finish  or  constraint [ 1 ]  ==  self . start : \n",
+       "            return  constraints \n",
+       "\n",
+       "        new_constraints  =  set ( constraints ) \n",
+       "        new_constraints . add ( constraint ) \n",
+       "\n",
+       "        if  self . cyclic ( new_constraints ): \n",
+       "            return  constraints \n",
+       "        return  new_constraints \n",
+       "\n",
+       "    def  is_a_threat ( self ,  precondition ,  effect ): \n",
+       "        """Check if effect is a threat to precondition""" \n",
+       "\n",
+       "        if  ( str ( effect . op )  ==  'Not'  +  str ( precondition . op ))  or  ( 'Not'  +  str ( effect . op )  ==  str ( precondition . op )): \n",
+       "            if  effect . args  ==  precondition . args : \n",
+       "                return  True \n",
+       "        return  False \n",
+       "\n",
+       "    def  protect ( self ,  causal_link ,  action ,  constraints ): \n",
+       "        """Check and resolve threats by promotion or demotion""" \n",
+       "\n",
+       "        threat  =  False \n",
+       "        for  effect  in  action . effect : \n",
+       "            if  self . is_a_threat ( causal_link [ 1 ],  effect ): \n",
+       "                threat  =  True \n",
+       "                break \n",
+       "\n",
+       "        if  action  !=  causal_link [ 0 ]  and  action  !=  causal_link [ 2 ]  and  threat : \n",
+       "            # try promotion \n",
+       "            new_constraints  =  set ( constraints ) \n",
+       "            new_constraints . add (( action ,  causal_link [ 0 ])) \n",
+       "            if  not  self . cyclic ( new_constraints ): \n",
+       "                constraints  =  self . add_const (( action ,  causal_link [ 0 ]),  constraints ) \n",
+       "            else : \n",
+       "                # try demotion \n",
+       "                new_constraints  =  set ( constraints ) \n",
+       "                new_constraints . add (( causal_link [ 2 ],  action )) \n",
+       "                if  not  self . cyclic ( new_constraints ): \n",
+       "                    constraints  =  self . add_const (( causal_link [ 2 ],  action ),  constraints ) \n",
+       "                else : \n",
+       "                    # both promotion and demotion fail \n",
+       "                    print ( 'Unable to resolve a threat caused by' ,  action ,  'onto' ,  causal_link ) \n",
+       "                    return \n",
+       "        return  constraints \n",
+       "\n",
+       "    def  convert ( self ,  constraints ): \n",
+       "        """Convert constraints into a dict of Action to set orderings""" \n",
+       "\n",
+       "        graph  =  dict () \n",
+       "        for  constraint  in  constraints : \n",
+       "            if  constraint [ 0 ]  in  graph : \n",
+       "                graph [ constraint [ 0 ]] . add ( constraint [ 1 ]) \n",
+       "            else : \n",
+       "                graph [ constraint [ 0 ]]  =  set () \n",
+       "                graph [ constraint [ 0 ]] . add ( constraint [ 1 ]) \n",
+       "        return  graph \n",
+       "\n",
+       "    def  toposort ( self ,  graph ): \n",
+       "        """Generate topological ordering of constraints""" \n",
+       "\n",
+       "        if  len ( graph )  ==  0 : \n",
+       "            return \n",
+       "\n",
+       "        graph  =  graph . copy () \n",
+       "\n",
+       "        for  k ,  v  in  graph . items (): \n",
+       "            v . discard ( k ) \n",
+       "\n",
+       "        extra_elements_in_dependencies  =  _reduce ( set . union ,  graph . values ())  -  set ( graph . keys ()) \n",
+       "\n",
+       "        graph . update ({ element : set ()  for  element  in  extra_elements_in_dependencies }) \n",
+       "        while  True : \n",
+       "            ordered  =  set ( element  for  element ,  dependency  in  graph . items ()  if  len ( dependency )  ==  0 ) \n",
+       "            if  not  ordered : \n",
+       "                break \n",
+       "            yield  ordered \n",
+       "            graph  =  { element :  ( dependency  -  ordered )  for  element ,  dependency  in  graph . items ()  if  element  not  in  ordered } \n",
+       "        if  len ( graph )  !=  0 : \n",
+       "            raise  ValueError ( 'The graph is not acyclic and cannot be linearly ordered' ) \n",
+       "\n",
+       "    def  display_plan ( self ): \n",
+       "        """Display causal links, constraints and the plan""" \n",
+       "\n",
+       "        print ( 'Causal Links' ) \n",
+       "        for  causal_link  in  self . causal_links : \n",
+       "            print ( causal_link ) \n",
+       "\n",
+       "        print ( ' \\n Constraints' ) \n",
+       "        for  constraint  in  self . constraints : \n",
+       "            print ( constraint [ 0 ],  '<' ,  constraint [ 1 ]) \n",
+       "\n",
+       "        print ( ' \\n Partial Order Plan' ) \n",
+       "        print ( list ( reversed ( list ( self . toposort ( self . convert ( self . constraints )))))) \n",
+       "\n",
+       "    def  execute ( self ,  display = True ): \n",
+       "        """Execute the algorithm""" \n",
+       "\n",
+       "        step  =  1 \n",
+       "        self . tries  =  1 \n",
+       "        while  len ( self . agenda )  >  0 : \n",
+       "            step  +=  1 \n",
+       "            # select <G, act1> from Agenda \n",
+       "            try : \n",
+       "                G ,  act1 ,  possible_actions  =  self . find_open_precondition () \n",
+       "            except  IndexError : \n",
+       "                print ( 'Probably Wrong' ) \n",
+       "                break \n",
+       "\n",
+       "            act0  =  possible_actions [ 0 ] \n",
+       "            # remove <G, act1> from Agenda \n",
+       "            self . agenda . remove (( G ,  act1 )) \n",
+       "\n",
+       "            # For actions with variable number of arguments, use least commitment principle \n",
+       "            # act0_temp, bindings = self.find_action_for_precondition(G) \n",
+       "            # act0 = self.generate_action_object(act0_temp, bindings) \n",
+       "\n",
+       "            # Actions = Actions U {act0} \n",
+       "            self . actions . add ( act0 ) \n",
+       "\n",
+       "            # Constraints = add_const(start < act0, Constraints) \n",
+       "            self . constraints  =  self . add_const (( self . start ,  act0 ),  self . constraints ) \n",
+       "\n",
+       "            # for each CL E CausalLinks do \n",
+       "            #   Constraints = protect(CL, act0, Constraints) \n",
+       "            for  causal_link  in  self . causal_links : \n",
+       "                self . constraints  =  self . protect ( causal_link ,  act0 ,  self . constraints ) \n",
+       "\n",
+       "            # Agenda = Agenda U {<P, act0>: P is a precondition of act0} \n",
+       "            for  precondition  in  act0 . precond : \n",
+       "                self . agenda . add (( precondition ,  act0 )) \n",
+       "\n",
+       "            # Constraints = add_const(act0 < act1, Constraints) \n",
+       "            self . constraints  =  self . add_const (( act0 ,  act1 ),  self . constraints ) \n",
+       "\n",
+       "            # CausalLinks U {<act0, G, act1>} \n",
+       "            if  ( act0 ,  G ,  act1 )  not  in  self . causal_links : \n",
+       "                self . causal_links . append (( act0 ,  G ,  act1 )) \n",
+       "\n",
+       "            # for each A E Actions do \n",
+       "            #   Constraints = protect(<act0, G, act1>, A, Constraints) \n",
+       "            for  action  in  self . actions : \n",
+       "                self . constraints  =  self . protect (( act0 ,  G ,  act1 ),  action ,  self . constraints ) \n",
+       "\n",
+       "            if  step  >  200 : \n",
+       "                print ( 'Couldn \\' t find a solution' ) \n",
+       "                return  None ,  None \n",
+       "\n",
+       "        if  display : \n",
+       "            self . display_plan () \n",
+       "        else : \n",
+       "            return  self . constraints ,  self . causal_links                 \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "psource(PartialOrderPlanner)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We will first describe the data-structures and helper methods used, followed by the algorithm used to find a partial-order plan."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Each plan has the following four components:\n",
+    "\n",
+    "1. **`actions`**: a set of actions that make up the steps of the plan.\n",
+    "`actions` is always a subset of `pddl.actions`, the set of possible actions for the given planning problem. \n",
+    "The `start` and `finish` actions are dummy actions defined to bring uniformity to the problem. The `start` action has no preconditions and its effects constitute the initial state of the planning problem. \n",
+    "The `finish` action has no effects and its preconditions constitute the goal state of the planning problem.\n",
+    "The empty plan consists of just these two dummy actions.\n",
+    "2. **`constraints`**: a set of temporal constraints that define the order of performing the actions relative to each other.\n",
+    "`constraints` does not define a linear ordering, rather it usually represents a directed graph which is also acyclic if the plan is consistent.\n",
+    "Each ordering is of the form A < B, which reads as \"A before B\" and means that action A _must_ be executed sometime before action B, but not necessarily immediately before.\n",
+    "`constraints` stores these as a set of tuples `(Action(A), Action(B))` which is interpreted as given above.\n",
+    "A constraint cannot be added to `constraints` if it breaks the acyclicity of the existing graph.\n",
+    "3. **`causal_links`**: a set of causal-links. \n",
+    "A causal link between two actions _A_ and _B_ in the plan is written as _A_ --_p_--> _B_ and is read as \"A achieves p for B\".\n",
+    "This implies that _p_ is an effect of _A_ and a precondition of _B_.\n",
+    "It also asserts that _p_ must remain true from the time of action _A_ to the time of action _B_.\n",
+    "Any violation of this rule is called a threat and must be resolved immediately by adding suitable ordering constraints.\n",
+    "`causal_links` stores this information as tuples `(Action(A), precondition(p), Action(B))` which is interpreted as given above.\n",
+    "Causal-links can also be called **protection-intervals**, because the link _A_ --_p_--> _B_ protects _p_ from being negated over the interval from _A_ to _B_.\n",
+    "4. **`agenda`**: a set of open-preconditions.\n",
+    "A precondition is open if it is not achieved by some action in the plan.\n",
+    "Planners will work to reduce the set of open preconditions to the empty set, without introducing a contradiction.\n",
+    "`agenda` stores this information as tuples `(precondition(p), Action(A))` where p is a precondition of the action A.\n",
+    "\n",
+    "A **consistent plan** is a plan in which there are no cycles in the ordering constraints and no conflicts with the causal-links.\n",
+    "A consistent plan with no open preconditions is a **solution**.\n",
+    "class  Linearize : \n",
+       "\n",
+       "    def  __init__ ( self ,  planningproblem ): \n",
+       "        self . planningproblem  =  planningproblem \n",
+       "\n",
+       "    def  filter ( self ,  solution ): \n",
+       "        """Filter out persistence actions from a solution""" \n",
+       "\n",
+       "        new_solution  =  [] \n",
+       "        for  section  in  solution [ 0 ]: \n",
+       "            new_section  =  [] \n",
+       "            for  operation  in  section : \n",
+       "                if  not  ( operation . op [ 0 ]  ==  'P'  and  operation . op [ 1 ] . isupper ()): \n",
+       "                    new_section . append ( operation ) \n",
+       "            new_solution . append ( new_section ) \n",
+       "        return  new_solution \n",
+       "\n",
+       "    def  orderlevel ( self ,  level ,  planningproblem ): \n",
+       "        """Return valid linear order of actions for a given level""" \n",
+       "\n",
+       "        for  permutation  in  itertools . permutations ( level ): \n",
+       "            temp  =  copy . deepcopy ( planningproblem ) \n",
+       "            count  =  0 \n",
+       "            for  action  in  permutation : \n",
+       "                try : \n",
+       "                    temp . act ( action ) \n",
+       "                    count  +=  1 \n",
+       "                except : \n",
+       "                    count  =  0 \n",
+       "                    temp  =  copy . deepcopy ( planningproblem ) \n",
+       "                    break \n",
+       "            if  count  ==  len ( permutation ): \n",
+       "                return  list ( permutation ),  temp \n",
+       "        return  None \n",
+       "\n",
+       "    def  execute ( self ): \n",
+       "        """Finds total-order solution for a planning graph""" \n",
+       "\n",
+       "        graphplan_solution  =  GraphPlan ( self . planningproblem ) . execute () \n",
+       "        filtered_solution  =  self . filter ( graphplan_solution ) \n",
+       "        ordered_solution  =  [] \n",
+       "        planningproblem  =  self . planningproblem \n",
+       "        for  level  in  filtered_solution : \n",
+       "            level_solution ,  planningproblem  =  self . orderlevel ( level ,  planningproblem ) \n",
+       "            for  element  in  level_solution : \n",
+       "                ordered_solution . append ( element ) \n",
+       "\n",
+       "        return  ordered_solution \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "psource(Linearize)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The `filter` method removes the persistence actions (if any) from the planning graph representation.\n",
+    "class  ProbDist : \n",
+       "    """A discrete probability distribution. You name the random variable \n",
+       "    in the constructor, then assign and query probability of values. \n",
+       "    >>> P = ProbDist('Flip'); P['H'], P['T'] = 0.25, 0.75; P['H'] \n",
+       "    0.25 \n",
+       "    >>> P = ProbDist('X', {'lo': 125, 'med': 375, 'hi': 500}) \n",
+       "    >>> P['lo'], P['med'], P['hi'] \n",
+       "    (0.125, 0.375, 0.5) \n",
+       "    """ \n",
+       "\n",
+       "    def  __init__ ( self ,  varname = '?' ,  freqs = None ): \n",
+       "        """If freqs is given, it is a dictionary of values - frequency pairs, \n",
+       "        then ProbDist is normalized.""" \n",
+       "        self . prob  =  {} \n",
+       "        self . varname  =  varname \n",
+       "        self . values  =  [] \n",
+       "        if  freqs : \n",
+       "            for  ( v ,  p )  in  freqs . items (): \n",
+       "                self [ v ]  =  p \n",
+       "            self . normalize () \n",
+       "\n",
+       "    def  __getitem__ ( self ,  val ): \n",
+       "        """Given a value, return P(value).""" \n",
+       "        try : \n",
+       "            return  self . prob [ val ] \n",
+       "        except  KeyError : \n",
+       "            return  0 \n",
+       "\n",
+       "    def  __setitem__ ( self ,  val ,  p ): \n",
+       "        """Set P(val) = p.""" \n",
+       "        if  val  not  in  self . values : \n",
+       "            self . values . append ( val ) \n",
+       "        self . prob [ val ]  =  p \n",
+       "\n",
+       "    def  normalize ( self ): \n",
+       "        """Make sure the probabilities of all values sum to 1. \n",
+       "        Returns the normalized distribution. \n",
+       "        Raises a ZeroDivisionError if the sum of the values is 0.""" \n",
+       "        total  =  sum ( self . prob . values ()) \n",
+       "        if  not  isclose ( total ,  1.0 ): \n",
+       "            for  val  in  self . prob : \n",
+       "                self . prob [ val ]  /=  total \n",
+       "        return  self \n",
+       "\n",
+       "    def  show_approx ( self ,  numfmt = '{:.3g}' ): \n",
+       "        """Show the probabilities rounded and sorted by key, for the \n",
+       "        sake of portable doctests.""" \n",
+       "        return  ', ' . join ([( '{}: '  +  numfmt ) . format ( v ,  p ) \n",
+       "                          for  ( v ,  p )  in  sorted ( self . prob . items ())]) \n",
+       "\n",
+       "    def  __repr__ ( self ): \n",
+       "        return  "P({})" . format ( self . varname ) \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
-    "%psource ProbDist"
+    "psource(ProbDist)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "0.75"
+      ]
+     },
+     "execution_count": 3,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "p = ProbDist('Flip')\n",
     "p['H'], p['T'] = 0.25, 0.75\n",
@@ -61,28 +249,46 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "The first parameter of the constructor **varname** has a default value of '?'. So if the name is not passed it defaults to ?. The keyword argument **freqs** can be a dictionary of values of random variable:probability. These are then normalized such that the probability values sum upto 1 using the **normalize** method."
+    "The first parameter of the constructor **varname** has a default value of '?'. So if the name is not passed it defaults to ?. The keyword argument **freqs** can be a dictionary of values of random variable: probability. These are then normalized such that the probability values sum up to 1 using the **normalize** method."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'?'"
+      ]
+     },
+     "execution_count": 4,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "p = ProbDist(freqs={'low': 125, 'medium': 375, 'high': 500})\n",
-    "p.varname\n"
+    "p.varname"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "(0.125, 0.375, 0.5)"
+      ]
+     },
+     "execution_count": 5,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "(p['low'], p['medium'], p['high'])"
    ]
@@ -96,11 +302,20 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "['low', 'medium', 'high']"
+      ]
+     },
+     "execution_count": 6,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "p.values"
    ]
@@ -109,16 +324,25 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "The distribution by default is not normalized if values are added incremently. We can still force normalization by invoking the **normalize** method."
+    "The distribution by default is not normalized if values are added incrementally. We can still force normalization by invoking the **normalize** method."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "(50, 114, 64)"
+      ]
+     },
+     "execution_count": 7,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "p = ProbDist('Y')\n",
     "p['Cat'] = 50\n",
@@ -129,11 +353,20 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "(0.21929824561403508, 0.5, 0.2807017543859649)"
+      ]
+     },
+     "execution_count": 8,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "p.normalize()\n",
     "(p['Cat'], p['Dog'], p['Mice'])"
@@ -148,11 +381,20 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'Cat: 0.219, Dog: 0.5, Mice: 0.281'"
+      ]
+     },
+     "execution_count": 9,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "p.show_approx()"
    ]
@@ -171,35 +413,175 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "(8, 10)"
+      ]
+     },
+     "execution_count": 10,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "event = {'A': 10, 'B': 9, 'C': 8}\n",
     "variables = ['C', 'A']\n",
-    "event_values (event, variables)"
+    "event_values(event, variables)"
    ]
   },
   {
    "cell_type": "markdown",
-   "metadata": {
-    "collapsed": true
-   },
+   "metadata": {},
    "source": [
+    "_A probability model is completely determined by the joint distribution for all of the random variables._ (**Section 13.3**) The probability module implements these as the class **JointProbDist** which inherits from the **ProbDist** class. This class specifies a discrete probability distribution over a set of variables. "
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
+   "execution_count": 11,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "\n",
+       "\n",
+       "\n",
+       "  class  JointProbDist ( ProbDist ): \n",
+       "    """A discrete probability distribute over a set of variables. \n",
+       "    >>> P = JointProbDist(['X', 'Y']); P[1, 1] = 0.25 \n",
+       "    >>> P[1, 1] \n",
+       "    0.25 \n",
+       "    >>> P[dict(X=0, Y=1)] = 0.5 \n",
+       "    >>> P[dict(X=0, Y=1)] \n",
+       "    0.5""" \n",
+       "\n",
+       "    def  __init__ ( self ,  variables ): \n",
+       "        self . prob  =  {} \n",
+       "        self . variables  =  variables \n",
+       "        self . vals  =  defaultdict ( list ) \n",
+       "\n",
+       "    def  __getitem__ ( self ,  values ): \n",
+       "        """Given a tuple or dict of values, return P(values).""" \n",
+       "        values  =  event_values ( values ,  self . variables ) \n",
+       "        return  ProbDist . __getitem__ ( self ,  values ) \n",
+       "\n",
+       "    def  __setitem__ ( self ,  values ,  p ): \n",
+       "        """Set P(values) = p.  Values can be a tuple or a dict; it must \n",
+       "        have a value for each of the variables in the joint. Also keep track \n",
+       "        of the values we have seen so far for each variable.""" \n",
+       "        values  =  event_values ( values ,  self . variables ) \n",
+       "        self . prob [ values ]  =  p \n",
+       "        for  var ,  val  in  zip ( self . variables ,  values ): \n",
+       "            if  val  not  in  self . vals [ var ]: \n",
+       "                self . vals [ var ] . append ( val ) \n",
+       "\n",
+       "    def  values ( self ,  var ): \n",
+       "        """Return the set of possible values for a variable.""" \n",
+       "        return  self . vals [ var ] \n",
+       "\n",
+       "    def  __repr__ ( self ): \n",
+       "        return  "P({})" . format ( self . variables ) \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
-    "%psource JointProbDist"
+    "psource(JointProbDist)"
    ]
   },
   {
@@ -213,11 +595,20 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 12,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "P(['X', 'Y'])"
+      ]
+     },
+     "execution_count": 12,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "variables = ['X', 'Y']\n",
     "j = JointProbDist(variables)\n",
@@ -234,11 +625,20 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 13,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "(0.2, 0.5)"
+      ]
+     },
+     "execution_count": 13,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "j[1,1] = 0.2\n",
     "j[dict(X=0, Y=1)] = 0.5\n",
@@ -255,11 +655,20 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 14,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[1, 0]"
+      ]
+     },
+     "execution_count": 14,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "j.values('X')"
    ]
@@ -274,7 +683,7 @@
     "\n",
     "This is illustrated in **Section 13.3** of the book. The functions **enumerate_joint** and **enumerate_joint_ask** implement this functionality. Under the hood they implement **Equation 13.9** from the book.\n",
     "\n",
-    "$$\\textbf{P}(X | \\textbf{e}) = α \\textbf{P}(X, \\textbf{e}) = α \\sum_{y} \\textbf{P}(X, \\textbf{e}, \\textbf{y})$$\n",
+    "$$\\textbf{P}(X | \\textbf{e}) = \\alpha \\textbf{P}(X, \\textbf{e}) = \\alpha \\sum_{y} \\textbf{P}(X, \\textbf{e}, \\textbf{y})$$\n",
     "\n",
     "Here **α** is the normalizing factor. **X** is our query variable and **e** is the evidence. According to the equation we enumerate on the remaining variables **y** (not in evidence or query variable) i.e. all possible combinations of **y**\n",
     "\n",
@@ -283,10 +692,8 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "execution_count": 15,
+   "metadata": {},
    "outputs": [],
    "source": [
     "full_joint = JointProbDist(['Cavity', 'Toothache', 'Catch'])\n",
@@ -309,13 +716,119 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "\n",
+       "\n",
+       "\n",
+       "  def  enumerate_joint ( variables ,  e ,  P ): \n",
+       "    """Return the sum of those entries in P consistent with e, \n",
+       "    provided variables is P's remaining variables (the ones not in e).""" \n",
+       "    if  not  variables : \n",
+       "        return  P [ e ] \n",
+       "    Y ,  rest  =  variables [ 0 ],  variables [ 1 :] \n",
+       "    return  sum ([ enumerate_joint ( rest ,  extend ( e ,  Y ,  y ),  P ) \n",
+       "                for  y  in  P . values ( Y )]) \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
-    "%psource enumerate_joint"
+    "psource(enumerate_joint)"
    ]
   },
   {
@@ -327,11 +840,20 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 17,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "0.19999999999999998"
+      ]
+     },
+     "execution_count": 17,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "evidence = dict(Toothache=True)\n",
     "variables = ['Cavity', 'Catch'] # variables not part of evidence\n",
@@ -348,11 +870,20 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 18,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "0.12"
+      ]
+     },
+     "execution_count": 18,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "evidence = dict(Cavity=True, Toothache=True)\n",
     "variables = ['Catch'] # variables not part of evidence\n",
@@ -371,11 +902,20 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 19,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "0.6"
+      ]
+     },
+     "execution_count": 19,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "ans2/ans1"
    ]
@@ -389,13 +929,125 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
+   "execution_count": 20,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "\n",
+       "\n",
+       "\n",
+       "  def  enumerate_joint_ask ( X ,  e ,  P ): \n",
+       "    """Return a probability distribution over the values of the variable X, \n",
+       "    given the {var:val} observations e, in the JointProbDist P. [Section 13.3] \n",
+       "    >>> P = JointProbDist(['X', 'Y']) \n",
+       "    >>> P[0,0] = 0.25; P[0,1] = 0.5; P[1,1] = P[2,1] = 0.125 \n",
+       "    >>> enumerate_joint_ask('X', dict(Y=1), P).show_approx() \n",
+       "    '0: 0.667, 1: 0.167, 2: 0.167' \n",
+       "    """ \n",
+       "    assert  X  not  in  e ,  "Query variable must be distinct from evidence" \n",
+       "    Q  =  ProbDist ( X )   # probability distribution for X, initially empty \n",
+       "    Y  =  [ v  for  v  in  P . variables  if  v  !=  X  and  v  not  in  e ]   # hidden variables. \n",
+       "    for  xi  in  P . values ( X ): \n",
+       "        Q [ xi ]  =  enumerate_joint ( Y ,  extend ( e ,  X ,  xi ),  P ) \n",
+       "    return  Q . normalize () \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
-    "%psource enumerate_joint_ask"
+    "psource(enumerate_joint_ask)"
    ]
   },
   {
@@ -407,11 +1059,20 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 21,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "(0.6, 0.39999999999999997)"
+      ]
+     },
+     "execution_count": 21,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "query_variable = 'Cavity'\n",
     "evidence = dict(Toothache=True)\n",
@@ -430,7 +1091,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## Bayesian Networks\n",
+    "## BAYESIAN NETWORKS\n",
     "\n",
     "A Bayesian network is a representation of the joint probability distribution encoding a collection of conditional independence statements.\n",
     "\n",
@@ -441,13 +1102,182 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 22,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "\n",
+       "\n",
+       "\n",
+       "  class  BayesNode : \n",
+       "    """A conditional probability distribution for a boolean variable, \n",
+       "    P(X | parents). Part of a BayesNet.""" \n",
+       "\n",
+       "    def  __init__ ( self ,  X ,  parents ,  cpt ): \n",
+       "        """X is a variable name, and parents a sequence of variable \n",
+       "        names or a space-separated string.  cpt, the conditional \n",
+       "        probability table, takes one of these forms: \n",
+       "\n",
+       "        * A number, the unconditional probability P(X=true). You can \n",
+       "          use this form when there are no parents. \n",
+       "\n",
+       "        * A dict {v: p, ...}, the conditional probability distribution \n",
+       "          P(X=true | parent=v) = p. When there's just one parent. \n",
+       "\n",
+       "        * A dict {(v1, v2, ...): p, ...}, the distribution P(X=true | \n",
+       "          parent1=v1, parent2=v2, ...) = p. Each key must have as many \n",
+       "          values as there are parents. You can use this form always; \n",
+       "          the first two are just conveniences. \n",
+       "\n",
+       "        In all cases the probability of X being false is left implicit, \n",
+       "        since it follows from P(X=true). \n",
+       "\n",
+       "        >>> X = BayesNode('X', '', 0.2) \n",
+       "        >>> Y = BayesNode('Y', 'P', {T: 0.2, F: 0.7}) \n",
+       "        >>> Z = BayesNode('Z', 'P Q', \n",
+       "        ...    {(T, T): 0.2, (T, F): 0.3, (F, T): 0.5, (F, F): 0.7}) \n",
+       "        """ \n",
+       "        if  isinstance ( parents ,  str ): \n",
+       "            parents  =  parents . split () \n",
+       "\n",
+       "        # We store the table always in the third form above. \n",
+       "        if  isinstance ( cpt ,  ( float ,  int )):   # no parents, 0-tuple \n",
+       "            cpt  =  {():  cpt } \n",
+       "        elif  isinstance ( cpt ,  dict ): \n",
+       "            # one parent, 1-tuple \n",
+       "            if  cpt  and  isinstance ( list ( cpt . keys ())[ 0 ],  bool ): \n",
+       "                cpt  =  {( v ,):  p  for  v ,  p  in  cpt . items ()} \n",
+       "\n",
+       "        assert  isinstance ( cpt ,  dict ) \n",
+       "        for  vs ,  p  in  cpt . items (): \n",
+       "            assert  isinstance ( vs ,  tuple )  and  len ( vs )  ==  len ( parents ) \n",
+       "            assert  all ( isinstance ( v ,  bool )  for  v  in  vs ) \n",
+       "            assert  0  <=  p  <=  1 \n",
+       "\n",
+       "        self . variable  =  X \n",
+       "        self . parents  =  parents \n",
+       "        self . cpt  =  cpt \n",
+       "        self . children  =  [] \n",
+       "\n",
+       "    def  p ( self ,  value ,  event ): \n",
+       "        """Return the conditional probability \n",
+       "        P(X=value | parents=parent_values), where parent_values \n",
+       "        are the values of parents in event. (event must assign each \n",
+       "        parent a value.) \n",
+       "        >>> bn = BayesNode('X', 'Burglary', {T: 0.2, F: 0.625}) \n",
+       "        >>> bn.p(False, {'Burglary': False, 'Earthquake': True}) \n",
+       "        0.375""" \n",
+       "        assert  isinstance ( value ,  bool ) \n",
+       "        ptrue  =  self . cpt [ event_values ( event ,  self . parents )] \n",
+       "        return  ptrue  if  value  else  1  -  ptrue \n",
+       "\n",
+       "    def  sample ( self ,  event ): \n",
+       "        """Sample from the distribution for this variable conditioned \n",
+       "        on event's values for parent_variables. That is, return True/False \n",
+       "        at random according with the conditional probability given the \n",
+       "        parents.""" \n",
+       "        return  probability ( self . p ( True ,  event )) \n",
+       "\n",
+       "    def  __repr__ ( self ): \n",
+       "        return  repr (( self . variable ,  ' ' . join ( self . parents ))) \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
-    "%psource BayesNode"
+    "psource(BayesNode)"
    ]
   },
   {
@@ -465,10 +1295,8 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "execution_count": 23,
+   "metadata": {},
    "outputs": [],
    "source": [
     "alarm_node = BayesNode('Alarm', ['Burglary', 'Earthquake'], \n",
@@ -484,15 +1312,13 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "execution_count": 24,
+   "metadata": {},
    "outputs": [],
    "source": [
     "john_node = BayesNode('JohnCalls', ['Alarm'], {True: 0.90, False: 0.05})\n",
     "mary_node = BayesNode('MaryCalls', 'Alarm', {(True, ): 0.70, (False, ): 0.01}) # Using string for parents.\n",
-    "# Equvivalant to john_node definition. "
+    "# Equivalent to john_node definition."
    ]
   },
   {
@@ -504,10 +1330,8 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "execution_count": 25,
+   "metadata": {},
    "outputs": [],
    "source": [
     "burglary_node = BayesNode('Burglary', '', 0.001)\n",
@@ -523,11 +1347,20 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 26,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "0.09999999999999998"
+      ]
+     },
+     "execution_count": 26,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "john_node.p(False, {'Alarm': True, 'Burglary': True}) # P(JohnCalls=False | Alarm=True)"
    ]
@@ -541,13 +1374,148 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
+   "execution_count": 27,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "\n",
+       "\n",
+       "\n",
+       "  class  BayesNet : \n",
+       "    """Bayesian network containing only boolean-variable nodes.""" \n",
+       "\n",
+       "    def  __init__ ( self ,  node_specs = None ): \n",
+       "        """Nodes must be ordered with parents before children.""" \n",
+       "        self . nodes  =  [] \n",
+       "        self . variables  =  [] \n",
+       "        node_specs  =  node_specs  or  [] \n",
+       "        for  node_spec  in  node_specs : \n",
+       "            self . add ( node_spec ) \n",
+       "\n",
+       "    def  add ( self ,  node_spec ): \n",
+       "        """Add a node to the net. Its parents must already be in the \n",
+       "        net, and its variable must not.""" \n",
+       "        node  =  BayesNode ( * node_spec ) \n",
+       "        assert  node . variable  not  in  self . variables \n",
+       "        assert  all (( parent  in  self . variables )  for  parent  in  node . parents ) \n",
+       "        self . nodes . append ( node ) \n",
+       "        self . variables . append ( node . variable ) \n",
+       "        for  parent  in  node . parents : \n",
+       "            self . variable_node ( parent ) . children . append ( node ) \n",
+       "\n",
+       "    def  variable_node ( self ,  var ): \n",
+       "        """Return the node for the variable named var. \n",
+       "        >>> burglary.variable_node('Burglary').variable \n",
+       "        'Burglary'""" \n",
+       "        for  n  in  self . nodes : \n",
+       "            if  n . variable  ==  var : \n",
+       "                return  n \n",
+       "        raise  Exception ( "No such variable: {}" . format ( var )) \n",
+       "\n",
+       "    def  variable_values ( self ,  var ): \n",
+       "        """Return the domain of var.""" \n",
+       "        return  [ True ,  False ] \n",
+       "\n",
+       "    def  __repr__ ( self ): \n",
+       "        return  'BayesNet({0!r})' . format ( self . nodes ) \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
-    "%psource BayesNet"
+    "psource(BayesNet)"
    ]
   },
   {
@@ -572,11 +1540,20 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 28,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "BayesNet([('Burglary', ''), ('Earthquake', ''), ('Alarm', 'Burglary Earthquake'), ('JohnCalls', 'Alarm'), ('MaryCalls', 'Alarm')])"
+      ]
+     },
+     "execution_count": 28,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "burglary"
    ]
@@ -590,22 +1567,43 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 29,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "probability.BayesNode"
+      ]
+     },
+     "execution_count": 29,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "type(burglary.variable_node('Alarm'))"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 30,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "{(True, True): 0.95,\n",
+       " (True, False): 0.94,\n",
+       " (False, True): 0.29,\n",
+       " (False, False): 0.001}"
+      ]
+     },
+     "execution_count": 30,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "burglary.variable_node('Alarm').cpt"
    ]
@@ -627,20 +1625,132 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
+   "execution_count": 31,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "\n",
+       "\n",
+       "\n",
+       "  def  enumerate_all ( variables ,  e ,  bn ): \n",
+       "    """Return the sum of those entries in P(variables | e{others}) \n",
+       "    consistent with e, where P is the joint distribution represented \n",
+       "    by bn, and e{others} means e restricted to bn's other variables \n",
+       "    (the ones other than variables). Parents must precede children in variables.""" \n",
+       "    if  not  variables : \n",
+       "        return  1.0 \n",
+       "    Y ,  rest  =  variables [ 0 ],  variables [ 1 :] \n",
+       "    Ynode  =  bn . variable_node ( Y ) \n",
+       "    if  Y  in  e : \n",
+       "        return  Ynode . p ( e [ Y ],  e )  *  enumerate_all ( rest ,  e ,  bn ) \n",
+       "    else : \n",
+       "        return  sum ( Ynode . p ( y ,  e )  *  enumerate_all ( rest ,  extend ( e ,  Y ,  y ),  bn ) \n",
+       "                   for  y  in  bn . variable_values ( Y )) \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
-    "%psource enumerate_all"
+    "psource(enumerate_all)"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "**enumerate__all** recursively evaluates a general form of the **Equation 14.4** in the book.\n",
+    "**enumerate_all** recursively evaluates a general form of the **Equation 14.4** in the book.\n",
     "\n",
     "$$\\textbf{P}(X | \\textbf{e}) = α \\textbf{P}(X, \\textbf{e}) = α \\sum_{y} \\textbf{P}(X, \\textbf{e}, \\textbf{y})$$ \n",
     "\n",
@@ -651,29 +1761,147 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
+   "execution_count": 32,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "\n",
+       "\n",
+       "\n",
+       "  def  enumeration_ask ( X ,  e ,  bn ): \n",
+       "    """Return the conditional probability distribution of variable X \n",
+       "    given evidence e, from BayesNet bn. [Figure 14.9] \n",
+       "    >>> enumeration_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary \n",
+       "    ...  ).show_approx() \n",
+       "    'False: 0.716, True: 0.284'""" \n",
+       "    assert  X  not  in  e ,  "Query variable must be distinct from evidence" \n",
+       "    Q  =  ProbDist ( X ) \n",
+       "    for  xi  in  bn . variable_values ( X ): \n",
+       "        Q [ xi ]  =  enumerate_all ( bn . variables ,  extend ( e ,  X ,  xi ),  bn ) \n",
+       "    return  Q . normalize () \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
-    "%psource enumeration_ask"
+    "psource(enumeration_ask)"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Let us solve the problem of finding out **P(Burglary=True | JohnCalls=True, MaryCalls=True)** using the **burglary** network.**enumeration_ask** takes three arguments **X** = variable name, **e** = Evidence (in form a dict like previously explained), **bn** = The Bayes Net to do inference on."
+    "Let us solve the problem of finding out **P(Burglary=True | JohnCalls=True, MaryCalls=True)** using the **burglary** network. **enumeration_ask** takes three arguments: **X** = variable name, **e** = evidence (in the form of a dict, as previously explained), **bn** = the Bayes Net to do inference on."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 33,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "0.2841718353643929"
+      ]
+     },
+     "execution_count": 33,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "ans_dist = enumeration_ask('Burglary', {'JohnCalls': True, 'MaryCalls': True}, burglary)\n",
     "ans_dist[True]"
@@ -699,13 +1927,120 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
+   "execution_count": 34,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "\n",
+       "\n",
+       "\n",
+       "  def  make_factor ( var ,  e ,  bn ): \n",
+       "    """Return the factor for var in bn's joint distribution given e. \n",
+       "    That is, bn's full joint distribution, projected to accord with e, \n",
+       "    is the pointwise product of these factors for bn's variables.""" \n",
+       "    node  =  bn . variable_node ( var ) \n",
+       "    variables  =  [ X  for  X  in  [ var ]  +  node . parents  if  X  not  in  e ] \n",
+       "    cpt  =  { event_values ( e1 ,  variables ):  node . p ( e1 [ var ],  e1 ) \n",
+       "           for  e1  in  all_events ( variables ,  bn ,  e )} \n",
+       "    return  Factor ( variables ,  cpt ) \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
-    "%psource make_factor"
+    "psource(make_factor)"
    ]
   },
   {
@@ -721,13 +2056,120 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
+   "execution_count": 35,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "\n",
+       "\n",
+       "\n",
+       "  def  all_events ( variables ,  bn ,  e ): \n",
+       "    """Yield every way of extending e with values for all variables.""" \n",
+       "    if  not  variables : \n",
+       "        yield  e \n",
+       "    else : \n",
+       "        X ,  rest  =  variables [ 0 ],  variables [ 1 :] \n",
+       "        for  e1  in  all_events ( rest ,  bn ,  e ): \n",
+       "            for  x  in  bn . variable_values ( X ): \n",
+       "                yield  extend ( e1 ,  X ,  x ) \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
-    "%psource all_events"
+    "psource(all_events)"
    ]
   },
   {
@@ -741,10 +2183,8 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "execution_count": 36,
+   "metadata": {},
    "outputs": [],
    "source": [
     "f5 = make_factor('MaryCalls', {'JohnCalls': True, 'MaryCalls': True}, burglary)"
@@ -752,33 +2192,60 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 37,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       ""
+      ]
+     },
+     "execution_count": 37,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "f5"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 38,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "{(True,): 0.7, (False,): 0.01}"
+      ]
+     },
+     "execution_count": 38,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "f5.cpt"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 39,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "['Alarm']"
+      ]
+     },
+     "execution_count": 39,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "f5.variables"
    ]
@@ -792,10 +2259,8 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
+   "execution_count": 40,
+   "metadata": {},
    "outputs": [],
    "source": [
     "new_factor = make_factor('MaryCalls', {'Alarm': True}, burglary)"
@@ -803,11 +2268,20 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 41,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "{(True,): 0.7, (False,): 0.30000000000000004}"
+      ]
+     },
+     "execution_count": 41,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "new_factor.cpt"
    ]
@@ -825,13 +2299,117 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
+   "execution_count": 42,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "\n",
+       "\n",
+       "\n",
+       "  def  pointwise_product ( self ,  other ,  bn ): \n",
+       "        """Multiply two factors, combining their variables.""" \n",
+       "        variables  =  list ( set ( self . variables )  |  set ( other . variables )) \n",
+       "        cpt  =  { event_values ( e ,  variables ):  self . p ( e )  *  other . p ( e ) \n",
+       "               for  e  in  all_events ( variables ,  bn ,  {})} \n",
+       "        return  Factor ( variables ,  cpt ) \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
-    "%psource Factor.pointwise_product"
+    "psource(Factor.pointwise_product)"
    ]
   },
   {
@@ -843,13 +2421,113 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
+   "execution_count": 43,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "\n",
+       "\n",
+       "\n",
+       "  def  pointwise_product ( factors ,  bn ): \n",
+       "    return  reduce ( lambda  f ,  g :  f . pointwise_product ( g ,  bn ),  factors ) \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
-    "%psource pointwise_product"
+    "psource(pointwise_product)"
    ]
   },
   {
@@ -861,13 +2539,118 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
+   "execution_count": 44,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "\n",
+       "\n",
+       "\n",
+       "  def  sum_out ( self ,  var ,  bn ): \n",
+       "        """Make a factor eliminating var by summing over its values.""" \n",
+       "        variables  =  [ X  for  X  in  self . variables  if  X  !=  var ] \n",
+       "        cpt  =  { event_values ( e ,  variables ):  sum ( self . p ( extend ( e ,  var ,  val )) \n",
+       "                                               for  val  in  bn . variable_values ( var )) \n",
+       "               for  e  in  all_events ( variables ,  bn ,  {})} \n",
+       "        return  Factor ( variables ,  cpt ) \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
-    "%psource Factor.sum_out"
+    "psource(Factor.sum_out)"
    ]
   },
   {
@@ -879,13 +2662,118 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
+   "execution_count": 45,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "\n",
+       "\n",
+       "\n",
+       "  def  sum_out ( var ,  factors ,  bn ): \n",
+       "    """Eliminate var from all factors by summing over its values.""" \n",
+       "    result ,  var_factors  =  [],  [] \n",
+       "    for  f  in  factors : \n",
+       "        ( var_factors  if  var  in  f . variables  else  result ) . append ( f ) \n",
+       "    result . append ( pointwise_product ( var_factors ,  bn ) . sum_out ( var ,  bn )) \n",
+       "    return  result \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
-    "%psource sum_out"
+    "psource(sum_out)"
    ]
   },
   {
@@ -910,26 +2798,226 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
+   "execution_count": 46,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "\n",
+       "\n",
+       "\n",
+       "  def  elimination_ask ( X ,  e ,  bn ): \n",
+       "    """Compute bn's P(X|e) by variable elimination. [Figure 14.11] \n",
+       "    >>> elimination_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary \n",
+       "    ...  ).show_approx() \n",
+       "    'False: 0.716, True: 0.284'""" \n",
+       "    assert  X  not  in  e ,  "Query variable must be distinct from evidence" \n",
+       "    factors  =  [] \n",
+       "    for  var  in  reversed ( bn . variables ): \n",
+       "        factors . append ( make_factor ( var ,  e ,  bn )) \n",
+       "        if  is_hidden ( var ,  X ,  e ): \n",
+       "            factors  =  sum_out ( var ,  factors ,  bn ) \n",
+       "    return  pointwise_product ( factors ,  bn ) . normalize () \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
-    "%psource elimination_ask"
+    "psource(elimination_ask)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 47,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'False: 0.716, True: 0.284'"
+      ]
+     },
+     "execution_count": 47,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "elimination_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Elimination Ask Optimizations\n",
+    "\n",
+    "`elimination_ask` has some critical points to consider and some optimizations could be performed:\n",
+    "\n",
+    "- **Operation on factors**:\n",
+    "\n",
+    "  The `sum_out` and `pointwise_product` functions used in `elimination_ask` are where the space and time complexity arise in the variable elimination algorithm (AIMA3e pg. 526).\n",
+    "\n",
+    ">The only trick is to notice that any factor that does not depend on the variable to be summed out can be moved outside the summation.\n",
+    "\n",
+    "- **Variable ordering**:\n",
+    "\n",
+    "  Elimination ordering is important, every choice of ordering yields a valid algorithm, but different orderings cause different intermediate factors to be generated during the calculation (AIMA3e pg. 527). In this case the algorithm applies a reversed order.\n",
+    "\n",
+    "> In general, the time and space requirements of variable elimination are dominated by the size of the largest factor constructed during the operation of the algorithm. This in turn is determined by the order of elimination of variables and by the structure of the network. It turns out to be intractable to determine the optimal ordering, but several good heuristics are available. One fairly effective method is a greedy one: eliminate whichever variable minimizes the size of the next factor to be constructed.  \n",
+    "\n",
+    "- **Variable relevance**\n",
+    "  \n",
+    "  Some variables could be irrelevant to resolve a query  (i.e. sums to 1). A variable elimination algorithm can therefore remove all these variables before evaluating the query (AIMA3e pg. 528).\n",
+    "\n",
+    "> An optimization is to remove 'every variable that is not an ancestor of a query variable or evidence variable is irrelevant to the query'."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Runtime comparison\n",
+    "Let's see how the runtimes of these two algorithms compare.\n",
+    "We expect variable elimination to outperform enumeration by a large margin as we reduce the number of repetitive calculations significantly."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 48,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "105 µs ± 11.9 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)\n"
+     ]
+    }
+   ],
    "source": [
+    "%%timeit\n",
+    "enumeration_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 49,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "262 µs ± 54.7 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%timeit\n",
     "elimination_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In this test case we observe that variable elimination is slower than what we expected. It has something to do with the number of threads and how Python tries to optimize things, and this happens because the network is very small, with just 5 nodes. `elimination_ask` has some critical points and some optimizations must be performed as seen above.\n",
+    "def  sample ( self ,  event ): \n",
+       "        """Sample from the distribution for this variable conditioned \n",
+       "        on event's values for parent_variables. That is, return True/False \n",
+       "        at random according with the conditional probability given the \n",
+       "        parents.""" \n",
+       "        return  probability ( self . p ( True ,  event )) \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
-    "%psource BayesNode.sample"
+    "psource(BayesNode.sample)"
    ]
   },
   {
@@ -963,13 +3155,118 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
+   "execution_count": 51,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "\n",
+       "\n",
+       "\n",
+       "  def  prior_sample ( bn ): \n",
+       "    """Randomly sample from bn's full joint distribution. The result \n",
+       "    is a {variable: value} dict. [Figure 14.13]""" \n",
+       "    event  =  {} \n",
+       "    for  node  in  bn . nodes : \n",
+       "        event [ node . variable ]  =  node . sample ( event ) \n",
+       "    return  event \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
-    "%psource prior_sample"
+    "psource(prior_sample)"
    ]
   },
   {
@@ -980,15 +3277,25 @@
     "\n",
     "def  rejection_sampling ( X ,  e ,  bn ,  N = 10000 ): \n",
+       "    """Estimate the probability distribution of variable X given \n",
+       "    evidence e in BayesNet bn, using N samples.  [Figure 14.14] \n",
+       "    Raises a ZeroDivisionError if all the N samples are rejected, \n",
+       "    i.e., inconsistent with e. \n",
+       "    >>> random.seed(47) \n",
+       "    >>> rejection_sampling('Burglary', dict(JohnCalls=T, MaryCalls=T), \n",
+       "    ...   burglary, 10000).show_approx() \n",
+       "    'False: 0.7, True: 0.3' \n",
+       "    """ \n",
+       "    counts  =  { x :  0  for  x  in  bn . variable_values ( X )}   # bold N in [Figure 14.14] \n",
+       "    for  j  in  range ( N ): \n",
+       "        sample  =  prior_sample ( bn )   # boldface x in [Figure 14.14] \n",
+       "        if  consistent_with ( sample ,  e ): \n",
+       "            counts [ sample [ X ]]  +=  1 \n",
+       "    return  ProbDist ( X ,  counts ) \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
-    "%psource rejection_sampling"
+    "psource(rejection_sampling)"
    ]
   },
   {
@@ -1083,13 +3566,115 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
+   "execution_count": 58,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "\n",
+       "\n",
+       "\n",
+       "  def  consistent_with ( event ,  evidence ): \n",
+       "    """Is event consistent with the given evidence?""" \n",
+       "    return  all ( evidence . get ( k ,  v )  ==  v \n",
+       "               for  k ,  v  in  event . items ()) \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
-    "%psource consistent_with"
+    "psource(consistent_with)"
    ]
   },
   {
@@ -1101,11 +3686,20 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 59,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "0.8035019455252919"
+      ]
+     },
+     "execution_count": 59,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "p = rejection_sampling('Cloudy', dict(Rain=True), sprinkler, 1000)\n",
     "p[True]"
@@ -1117,6 +3711,7 @@
    "source": [
     "### Likelihood Weighting\n",
     "\n",
+    "Rejection sampling takes a long time to run when the probability of finding consistent evidence is low. It is also slow for larger networks and more evidence variables.\n",
     "Rejection sampling tends to reject a lot of samples if our evidence consists of a large number of variables. Likelihood Weighting solves this by fixing the evidence (i.e. not sampling it) and then using weights to make sure that our overall sampling is still consistent.\n",
     "\n",
     "The pseudocode in **Figure 14.15** is implemented as **likelihood_weighting** and **weighted_sample**."
@@ -1124,13 +3719,124 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
+   "execution_count": 60,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "\n",
+       "\n",
+       "\n",
+       "  def  weighted_sample ( bn ,  e ): \n",
+       "    """Sample an event from bn that's consistent with the evidence e; \n",
+       "    return the event and its weight, the likelihood that the event \n",
+       "    accords to the evidence.""" \n",
+       "    w  =  1 \n",
+       "    event  =  dict ( e )   # boldface x in [Figure 14.15] \n",
+       "    for  node  in  bn . nodes : \n",
+       "        Xi  =  node . variable \n",
+       "        if  Xi  in  e : \n",
+       "            w  *=  node . p ( e [ Xi ],  event ) \n",
+       "        else : \n",
+       "            event [ Xi ]  =  node . sample ( event ) \n",
+       "    return  event ,  w \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
-    "%psource weighted_sample"
+    "psource(weighted_sample)"
    ]
   },
   {
@@ -1145,24 +3851,144 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 61,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "({'Rain': True, 'Cloudy': False, 'Sprinkler': True, 'WetGrass': True}, 0.2)"
+      ]
+     },
+     "execution_count": 61,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "weighted_sample(sprinkler, dict(Rain=True))"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
+   "execution_count": 62,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "\n",
+       "\n",
+       "\n",
+       "  def  likelihood_weighting ( X ,  e ,  bn ,  N = 10000 ): \n",
+       "    """Estimate the probability distribution of variable X given \n",
+       "    evidence e in BayesNet bn.  [Figure 14.15] \n",
+       "    >>> random.seed(1017) \n",
+       "    >>> likelihood_weighting('Burglary', dict(JohnCalls=T, MaryCalls=T), \n",
+       "    ...   burglary, 10000).show_approx() \n",
+       "    'False: 0.702, True: 0.298' \n",
+       "    """ \n",
+       "    W  =  { x :  0  for  x  in  bn . variable_values ( X )} \n",
+       "    for  j  in  range ( N ): \n",
+       "        sample ,  weight  =  weighted_sample ( bn ,  e )   # boldface x, w in [Figure 14.15] \n",
+       "        W [ sample [ X ]]  +=  weight \n",
+       "    return  ProbDist ( X ,  W ) \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
-    "%psource likelihood_weighting"
+    "psource(likelihood_weighting)"
    ]
   },
   {
@@ -1174,11 +4000,20 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 63,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'False: 0.2, True: 0.8'"
+      ]
+     },
+     "execution_count": 63,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "likelihood_weighting('Cloudy', dict(Rain=True), sprinkler, 200).show_approx()"
    ]
@@ -1196,13 +4031,124 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
+   "execution_count": 64,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "\n",
+       "\n",
+       "\n",
+       "  def  gibbs_ask ( X ,  e ,  bn ,  N = 1000 ): \n",
+       "    """[Figure 14.16]""" \n",
+       "    assert  X  not  in  e ,  "Query variable must be distinct from evidence" \n",
+       "    counts  =  { x :  0  for  x  in  bn . variable_values ( X )}   # bold N in [Figure 14.16] \n",
+       "    Z  =  [ var  for  var  in  bn . variables  if  var  not  in  e ] \n",
+       "    state  =  dict ( e )   # boldface x in [Figure 14.16] \n",
+       "    for  Zi  in  Z : \n",
+       "        state [ Zi ]  =  random . choice ( bn . variable_values ( Zi )) \n",
+       "    for  j  in  range ( N ): \n",
+       "        for  Zi  in  Z : \n",
+       "            state [ Zi ]  =  markov_blanket_sample ( Zi ,  state ,  bn ) \n",
+       "            counts [ state [ X ]]  +=  1 \n",
+       "    return  ProbDist ( X ,  counts ) \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
-    "%psource gibbs_ask"
+    "psource(gibbs_ask)"
    ]
   },
   {
@@ -1214,39 +4160,2377 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
+   "execution_count": 65,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'False: 0.215, True: 0.785'"
+      ]
+     },
+     "execution_count": 65,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "gibbs_ask('Cloudy', dict(Rain=True), sprinkler, 200).show_approx()"
    ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
   },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.4.3"
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Runtime analysis\n",
+    "Let's take a look at how much time each algorithm takes."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 66,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "13.2 ms ± 3.45 ms per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%timeit\n",
+    "all_observations = [prior_sample(sprinkler) for x in range(1000)]\n",
+    "rain_true = [observation for observation in all_observations if observation['Rain'] == True]\n",
+    "len([observation for observation in rain_true if observation['Cloudy'] == True]) / len(rain_true)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 67,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "11 ms ± 687 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%timeit\n",
+    "rejection_sampling('Cloudy', dict(Rain=True), sprinkler, 1000)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 68,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "2.12 ms ± 554 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%timeit\n",
+    "likelihood_weighting('Cloudy', dict(Rain=True), sprinkler, 200)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 69,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "14.4 ms ± 2.16 ms per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
+     ]
+    }
+   ],
+   "source": [
+    "%%timeit\n",
+    "gibbs_ask('Cloudy', dict(Rain=True), sprinkler, 200)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As expected, all algorithms have a very similar runtime.\n",
+    "However, rejection sampling would be a lot faster and more accurate when the probability of finding data-points consistent with the required evidence is small.\n",
+    "class  HiddenMarkovModel : \n",
+       "    """A Hidden markov model which takes Transition model and Sensor model as inputs""" \n",
+       "\n",
+       "    def  __init__ ( self ,  transition_model ,  sensor_model ,  prior = None ): \n",
+       "        self . transition_model  =  transition_model \n",
+       "        self . sensor_model  =  sensor_model \n",
+       "        self . prior  =  prior  or  [ 0.5 ,  0.5 ] \n",
+       "\n",
+       "    def  sensor_dist ( self ,  ev ): \n",
+       "        if  ev  is  True : \n",
+       "            return  self . sensor_model [ 0 ] \n",
+       "        else : \n",
+       "            return  self . sensor_model [ 1 ] \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "psource(HiddenMarkovModel)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We instantiate the object **`hmm`** of the class using a list of lists for both the transition and the sensor model."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 71,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "umbrella_transition_model = [[0.7, 0.3], [0.3, 0.7]]\n",
+    "umbrella_sensor_model = [[0.9, 0.2], [0.1, 0.8]]\n",
+    "hmm = HiddenMarkovModel(umbrella_transition_model, umbrella_sensor_model)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The **`sensor_dist()`** method returns a list with the conditional probabilities of the sensor model."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 72,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[0.9, 0.2]"
+      ]
+     },
+     "execution_count": 72,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "hmm.sensor_dist(ev=True)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now that we have defined an HMM object, our task here is to compute the belief $B_{t}(x)= P(X_{t}|U_{1:t})$ given evidence **U** at each time step **t**.\n",
+    "def  forward ( HMM ,  fv ,  ev ): \n",
+       "    prediction  =  vector_add ( scalar_vector_product ( fv [ 0 ],  HMM . transition_model [ 0 ]), \n",
+       "                            scalar_vector_product ( fv [ 1 ],  HMM . transition_model [ 1 ])) \n",
+       "    sensor_dist  =  HMM . sensor_dist ( ev ) \n",
+       "\n",
+       "    return  normalize ( element_wise_product ( sensor_dist ,  prediction )) \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "psource(forward)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 74,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "The probability of raining on day 1 is 0.82\n"
+     ]
+    }
+   ],
+   "source": [
+    "umbrella_prior = [0.5, 0.5]\n",
+    "belief_day_1 = forward(hmm, umbrella_prior, ev=True)\n",
+    "print ('The probability of raining on day 1 is {:.2f}'.format(belief_day_1[0]))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In **Day 2** our initial belief is the updated belief of **Day 1**.\n",
+    "Again using the **`forward()`** function we can compute the probability of raining in **Day 2**"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 75,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "The probability of raining in day 2 is 0.88\n"
+     ]
+    }
+   ],
+   "source": [
+    "belief_day_2 = forward(hmm, belief_day_1, ev=True)\n",
+    "print ('The probability of raining in day 2 is {:.2f}'.format(belief_day_2[0]))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In the smoothing part we are interested in computing the distribution over past states given evidence up to the present. Assume that we want to compute the distribution for the time **k**, for $0\\leq k\n",
+       "\n",
+       "\n",
+       "  def  backward ( HMM ,  b ,  ev ): \n",
+       "    sensor_dist  =  HMM . sensor_dist ( ev ) \n",
+       "    prediction  =  element_wise_product ( sensor_dist ,  b ) \n",
+       "\n",
+       "    return  normalize ( vector_add ( scalar_vector_product ( prediction [ 0 ],  HMM . transition_model [ 0 ]), \n",
+       "                                scalar_vector_product ( prediction [ 1 ],  HMM . transition_model [ 1 ]))) \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "psource(backward)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 77,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[0.6272727272727272, 0.37272727272727274]"
+      ]
+     },
+     "execution_count": 77,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "b = [1, 1]\n",
+    "backward(hmm, b, ev=True)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Some may notice that the result is not the same as in the book. The main reason is that in the book the normalization step is not used. If we want to normalize the result, one can use the **`normalize()`** helper function.\n",
+    "\n",
+    "In order to find the smoothed estimate for raining in **Day k**, we will use the **`forward_backward()`** function. As in the example in the book, the umbrella is observed in both days and the prior distribution is [0.5, 0.5]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 78,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/markdown": [
+       "### AIMA3e\n",
+       "__function__ FORWARD-BACKWARD(__ev__, _prior_) __returns__ a vector of probability distributions  \n",
+       " __inputs__: __ev__, a vector of evidence values for steps 1,…,_t_  \n",
+       "     _prior_, the prior distribution on the initial state, __P__(__X__0 )  \n",
+       " __local variables__: __fv__, a vector of forward messages for steps 0,…,_t_    \n",
+       "        __b__, a representation of the backward message, initially all 1s  \n",
+       "        __sv__, a vector of smoothed estimates for steps 1,…,_t_  \n",
+       "\n",
+       " __fv__\\[0\\] ← _prior_  \n",
+       " __for__ _i_ = 1 __to__ _t_ __do__  \n",
+       "   __fv__\\[_i_\\] ← FORWARD(__fv__\\[_i_ − 1\\], __ev__\\[_i_\\])  \n",
+       " __for__ _i_ = _t_ __downto__ 1 __do__  \n",
+       "   __sv__\\[_i_\\] ← NORMALIZE(__fv__\\[_i_\\] × __b__)  \n",
+       "   __b__ ← BACKWARD(__b__, __ev__\\[_i_\\])  \n",
+       " __return__ __sv__\n",
+       "\n",
+       "---\n",
+       "__Figure ??__ The forward\\-backward algorithm for smoothing: computing posterior probabilities of a sequence of states given a sequence of observations. The FORWARD and BACKWARD operators are defined by Equations (__??__) and (__??__), respectively."
+      ],
+      "text/plain": [
+       ""
+      ]
+     },
+     "execution_count": 78,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "pseudocode('Forward-Backward')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 79,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "The probability of raining in Day 0 is 0.65 and in Day 1 is 0.88\n"
+     ]
+    }
+   ],
+   "source": [
+    "umbrella_prior = [0.5, 0.5]\n",
+    "prob = forward_backward(hmm, ev=[T, T], prior=umbrella_prior)\n",
+    "print ('The probability of raining in Day 0 is {:.2f} and in Day 1 is {:.2f}'.format(prob[0][0], prob[1][0]))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "\n",
+    "Since HMMs are represented as single variable systems, we can represent the transition model and sensor model as matrices.\n",
+    "The `forward_backward` algorithm can be easily carried out on this representation (as we have done here) with a time complexity of $O({S}^{2} t)$ where t is the length of the sequence and each step multiplies a vector of size $S$ with a matrix of dimensions $SxS$.\n",
+    "def  fixed_lag_smoothing ( e_t ,  HMM ,  d ,  ev ,  t ): \n",
+       "    """[Figure 15.6] \n",
+       "    Smoothing algorithm with a fixed time lag of 'd' steps. \n",
+       "    Online algorithm that outputs the new smoothed estimate if observation \n",
+       "    for new time step is given.""" \n",
+       "    ev . insert ( 0 ,  None ) \n",
+       "\n",
+       "    T_model  =  HMM . transition_model \n",
+       "    f  =  HMM . prior \n",
+       "    B  =  [[ 1 ,  0 ],  [ 0 ,  1 ]] \n",
+       "    evidence  =  [] \n",
+       "\n",
+       "    evidence . append ( e_t ) \n",
+       "    O_t  =  vector_to_diagonal ( HMM . sensor_dist ( e_t )) \n",
+       "    if  t  >  d : \n",
+       "        f  =  forward ( HMM ,  f ,  e_t ) \n",
+       "        O_tmd  =  vector_to_diagonal ( HMM . sensor_dist ( ev [ t  -  d ])) \n",
+       "        B  =  matrix_multiplication ( inverse_matrix ( O_tmd ),  inverse_matrix ( T_model ),  B ,  T_model ,  O_t ) \n",
+       "    else : \n",
+       "        B  =  matrix_multiplication ( B ,  T_model ,  O_t ) \n",
+       "    t  +=  1 \n",
+       "\n",
+       "    if  t  >  d : \n",
+       "        # always returns a 1x2 matrix \n",
+       "        return  [ normalize ( i )  for  i  in  matrix_multiplication ([ f ],  B )][ 0 ] \n",
+       "    else : \n",
+       "        return  None \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "psource(fixed_lag_smoothing)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "This algorithm applies `forward` as usual and optimizes the smoothing step by using the equations above.\n",
+    "This optimization could be achieved only because HMM properties can be represented as matrices.\n",
+    "def  particle_filtering ( e ,  N ,  HMM ): \n",
+       "    """Particle filtering considering two states variables.""" \n",
+       "    dist  =  [ 0.5 ,  0.5 ] \n",
+       "    # Weight Initialization \n",
+       "    w  =  [ 0  for  _  in  range ( N )] \n",
+       "    # STEP 1 \n",
+       "    # Propagate one step using transition model given prior state \n",
+       "    dist  =  vector_add ( scalar_vector_product ( dist [ 0 ],  HMM . transition_model [ 0 ]), \n",
+       "                      scalar_vector_product ( dist [ 1 ],  HMM . transition_model [ 1 ])) \n",
+       "    # Assign state according to probability \n",
+       "    s  =  [ 'A'  if  probability ( dist [ 0 ])  else  'B'  for  _  in  range ( N )] \n",
+       "    w_tot  =  0 \n",
+       "    # Calculate importance weight given evidence e \n",
+       "    for  i  in  range ( N ): \n",
+       "        if  s [ i ]  ==  'A' : \n",
+       "            # P(U|A)*P(A) \n",
+       "            w_i  =  HMM . sensor_dist ( e )[ 0 ]  *  dist [ 0 ] \n",
+       "        if  s [ i ]  ==  'B' : \n",
+       "            # P(U|B)*P(B) \n",
+       "            w_i  =  HMM . sensor_dist ( e )[ 1 ]  *  dist [ 1 ] \n",
+       "        w [ i ]  =  w_i \n",
+       "        w_tot  +=  w_i \n",
+       "\n",
+       "    # Normalize all the weights \n",
+       "    for  i  in  range ( N ): \n",
+       "        w [ i ]  =  w [ i ]  /  w_tot \n",
+       "\n",
+       "    # Limit weights to 4 digits \n",
+       "    for  i  in  range ( N ): \n",
+       "        w [ i ]  =  float ( "{0:.4f}" . format ( w [ i ])) \n",
+       "\n",
+       "    # STEP 2 \n",
+       "\n",
+       "    s  =  weighted_sample_with_replacement ( N ,  s ,  w ) \n",
+       "\n",
+       "    return  s \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "psource(particle_filtering)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Here, `scalar_vector_product` and `vector_add` are helper functions to help with vector math and `weighted_sample_with_replacement` resamples from a weighted sample and replaces the original sample, as is obvious from the name.\n",
+    "def  monte_carlo_localization ( a ,  z ,  N ,  P_motion_sample ,  P_sensor ,  m ,  S = None ): \n",
+       "    """Monte Carlo localization algorithm from Fig 25.9""" \n",
+       "\n",
+       "    def  ray_cast ( sensor_num ,  kin_state ,  m ): \n",
+       "        return  m . ray_cast ( sensor_num ,  kin_state ) \n",
+       "\n",
+       "    M  =  len ( z ) \n",
+       "    W  =  [ 0 ] * N \n",
+       "    S_  =  [ 0 ] * N \n",
+       "    W_  =  [ 0 ] * N \n",
+       "    v  =  a [ 'v' ] \n",
+       "    w  =  a [ 'w' ] \n",
+       "\n",
+       "    if  S  is  None : \n",
+       "        S  =  [ m . sample ()  for  _  in  range ( N )] \n",
+       "\n",
+       "    for  i  in  range ( N ): \n",
+       "        S_ [ i ]  =  P_motion_sample ( S [ i ],  v ,  w ) \n",
+       "        W_ [ i ]  =  1 \n",
+       "        for  j  in  range ( M ): \n",
+       "            z_  =  ray_cast ( j ,  S_ [ i ],  m ) \n",
+       "            W_ [ i ]  =  W_ [ i ]  *  P_sensor ( z [ j ],  z_ ) \n",
+       "\n",
+       "    S  =  weighted_sample_with_replacement ( N ,  S_ ,  W_ ) \n",
+       "    return  S \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "psource(monte_carlo_localization)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Our implementation of Monte Carlo Localization uses the range scan method.\n",
+    "The `ray_cast` helper function casts rays in different directions and stores the range values.\n",
+    ""
+      ]
+     },
+     "metadata": {
+      "needs_background": "light"
+     },
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "m = MCLmap([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0],\n",
+    "            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],\n",
+    "            [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0],\n",
+    "            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],\n",
+    "            [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0],\n",
+    "            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],\n",
+    "            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],\n",
+    "            [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],\n",
+    "            [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n",
+    "            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],\n",
+    "            [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0]])\n",
+    "\n",
+    "heatmap(m.m, cmap='binary')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let's define the motion model as a function `P_motion_sample`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 91,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def P_motion_sample(kin_state, v, w):\n",
+    "    \"\"\"Sample from possible kinematic states.\n",
+    "    Returns from a single element distribution (no uncertainty in motion)\"\"\"\n",
+    "    pos = kin_state[:2]\n",
+    "    orient = kin_state[2]\n",
+    "\n",
+    "    # for simplicity the robot first rotates and then moves\n",
+    "    orient = (orient + w)%4\n",
+    "    for _ in range(orient):\n",
+    "        v = (v[1], -v[0])\n",
+    "    pos = vector_add(pos, v)\n",
+    "    return pos + (orient,)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Define the sensor model as a function `P_sensor`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 92,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def P_sensor(x, y):\n",
+    "    \"\"\"Conditional probability for sensor reading\"\"\"\n",
+    "    # Need not be exact probability. Can use a scaled value.\n",
+    "    if x == y:\n",
+    "        return 0.8\n",
+    "    elif abs(x - y) <= 2:\n",
+    "        return 0.05\n",
+    "    else:\n",
+    "        return 0"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Initializing variables."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 93,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "a = {'v': (0, 0), 'w': 0}\n",
+    "z = (2, 4, 1, 6)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let's run `monte_carlo_localization` with these parameters to find a sample distribution S."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 94,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "S = monte_carlo_localization(a, z, 1000, P_motion_sample, P_sensor, m)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let's plot the values in the sample distribution `S`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 95,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "GRID:\n",
+      "  0   0   12     0   143   14     0   0   0   0   0   0   0   0   0   0   0\n",
+      "  0   0    0     0    17   52   201   6   0   0   0   0   0   0   0   0   0\n",
+      "  0   0    0     0     0    0     0   0   0   0   0   0   0   0   0   0   0\n",
+      "  0   0    0     3     5   19     9   3   0   0   0   0   0   0   0   0   0\n",
+      "  0   0    0     0     0    0     0   0   0   0   0   0   0   0   0   0   0\n",
+      "  0   0    6   166     0   21     0   0   0   0   0   0   0   0   0   0   0\n",
+      "  0   0    0     1    11   75     0   0   0   0   0   0   0   0   0   0   0\n",
+      " 73   0    0     0     0    0     0   0   0   1   0   0   0   0   0   0   0\n",
+      "124   0    0     0     0    0     0   1   0   3   0   0   0   0   0   0   0\n",
+      "  0   0    0    14     4   15     1   0   0   0   0   0   0   0   0   0   0\n",
+      "  0   0    0     0     0    0     0   0   0   0   0   0   0   0   0   0   0\n"
+     ]
+    },
+    {
+     "data": {
+      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAfAAAAFaCAYAAADhKw9uAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAATEElEQVR4nO3df6zldX3n8debuSK/F2Swlt+yi7pq2upOjdbU7Qqs+KNis5td7dJg2w1Ju1U0thZtIt1s0pi2cdukjV0WLSQl2i7S6nZbFW271qyLHVBUxFYiCKMIA4aCXSsF3vvHPSS317lzh3u+c858Lo9HMrn3nPO95/P+zNy5z/mee+6Z6u4AAGM5bNkDAACPn4ADwIAEHAAGJOAAMCABB4ABCTgADEjAAWBAAg6HsKq6varOXXfd66vqkxPcd1fVP5v3foDlEHAAGJCAw8Cq6uSq+kBV7a2q26rqjWtue0FVfaqq7q+qu6rqt6rq8Nltn5gddlNVfauq/n1V/UhV7amqt1bVPbOPeU1VvaKq/qaqvllVbz+Q+5/d3lX1xqr6SlXdW1W/VlW+5sBE/GWCQc1i+D+T3JTklCTnJHlTVb1sdsgjSd6cZGeSF81u/9kk6e6XzI75/u4+prt/f3b5aUmOmN3fO5L89yQXJvkXSX44yTuq6qzN7n+NH0uyK8nzk1yQ5Kem2DuQlNdCh0NXVd2e1UA+vObqw5PcmOQtSf5Hd5++5vi3JXlGd//kPu7rTUn+ZXf/2OxyJzm7u2+dXf6RJH+a5JjufqSqjk3yQJIXdvf1s2NuSPJfuvuPDvD+X97dH55d/tkk/6a7z5njtwSYWVn2AMCmXtPdH3vsQlW9Psl/THJGkpOr6v41x+5I8pez456R5F1ZPQM+Kqt/32/YZK37uvuR2fvfnr29e83t305yzOO4/zvXvP/VJCdvsj5wgDyEDuO6M8lt3X38ml/HdvcrZre/O8mXsnqWfVyStyepCdc/kPs/bc37pyf5+oTrwxOagMO4Pp3kgar6xao6sqp2VNVzq+oHZ7c/9hD4t6rqWUl+Zt3H353krGzdZvefJL9QVSdU1WlJLkny+/s4BtgCAYdBzR7q/tEkP5DktiT3JrkiyT+ZHfLzSX48yYNZfTLa+nj+cpKrZs8i/3dbGGGz+0+SD2b1YfXPJvlfSd6zhXWAffAkNuCgWP8kOWBazsABYEACDgAD8hA6AAzIGTgADGihL+Syc+eJfebpp21+4GgefWTzY6Z02I6FLfXQbZ9b2FqHn/Gcha21yN9DgHnc8Jmb7u3uk9Zfv9CAn3n6adn9yY9tfuBg+u8fWOh6dcRxC1vr9gtPWdhaZ1zxhwtbq444fmFrAcyjjj7pq/u63kPoADAgAQeAAQk4AAxIwAFgQAIOAAMScAAYkIADwIAEHAAGJOAAMKC5Al5V51fVX1fVrVV16VRDAQD7t+WAV9WOJL+d5OVJnp3kdVX17KkGAwA2Ns8Z+AuS3NrdX+nuh5K8P8kF04wFAOzPPAE/Jcmday7vmV33j1TVxVW1u6p27733vjmWAwAeM0/Aax/X9Xdd0X15d+/q7l0n7TxxjuUAgMfME/A9Sdb+596nJvn6fOMAAAdinoD/VZKzq+rpVXV4ktcm+dA0YwEA+7Oy1Q/s7oer6ueSfCTJjiTv7e6bJ5sMANjQlgOeJN39J0n+ZKJZAIAD5JXYAGBAAg4AAxJwABiQgAPAgAQcAAYk4AAwIAEHgAHN9XPgrKojjlv2CAfNGZfftLC1+va/XNha//nHL17YWkly2advX9hatfLkha0FLI8zcAAYkIADwIAEHAAGJOAAMCABB4ABCTgADEjAAWBAAg4AAxJwABiQgAPAgAQcAAYk4AAwIAEHgAEJOAAMSMABYEACDgADEnAAGJCAA8CABBwABiTgADAgAQeAAQk4AAxIwAFgQAIOAAMScAAYkIADwIB
Wlj0Ah7Y6aufi1nrWjy5srV++8a6FrQVwMDgDB4ABCTgADEjAAWBAAg4AAxJwABiQgAPAgAQcAAYk4AAwIAEHgAEJOAAMaMsBr6rTqurPq+qWqrq5qi6ZcjAAYGPzvBb6w0ne0t03VtWxSW6oquu6+4sTzQYAbGDLZ+DdfVd33zh7/8EktyQ5ZarBAICNTfI98Ko6M8nzkly/j9surqrdVbV77733TbEcADzhzR3wqjomyQeSvKm7H1h/e3df3t27unvXSTtPnHc5ACBzBryqnpTVeF/d3ddOMxIAsJl5noVeSd6T5Jbuftd0IwEAm5nnDPzFSX4iyUur6rOzX6+YaC4AYD+2/GNk3f3JJDXhLADAAfJKbAAwIAEHgAEJOAAMSMABYEACDgADEnAAGJCAA8CABBwABjTP/wfOkvSjjyxwsQWu9e37F7fW4Ucvbq0kWTliYUvVYTsWthawPM7AAWBAAg4AAxJwABiQgAPAgAQcAAYk4AAwIAEHgAEJOAAMSMABYEACDgADEnAAGJCAA8CABBwABiTgADAgAQeAAQk4AAxIwAFgQAIOAAMScAAYkIADwIAEHAAGJOAAMCABB4ABCTgADEjAAWBAAg4AA1pZ9gA8fnXYjgWutsC1jnnq4tYCGJwzcAAYkIADwIAEHAAGJOAAMCABB4ABCTgADEjAAWBAAg4AAxJwABiQgAPAgOYOeFXtqKrPVNUfTzEQALC5Kc7AL0lyywT3AwAcoLkCXlWnJnllkiumGQcAOBDznoH/RpK3Jnl0owOq6uKq2l1Vu/fee9+cywEAyRwBr6pXJbmnu2/Y33HdfXl37+ruXSftPHGrywEAa8xzBv7iJK+uqtuTvD/JS6vq9yaZCgDYry0HvLvf1t2ndveZSV6b5M+6+8LJJgMANuTnwAFgQCtT3El3/0WSv5jivgCAzTkDB4ABCTgADEjAAWBAAg4AAxJwABiQgAPAgAQcAAY0yc+BP9H1w99Z6HrXvfL0ha31rz9y98LW6ge/sbC16tinLWwtgIPBGTgADEjAAWBAAg4AAxJwABiQgAPAgAQcAAYk4AAwIAEHgAEJOAAMSMABYEACDgADEnAAGJCAA8CABBwABiTgADAgAQeAAQk4AAxIwAFgQAIOAAMScAAYkIADwIAEHAAGJOAAMCABB4ABCTgADEjAAWBAAg4AA1pZ9gDbQa08eaHrnffhbyxsrf7Og4tb6//8t4WtVS+7bGFrARwMzsABYEACDgADEnAAGJCAA8CABBwABiTgADAgAQeAAQk4AAxIwAFgQHMFvKqOr6prqupLVXVLVb1oqsEAgI3N+1Kqv5nkw939b6vq8CRHTTATALCJLQe8qo5L8pIkr0+S7n4oyUPTjAUA7M88D6GflWRvkt+tqs9U1RVVdfT6g6rq4qraXVW799573xzLAQCPmSfgK0men+Td3f28JH+X5NL1B3X35d29q7t3nbTzxDmWAwAeM0/A9yTZ093Xzy5fk9WgAwAH2ZYD3t3fSHJnVT1zdtU5Sb44yVQAwH7N+yz0NyS5evYM9K8k+cn5RwIANjNXwLv7s0l2TTQLAHCAvBIbAAxIwAFgQAIOAAMScAAYkIADwIAEHAAGJOAAMCABB4ABzftKbI/PA3fl0Y/9ykKWOuzcty9knWWoqsUt9uRjF7ZUveyyha3FNLp7YWst9PMeBuAMHAAGJOAAMCABB4ABCTgADEjAAWBAAg4AAxJwABiQgAPAgAQcAAYk4AAwIAEHgAEJOAAMSMABYEACDgADEnAAGJCAA8CABBwABiTgADAgAQeAAQk4AAxIwAFgQAIOAAMScAAYkIADwIAEHAAGJOAAMKCVRS72yAPfzLc+evVC1jru3LcvZB04EN29sLWqamFrpR9d3Fq1Y3FrwQCcgQPAgAQcAAYk4AAwIAEHgAEJOAAMSMABYEACDgADEnAAGJCAA8CABBwABjRXwKvqzVV1c1V9oareV1VHTDUYALCxLQe8qk5
J8sYku7r7uUl2JHntVIMBABub9yH0lSRHVtVKkqOSfH3+kQCAzWw54N39tSS/nuSOJHcl+dvu/uj646rq4qraXVW77/v2Av/nIgDYxuZ5CP2EJBckeXqSk5McXVUXrj+uuy/v7l3dvevEIz1nDgCmME9Rz01yW3fv7e5/SHJtkh+aZiwAYH/mCfgdSV5YVUdVVSU5J8kt04wFAOzPPN8Dvz7JNUluTPL52X1dPtFcAMB+rMzzwd19WZLLJpoFADhAnlUGAAMScAAYkIADwIAEHAAGJOAAMCABB4ABCTgADGiunwN/vHac+pwc96sfW+SS21L//f2LW+xJRy9urYceXNxaR5ywuLWSrL5Y4fZTh+1Y9gjwhOUMHAAGJOAAMCABB4ABCTgADEjAAWBAAg4AAxJwABiQgAPAgAQcAAYk4AAwIAEHgAEJOAAMSMABYEACDgADEnAAGJCAA8CABBwABiTgADAgAQeAAQk4AAxIwAFgQAIOAAMScAAYkIADwIAEHAAGJOAAMCABB4ABrSx7AB6/OuL4ZY9wcBz5lGVPADAMZ+AAMCABB4ABCTgADEjAAWBAAg4AAxJwABiQgAPAgAQcAAYk4AAwoE0DXlXvrap7quoLa657SlVdV1Vfnr094eCOCQCsdSBn4FcmOX/ddZcm+Xh3n53k47PLAMCCbBrw7v5Ekm+uu/qCJFfN3r8qyWsmngsA2I+tfg/8e7r7riSZvX3qRgdW1cVVtbuqdu+9974tLgcArHXQn8TW3Zd3967u3nXSzhMP9nIA8ISw1YDfXVXfmySzt/dMNxIAsJmtBvxDSS6avX9Rkg9OMw4AcCAO5MfI3pfkU0meWVV7quqnk7wzyXlV9eUk580uAwALsrLZAd39ug1uOmfiWQCAA+SV2ABgQAIOAAMScAAYkIADwIAEHAAGJOAAMCABB4ABCTgADKi6e3GLVe1N8tXH+WE7k9x7EMY5FGzXvW3XfSXbd2/bdV/J9t3bdt1Xsn33ttV9ndHdJ62/cqEB34qq2t3du5Y9x8GwXfe2XfeVbN+9bdd9Jdt3b9t1X8n23dvU+/IQOgAMSMABYEAjBPzyZQ9wEG3XvW3XfSXbd2/bdV/J9t3bdt1Xsn33Num+DvnvgQMA322EM3AAYB0BB4ABHdIBr6rzq+qvq+rWqrp02fNMoapOq6o/r6pbqurmqrpk2TNNrap2VNVnquqPlz3LVKrq+Kq6pqq+NPuze9GyZ5pKVb159rn4hap6X1UdseyZtqKq3ltV91TVF9Zc95Squq6qvjx7e8IyZ9yqDfb2a7PPx89V1R9W1fHLnHEr9rWvNbf9fFV1Ve1cxmzz2mhvVfWGWddurqpfnWeNQzbgVbUjyW8neXmSZyd5XVU9e7lTTeLhJG/p7n+e5IVJ/tM22ddalyS5ZdlDTOw3k3y4u5+V5PuzTfZXVackeWOSXd393CQ7krx2uVNt2ZVJzl933aVJPt7dZyf5+OzyiK7Md+/tuiTP7e7vS/I3Sd626KEmcGW+e1+pqtOSnJfkjkUPNKErs25vVfWvklyQ5Pu6+zlJfn2eBQ7ZgCd5QZJbu/sr3f1QkvdndeND6+67uvvG2fsPZjUEpyx3qulU1alJXpnkimXPMpWqOi7JS5K8J0m6+6Huvn+5U01qJcmRVbWS5KgkX1/yPFvS3Z9I8s11V1+Q5KrZ+1clec1Ch5rIvvbW3R/t7odnF/9vklMXPticNvgzS5L/muStSYZ9lvUGe/uZJO/s7u/MjrlnnjUO5YCfkuTONZf3ZBuFLkmq6swkz0ty/XInmdRvZPUv3qPLHmRCZyXZm+R3Z98auKKqjl72UFPo7q9l9SzgjiR3Jfnb7v7ocqea1Pd0913J6j+ekzx1yfMcLD+V5E+XPcQUqurVSb7W3Tcte5aD4BlJfriqrq+q/11VPzjPnR3KAa99XDfsv8bWq6pjknwgyZu6+4FlzzOFqnpVknu6+4ZlzzKxlSTPT/Lu7n5ekr/
LuA/F/iOz7wlfkOTpSU5OcnRVXbjcqXg8quqXsvqtuauXPcu8quqoJL+U5B3LnuUgWUlyQla/ffoLSf6gqvbVugNyKAd8T5LT1lw+NYM+tLdeVT0pq/G+uruvXfY8E3pxkldX1e1Z/ZbHS6vq95Y70iT2JNnT3Y89UnJNVoO+HZyb5Lbu3tvd/5Dk2iQ/tOSZpnR3VX1vkszezvWQ5aGmqi5K8qok/6G3x4t6/NOs/mPyptnXkVOT3FhVT1vqVNPZk+TaXvXprD5SueUn6R3KAf+rJGdX1dOr6vCsPrHmQ0ueaW6zf229J8kt3f2uZc8zpe5+W3ef2t1nZvXP68+6e/izue7+RpI7q+qZs6vOSfLFJY40pTuSvLCqjpp9bp6TbfIEvZkPJblo9v5FST64xFkmVVXnJ/nFJK/u7v+37Hmm0N2f7+6ndveZs68je5I8f/Z3cDv4oyQvTZKqekaSwzPH/7p2yAZ89uSMn0vykax+QfmD7r55uVNN4sVJfiKrZ6efnf16xbKHYlNvSHJ1VX0uyQ8k+ZUlzzOJ2aMK1yS5Mcnns/o1YciXsayq9yX5VJJnVtWeqvrpJO9Mcl5VfTmrz2p+5zJn3KoN9vZbSY5Nct3s68jvLHXILdhgX9vCBnt7b5KzZj9a9v4kF83zyImXUgWAAR2yZ+AAwMYEHAAGJOAAMCABB4ABCTgADEjAAWBAAg4AA/r/85kBLqIO9qEAAAAASUVORK5CYII=\n",
+      "text/plain": [
+       ""
+      ]
+     },
+     "metadata": {
+      "needs_background": "light"
+     },
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "grid = [[0]*17 for _ in range(11)]\n",
+    "for x, y, _ in S:\n",
+    "    if 0 <= x < 11 and 0 <= y < 17:\n",
+    "        grid[x][y] += 1\n",
+    "print(\"GRID:\")\n",
+    "print_table(grid)\n",
+    "heatmap(grid, cmap='Oranges')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The distribution is highly concentrated at `(5, 3)`, but the robot is not very confident about its position as some other cells also have high probability values."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let's look at another scenario."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 96,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "GRID:\n",
+      "0   0   0   0   0   0   0   0      0   0   0   0   0   0   0   0   0\n",
+      "0   0   0   0   0   0   0   0   1000   0   0   0   0   0   0   0   0\n",
+      "0   0   0   0   0   0   0   0      0   0   0   0   0   0   0   0   0\n",
+      "0   0   0   0   0   0   0   0      0   0   0   0   0   0   0   0   0\n",
+      "0   0   0   0   0   0   0   0      0   0   0   0   0   0   0   0   0\n",
+      "0   0   0   0   0   0   0   0      0   0   0   0   0   0   0   0   0\n",
+      "0   0   0   0   0   0   0   0      0   0   0   0   0   0   0   0   0\n",
+      "0   0   0   0   0   0   0   0      0   0   0   0   0   0   0   0   0\n",
+      "0   0   0   0   0   0   0   0      0   0   0   0   0   0   0   0   0\n",
+      "0   0   0   0   0   0   0   0      0   0   0   0   0   0   0   0   0\n",
+      "0   0   0   0   0   0   0   0      0   0   0   0   0   0   0   0   0\n"
+     ]
+    },
+    {
+     "data": {
+      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAfAAAAFaCAYAAADhKw9uAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAARl0lEQVR4nO3df6zld13n8dd7OzbQFpbaKUp/YOluwWWJSnckIJF1KWQLshSzmxV2MUXdNNEVCkGxaIIkm2zIalhNNJhuwTaxAd1SBV1FKv5gSdjqtFChFKWh0A5UOlOCoGu2Ft/7xz01l8vcucM9Z+bM+/J4JJN7fnzv+b4/nbn3eb/fc+5pdXcAgFn+0boHAAC+dgIOAAMJOAAMJOAAMJCAA8BAAg4AAwk4AAwk4HAKq6pPVdXzttz2iqr6wAoeu6vqny77OMB6CDgADCTgMFhVnVdV76yqw1V1T1W9atN9z6iqD1bVF6rq/qr6xao6fXHf+xeb3VFVf11V319V31NVh6rqdVX1wOJzXlJVL6yqv6iqz1fVTx3P4y/u76p6VVV9sqqOVNXPVpXvObAivphgqEUMfyvJHUnOT3JZkldX1b9ebPLlJK9Jsj/Jsxb3/2iSdPdzFtt8e3ef1d2/trj+zUketXi8NyT5H0lenuRfJPnuJG+oqot3evxNvi/JgSSXJrkiyQ+tYu1AUt4LHU5dVfWpbATy4U03n57k9iSvTfI/u/uJm7Z/fZInd/cPHuWxXp3kX3b39y2ud5JLuvvuxfXvSfK7Sc7q7i9X1WOSfDHJM7v71sU2tyX5L939m8f5+C/o7vcsrv9okn/b3Zct8Z8EWNi37gGAHb2ku3//kStV9Yok/ynJtyQ5r6q+sGnb05L878V2T07y5mwcAZ+Rja/323bY14Pd/eXF5b9dfPzcpvv/NslZX8Pj37fp8qeTnLfD/oHj5BQ6zHVfknu6+3Gb/jymu1+4uP8tST6ejaPsxyb5qSS1wv0fz+NfuOnyE5N8doX7h69rAg5z/UmSL1bVT1bVo6vqtKp6WlV95+L+R06B/3VVfWuSH9ny+Z9LcnF2b6fHT5KfqKqzq+rCJFcn+bWjbAPsgoDDUItT3f8myXckuSfJkSTXJfnHi01+PMl/SPKlbLwYbWs835jkhsWryP/9LkbY6fGT5F3ZOK3+4ST/K8lbd7Ef4Ci8iA04Iba+SA5YLUfgADCQgAPAQE6hA8BAjsABYKCT+kYu+/ef0xc98cKdNwQAkiS3feiOI9197tbbT2rAL3rihTn4gd/feUMAIElSZ5776aPd7hQ6AAwk4AAwkIADwEACDgADCTgADCTgADCQgAPAQAIOAAMJOAAMtFTAq+ryqvrzqrq7qq5Z1VAAwLHtOuBVdVqSX0rygiRPTfKyqnrqqgYDALa3zBH4M5Lc3d2f7O6HkrwjyRWrGQsAOJZlAn5+kvs2XT+0uO0rVNVVVXWwqg4ePvLgErsDAB6xTMDrKLf1V93QfW13H+juA+fuP2eJ3QEAj1gm4IeSbP6fe1+Q5LPLjQMAHI9lAv6nSS6pqidV1elJXprk3asZCwA4ln27/cTufriqfizJ7yU5LcnbuvvOlU0GAGxr1wFPku7+nSS/s6JZAIDj5J3YAGAgAQeAgQQcAAYScAAYSMABYCABB4CBBBwABlrq98CBU88bL33CydvX7feftH0BX8kROAAMJOAAMJCAA8BAAg4AAwk4AAwk4AAwkIADwEACDgADCTgADCTgADCQgAPAQAIOAAMJOAAMJOAAMJCAA8BAAg4AAwk4AAwk4AAwkIADwEACDgADCTgADCTgADCQgAPAQAIOAAMJOAAMJOAAMNC+dQ8ArNYbb79/3SMAJ4EjcAAYSMABYCABB4CBBBwABhJwABhIwAFgIAEHgIEEHAAGEnAAGEjAAWCgXQe8qi6sqj+sqruq6s6qunqVgwEA21vmvdA
fTvLa7r69qh6T5LaquqW7P7ai2QCAbez6CLy77+/u2xeXv5TkriTnr2owAGB7K3kOvKouSvL0JLce5b6rqupgVR08fOTBVewOAL7uLR3wqjoryTuTvLq7v7j1/u6+trsPdPeBc/efs+zuAIAsGfCq+oZsxPvG7r55NSMBADtZ5lXoleStSe7q7jevbiQAYCfLHIE/O8kPJHluVX148eeFK5oLADiGXf8aWXd/IEmtcBYA4Dh5JzYAGEjAAWAgAQeAgQQcAAYScAAYSMABYCABB4CBBBwABhJwABhIwAFgIAEHgIEEHAAGEnAAGEjAAWAgAQeAgQQcAAYScAAYSMABYCABB4CBBBwABhJwABhIwAFgIAEHgIEEHAAGEnAAGEjAAWAgAQeAgQQcAAYScAAYSMABYCABB4CBBBwABhJwABhIwAFgIAEHgIEEHAAGEnAAGEjAAWAgAQeAgQQcAAYScAAYSMABYCABB4CBBBwABhJwABhIwAFgoKUDXlWnVdWHquq3VzEQALCzVRyBX53krhU8DgBwnJYKeFVdkOR7k1y3mnEAgOOx7BH4zyd5XZK/326Dqrqqqg5W1cHDRx5ccncAQLJEwKvqRUke6O7bjrVdd1/b3Qe6+8C5+8/Z7e4AgE2WOQJ/dpIXV9WnkrwjyXOr6ldXMhUAcEy7Dnh3v767L+jui5K8NMkfdPfLVzYZALAtvwcOAAPtW8WDdPcfJfmjVTwWALAzR+AAMJCAA8BAAg4AAwk4AAwk4AAwkIADwEACDgADCTgADCTgADCQgAPAQAIOAAMJOAAMJOAAMJCAA8BAAg4AAwk4AAwk4AAwkIADwEACDgADCTgADCTgADCQgAPAQAIOAAMJOAAMJOAAMJCAA8BAAg4AAwk4AAwk4AAwkIADwEACDgADCTgADCTgADCQgAPAQAIOAAMJOAAMJOAAMJCAA8BAAg4AAwk4AAwk4AAwkIADwEACDgADCTgADCTgADDQUgGvqsdV1U1V9fGququqnrWqwQCA7e1b8vN/Icl7uvvfVdXpSc5YwUwAwA52HfCqemyS5yR5RZJ090NJHlrNWADAsSxzCv3iJIeT/EpVfaiqrquqM7duVFVXVdXBqjp4+MiDS+wOAHjEMgHfl+TSJG/p7qcn+Zsk12zdqLuv7e4D3X3g3P3nLLE7AOARywT8UJJD3X3r4vpN2Qg6AHCC7Trg3f2XSe6rqqcsbrosycdWMhUAcEzLvgr9lUluXLwC/ZNJfnD5kQCAnSwV8O7+cJIDK5oFADhO3okNAAYScAAYSMABYCABB4CBBBwABhJwABhIwAFgIAEHgIEEHAAGEnAAGEjAAWAgAQeAgQQcAAYScAAYSMABYCABB4CBBBwABhJwABhIwAFgIAEHgIEEHAAGEnAAGEjAAWAgAQeAgQQcAAYScAAYSMABYCABB4CBBBwABhJwABhIwAFgIAEHgIEEHAAGEnAAGEjAAWAgAQeAgQQcAAYScAAYSMABYCABB4CBBBwABhJwABhIwAFgIAEHgIEEHAAGEnAAGGipgFfVa6rqzqr6aFW9vaoetarBAIDt7TrgVXV+klclOdDdT0tyWpKXrmowAGB7y55C35fk0VW1L8kZST67/EgAwE52HfDu/kySn0tyb5L7k/xVd79363ZVdVVVHayqg4ePPLj7SQGAf7DMKfSzk1yR5ElJzktyZlW9fOt23X1tdx/o7gPn7j9n95MCAP9gmVPoz0tyT3cf7u6/S3Jzku9azVgAwLEsE/B7kzyzqs6oqkpyWZK7VjMWAHAsyzwHfmuSm5LcnuQji8e6dkVzAQDHsG+ZT+7un0nyMyuaBQA4Tt6JDQAGEnAAGEjAAWAgAQeAgQQcAAYScAAYSMABYCABB4CBBBwABhJwABhIwAFgIAEHgIEEHAAGEnAAGEjAAWAgAQeAgQQcAAYScAAYSMABYCABB4CBBBwABhJwABhIwAFgIAEHgIEEHAAGEnAAGEjAAWAgAQe
AgQQcAAYScAAYSMABYCABB4CBBBwABhJwABhIwAFgIAEHgIEEHAAGEnAAGEjAAWAgAQeAgQQcAAYScAAYSMABYCABB4CBBBwABtox4FX1tqp6oKo+uum2b6yqW6rqE4uPZ5/YMQGAzY7nCPz6JJdvue2aJO/r7kuSvG9xHQA4SXYMeHe/P8nnt9x8RZIbFpdvSPKSFc8FABzDbp8D/6buvj9JFh8fv92GVXVVVR2sqoOHjzy4y90BAJud8Bexdfe13X2guw+cu/+cE707APi6sNuAf66qnpAki48PrG4kAGAnuw34u5Ncubh8ZZJ3rWYcAOB4HM+vkb09yQeTPKWqDlXVDyd5U5LnV9Unkjx/cR0AOEn27bRBd79sm7suW/EsAMBx8k5sADCQgAPAQAIOAAMJOAAMJOAAMJCAA8BAAg4AAwk4AAxU3X3ydlZ1OMmnv8ZP25/kyAkY51SwV9e2V9eV7N217dV1JXt3bXt1XcneXdtu1/Ut3X3u1htPasB3o6oOdveBdc9xIuzVte3VdSV7d217dV3J3l3bXl1XsnfXtup1OYUOAAMJOAAMNCHg1657gBNor65tr64r2btr26vrSvbu2vbqupK9u7aVruuUfw4cAPhqE47AAYAtBBwABjqlA15Vl1fVn1fV3VV1zbrnWYWqurCq/rCq7qqqO6vq6nXPtGpVdVpVfaiqfnvds6xKVT2uqm6qqo8v/u6ete6ZVqWqXrP4t/jRqnp7VT1q3TPtRlW9raoeqKqPbrrtG6vqlqr6xOLj2euccbe2WdvPLv49/llV/UZVPW6dM+7G0da16b4fr6quqv3rmG1Z262tql656NqdVfXfltnHKRvwqjotyS8leUGSpyZ5WVU9db1TrcTDSV7b3f8syTOT/Oc9sq7Nrk5y17qHWLFfSPKe7v7WJN+ePbK+qjo/yauSHOjupyU5LclL1zvVrl2f5PItt12T5H3dfUmS9y2uT3R9vnpttyR5Wnd/W5K/SPL6kz3UClyfr15XqurCJM9Pcu/JHmiFrs+WtVXVv0pyRZJv6+5/nuTnltnBKRvwJM9Icnd3f7K7H0ryjmwsfLTuvr+7b19c/lI2QnD+eqdanaq6IMn3Jrlu3bOsSlU9Nslzkrw1Sbr7oe7+wnqnWql9SR5dVfuSnJHks2ueZ1e6+/1JPr/l5iuS3LC4fEOSl5zUoVbkaGvr7vd298OLq/8nyQUnfbAlbfN3liT/Pcnrkox9lfU2a/uRJG/q7v+32OaBZfZxKgf8/CT3bbp+KHsodElSVRcleXqSW9c7yUr9fDa+8P5+3YOs0MVJDif5lcVTA9dV1ZnrHmoVuvsz2TgKuDfJ/Un+qrvfu96pVuqbuvv+ZOOH5ySPX/M8J8oPJfnddQ+xClX14iSf6e471j3LCfDkJN9dVbdW1R9X1Xcu82CncsDrKLeN/Wlsq6o6K8k7k7y6u7+47nlWoapelOSB7r5t3bOs2L4klyZ5S3c/PcnfZO6p2K+weE74iiRPSnJekjOr6uXrnYqvRVX9dDaemrtx3bMsq6rOSPLTSd6w7llOkH1Jzs7G06c/keTXq+porTsup3LADyW5cNP1CzL01N5WVfUN2Yj3jd1987rnWaFnJ3lxVX0qG095PLeqfnW9I63EoSSHuvuRMyU3ZSPoe8HzktzT3Ye7+++S3Jzku9Y80yp9rqqekCSLj0udsjzVVNWVSV6U5D/23nhTj3+SjR8m71h8H7kgye1V9c1rnWp1DiW5uTf8STbOVO76RXqncsD/NMklVfWkqjo9Gy+sefeaZ1ra4qettya5q7vfvO55Vqm7X9/dF3T3Rdn4+/qD7h5/NNfdf5nkvqp6yuKmy5J8bI0jrdK9SZ5ZVWcs/m1elj3yAr2Fdye5cnH5yiTvWuMsK1VVlyf5ySQv7u7/u+55VqG7P9Ldj+/uixbfRw4luXTxNbgX/GaS5yZJVT05yelZ4v+6dsoGfPHijB9L8nvZ+Iby691953q
nWolnJ/mBbBydfnjx54XrHoodvTLJjVX1Z0m+I8l/XfM8K7E4q3BTktuTfCQb3xNGvo1lVb09yQeTPKWqDlXVDyd5U5LnV9UnsvGq5jetc8bd2mZtv5jkMUluWXwf+eW1DrkL26xrT9hmbW9LcvHiV8vekeTKZc6ceCtVABjolD0CBwC2J+AAMJCAA8BAAg4AAwk4AAwk4AAwkIADwED/H3ZBvi8oWJldAAAAAElFTkSuQmCC\n",
+      "text/plain": [
+       ""
+      ]
+     },
+     "metadata": {
+      "needs_background": "light"
+     },
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "a = {'v': (0, 1), 'w': 0}\n",
+    "z = (2, 3, 5, 7)\n",
+    "S = monte_carlo_localization(a, z, 1000, P_motion_sample, P_sensor, m, S)\n",
+    "grid = [[0]*17 for _ in range(11)]\n",
+    "for x, y, _ in S:\n",
+    "    if 0 <= x < 11 and 0 <= y < 17:\n",
+    "        grid[x][y] += 1\n",
+    "print(\"GRID:\")\n",
+    "print_table(grid)\n",
+    "heatmap(grid, cmap='Oranges')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In this case, the robot is almost 100% certain that it is at position `(1, 8)`."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## DECISION THEORETIC AGENT\n",
+    "We now move into the domain of probabilistic decision making.\n",
+    "def  DTAgentProgram ( belief_state ): \n",
+       "    """A decision-theoretic agent. [Figure 13.1]""" \n",
+       "    def  program ( percept ): \n",
+       "        belief_state . observe ( program . action ,  percept ) \n",
+       "        program . action  =  argmax ( belief_state . actions (), \n",
+       "                                key = belief_state . expected_outcome_utility ) \n",
+       "        return  program . action \n",
+       "    program . action  =  None \n",
+       "    return  program \n",
+       ""
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "psource(DTAgentProgram)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The `DTAgentProgram` function is pretty self-explanatory.\n",
+    "