MultiAgentDecisionProcess  Release 0.2.1
JESPDynamicProgrammingPlanner.h
/* Only include this header file once. */
#ifndef _JESPDYNAMICPROGRAMMINGPLANNER_H_
#define _JESPDYNAMICPROGRAMMINGPLANNER_H_ 1

/* the include directives */
#include <iostream>
#include "Globals.h"
#include "PlanningUnitDecPOMDPDiscrete.h"
//#include "ValueFunctionDecPOMDPDiscrete.h"
#include "JointPolicyPureVector.h"
//#include "JointPolicy.h"
#include "IndividualBeliefJESP.h"

//class JointPolicyPureVector;
class JointPolicy;

/// JESPDynamicProgrammingPlanner plans with the DP JESP algorithm.
class JESPDynamicProgrammingPlanner : public PlanningUnitDecPOMDPDiscrete
{
    private:
        //the best found policy
        JointPolicyPureVector _m_foundPolicy;
        //the expected reward of the best found policy
        double _m_expectedRewardFoundPolicy;
        //intermediate result from ExhaustiveBestResponse
        //JointPolicyPureVector _m_exhBRBestPol;
    protected:

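        // DP-JESP (Nair et al., IJCAI 2003): the policies of all agents but
        // one are held fixed, and that agent's best response is computed by
        // dynamic programming over its individual JESP beliefs, i.e.
        // distributions over states and the other agents' observation
        // histories.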
        double DPBestResponseRecursively(
                const Index agentI,
                const Index aohI,
                const IndividualBeliefJESP& B,
                const Index stage,
                JointPolicyPureVector* jpol,
                std::vector<Index>& new_pol
                );
        void ConstructPolicyRecursively(
                const Index agentI, //the agent we are computing for
                const Index aohI,   //the action-observation history of agentI
                const Index ohI,    //the observation history of agentI
                const Index stage,  //the stage of B
                JointPolicyPureVector* jpol, //the joint policy
                std::vector<Index>& new_pol
                );
    public:

        // Constructor, destructor and copy assignment.
        // (default) Constructor
        //JESPDynamicProgrammingPlanner();
        JESPDynamicProgrammingPlanner(const PlanningUnitMADPDiscreteParameters &params,
                size_t horizon, DecPOMDPDiscreteInterface* p);
        JESPDynamicProgrammingPlanner(int horizon,
                DecPOMDPDiscreteInterface* p);

        //operators:

        //data manipulation (set) functions:
        /// The method that performs the planning.
        void Plan();
        double ExhaustiveBestResponse(JointPolicyPureVector* jpol,
                Index agentI);

        //get (data) functions:
        JointPolicy* GetJointPolicy(void)
            { return(&_m_foundPolicy); }
        JointPolicyDiscrete* GetJointPolicyDiscrete(void)
            { return(&_m_foundPolicy); }
        JointPolicyPureVector* GetJointPolicyPureVector(void)
            { return(&_m_foundPolicy); }
        double GetExpectedReward(void) const
            { return(_m_expectedRewardFoundPolicy); }

};


#endif /* !_JESPDYNAMICPROGRAMMINGPLANNER_H_ */


// Local Variables: ***
// mode:c++ ***
// End: ***
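A minimal usage sketch (not part of the header itself), assuming the
ProblemDecTiger test problem that ships with MADP and its standard include
paths: construct the planner for a problem and horizon, run Plan(), and read
off the value of the joint policy it found.

    #include <iostream>
    #include "ProblemDecTiger.h"
    #include "JESPDynamicProgrammingPlanner.h"

    int main()
    {
        ProblemDecTiger dectiger;  // small two-agent test Dec-POMDP
        int horizon = 3;
        // plan for the given horizon on the given problem
        JESPDynamicProgrammingPlanner jesp(horizon, &dectiger);
        jesp.Plan();
        std::cout << "expected reward of the found policy: "
                  << jesp.GetExpectedReward() << std::endl;
        return 0;
    }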