MultiAgentDecisionProcess  Release 0.2.1
TOIDecPOMDPDiscrete.cpp
#include "TOIDecPOMDPDiscrete.h"
#include "RewardModelTOISparse.h" // assumed: used by CreateNewRewardModel() below
#include "RewardModelMapping.h"

using namespace std;

TOIDecPOMDPDiscrete::TOIDecPOMDPDiscrete(
    string name, string descr, string pf) :
    TransitionObservationIndependentMADPDiscrete(name, descr, pf)
{
    _m_initialized = false;
    _m_p_rModel = 0;
}

TOIDecPOMDPDiscrete::TOIDecPOMDPDiscrete(const TOIDecPOMDPDiscrete& o)
{
    throw(E("TOIDecPOMDPDiscrete: copy ctor not yet implemented"));
}
//Destructor
TOIDecPOMDPDiscrete::~TOIDecPOMDPDiscrete()
{
    delete(_m_p_rModel);
}
//Copy assignment operator
TOIDecPOMDPDiscrete& TOIDecPOMDPDiscrete::operator= (const TOIDecPOMDPDiscrete& o)
{
    if (this == &o) return *this; // Gracefully handle self assignment
    // Put the normal assignment duties here...

    throw(E("TOIDecPOMDPDiscrete: assignment not yet implemented"));

    return *this;
}

// Stores (a pointer to) the single-agent DecPOMDPDiscrete model of agent agentI.
void TOIDecPOMDPDiscrete::SetIndividualDecPOMDPD(DecPOMDPDiscrete* model,
                                                 Index agentI)
{
    if(_m_individualDecPOMDPDs.size() <= agentI)
        _m_individualDecPOMDPDs.resize(agentI + 1);

    _m_individualDecPOMDPDs[agentI] = model;
}

bool TOIDecPOMDPDiscrete::SetInitialized(bool b)
{
    if(b == false)
    {
        _m_initialized = false;
        return(true);
    }

    // first initialize the underlying transition-observation-independent MADP
    if( TransitionObservationIndependentMADPDiscrete::SetInitialized(b) )
    {
        if( b == true )
        {
            if (_m_p_rModel == 0)
                throw E("TOIDecPOMDPDiscrete::SetInitialized(true) : no reward model specified yet! ( _m_p_rModel == 0 )");
        }
        _m_initialized = b;
        return(true);
    }
    else
        return(false);
}

void TOIDecPOMDPDiscrete::CreateNewRewardModel()
{
    if(_m_initialized)
        delete(_m_p_rModel);
#if 0
    // cannot call GetNrJointStates() and GetNrJointActions() because
    // we're not initialized yet
    size_t nrJS=1, nrJA=1;
    for(Index i=0;i!=GetNrAgents();++i)
    {
        nrJS*=GetIndividualMADPD(i)->GetNrStates();
        nrJA*=GetIndividualMADPD(i)->GetNrJointActions();
    }

    if(GetSparse())
        _m_p_rModel = new RewardModelMappingSparse(nrJS,
                                                   nrJA);
    else
        _m_p_rModel = new RewardModelMapping(nrJS,
                                             nrJA);
#else
    // assumption: the active branch allocates the TOI-specific sparse reward model
    _m_p_rModel = new RewardModelTOISparse();
#endif
}

string TOIDecPOMDPDiscrete::SoftPrint() const
{
    stringstream ss;
    ss << TransitionObservationIndependentMADPDiscrete::SoftPrint();
    ss << DecPOMDP::SoftPrint();

    if(_m_initialized)
    {
        ss << "Reward model: " << endl;
        ss << _m_p_rModel->SoftPrint();
    }
    else
        throw E("TOIDecPOMDPDiscrete components (reward model) not initialized");

    return(ss.str());
}
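
Reading the functions above together, a natural order of calls when assembling a TOI model by hand is: register the per-agent components, create a reward model, and only then call SetInitialized(true), which is where the _m_p_rModel == 0 check fires. A minimal sketch of that sequence, assuming p0 and p1 point to fully initialized single-agent DecPOMDPDiscrete problems and that the class can be instantiated directly here:

#include <iostream>
#include "TOIDecPOMDPDiscrete.h"
#include "DecPOMDPDiscrete.h"

// Hedged usage sketch (not part of TOIDecPOMDPDiscrete.cpp): builds a
// two-agent transition-observation-independent Dec-POMDP from two
// single-agent models and prints it.
void BuildTwoAgentTOIModel(DecPOMDPDiscrete* p0, DecPOMDPDiscrete* p1)
{
    TOIDecPOMDPDiscrete toi("example-TOI", "two independent agents", "");
    toi.SetNrAgents(2);                 // assumption: inherited from the MADP base classes
    toi.SetIndividualDecPOMDPD(p0, 0);  // agent 0's local model
    toi.SetIndividualDecPOMDPD(p1, 1);  // agent 1's local model
    toi.CreateNewRewardModel();         // allocates _m_p_rModel
    // ... fill in the joint rewards here ...
    toi.SetInitialized(true);           // throws if no reward model was created
    std::cout << toi.SoftPrint();
}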