MultiAgentDecisionProcess
Release 0.2.1
|
POSGDiscrete represents a discrete POSG model. More...
#include <POSGDiscrete.h>
Public Member Functions | |
virtual POSGDiscrete * | Clone () const =0 |
Returns a copy of this class. | |
void | CreateNewRewardModel (Index agentI, size_t nrS, size_t nrJA) |
Creates a new reward model. | |
double | GetReward (Index agentI, Index sI, Index jaI) const |
Return the reward for state, joint action indices. | |
double | GetReward (Index agentI, State *s, JointAction *ja) const |
POSGDiscrete (std::string name="received unspec. by POSGDiscrete", std::string descr="received unspec. by POSGDiscrete", std::string pf="received unspec. by POSGDiscrete") | |
Default constructor. | |
bool | SetInitialized (bool b) |
Sets _m_initialized to b. | |
void | SetReward (Index agentI, Index sI, Index jaI, double r) |
Set the reward for state, joint action indices. | |
void | SetReward (Index agentI, Index sI, Index jaI, Index sucSI, double r) |
Set the reward for state, joint action, and successor state indices. | |
void | SetReward (Index agentI, Index sI, Index jaI, Index sucSI, Index joI, double r) |
Set the reward for state, joint action, suc.state, joint obs indices. | |
std::string | SoftPrint () const |
Prints some information on the POSGDiscrete. | |
~POSGDiscrete () | |
Destructor. | |
![]() | |
virtual void | CreateNewRewardModelForAgent (Index agentI)=0 |
Creates a new reward model mapping. | |
virtual double | GetRewardForAgent (Index agentI, Index sI, Index jaI) const =0 |
Return the reward for state, joint action indices. | |
virtual void | SetRewardForAgent (Index agentI, Index sI, Index jaI, double r)=0 |
Set the reward for state, joint action indices. | |
virtual void | SetRewardForAgent (Index agentI, Index sI, Index jaI, Index sucSI, double r)=0 |
Set the reward for state, joint action, and successor state indices. | |
virtual void | SetRewardForAgent (Index agentI, Index sI, Index jaI, Index sucSI, Index joI, double r)=0 |
Set the reward for state, joint action, suc.state, joint obs indices. | |
virtual | ~POSGDiscreteInterface () |
Destructor. (A virtual destructor cannot be made pure abstract!) | |
![]() | |
virtual | ~MultiAgentDecisionProcessDiscreteInterface () |
Destructor. (A virtual destructor cannot be made pure abstract!) | |
![]() | |
virtual | ~MultiAgentDecisionProcessInterface () |
Destructor. | |
![]() | |
virtual double | GetDiscountForAgent (Index agentI) const =0 |
Returns the discount parameter. | |
virtual double | GetRewardForAgent (Index agentI, State *s, JointAction *ja) const =0 |
Function that returns the reward for a state and joint action. | |
virtual reward_t | GetRewardTypeForAgent (Index agentI) const =0 |
Returns the reward type. | |
virtual void | SetDiscountForAgent (Index agentI, double d)=0 |
Sets the discount parameter to 0 < d <= 1. | |
virtual void | SetRewardForAgent (Index agentI, State *s, JointAction *ja, double r)=0 |
Function that sets the reward for an agent, state and joint action. | |
virtual void | SetRewardTypeForAgent (Index agentI, reward_t r)=0 |
Sets the reward type to reward_t r. | |
virtual | ~POSGInterface () |
Virtual destructor. | |
![]() | |
void | CreateNewObservationModel () |
Creates a new observation model mapping. | |
void | CreateNewTransitionModel () |
Creates a new transition model mapping. | |
const ObservationModelDiscrete * | GetObservationModelDiscretePtr () const |
Returns a pointer to the underlying observation model. | |
double | GetObservationProbability (Index jaI, Index sucSI, Index joI) const |
Return the probability of joint observation joI: P(joI|jaI,sucSI). | |
OGet * | GetOGet () const |
bool | GetSparse () const |
Are we using sparse transition and observation models? | |
TGet * | GetTGet () const |
const TransitionModelDiscrete * | GetTransitionModelDiscretePtr () const |
Returns a pointer to the underlying transition model. | |
double | GetTransitionProbability (Index sI, Index jaI, Index sucSI) const |
Return the probability of successor state sucSI: P(sucSI|sI,jaI). | |
bool | Initialize () |
A function that can be called by other classes in order to request a MultiAgentDecisionProcessDiscrete to (try to) initialize. | |
MultiAgentDecisionProcessDiscrete () | |
Default constructor. | |
MultiAgentDecisionProcessDiscrete (std::string name="received unspec. by MultiAgentDecisionProcessDiscrete", std::string descr="received unspec.by MultiAgentDecisionProcessDiscrete", std::string pf="received unspec. by MultiAgentDecisionProcessDiscrete") | |
Constructor that sets the name, description, and problem file. | |
MultiAgentDecisionProcessDiscrete (int nrAgents, int nrS, std::string name="received unspec. by MultiAgentDecisionProcessDiscrete", std::string descr="received unspec.by MultiAgentDecisionProcessDiscrete", std::string pf="received unspec. by MultiAgentDecisionProcessDiscrete") | |
Constructor that sets the number of agents, the number of states, and the name, description, and problem file. | |
void | Print () const |
Prints some information on the MultiAgentDecisionProcessDiscrete. | |
Index | SampleJointObservation (Index jaI, Index sucI) const |
Sample an observation. | |
Index | SampleSuccessorState (Index sI, Index jaI) const |
Sample a successor state. | |
void | SetObservationModelPtr (ObservationModelDiscrete *ptr) |
Set the observation model. | |
void | SetObservationProbability (Index jaI, Index sucSI, Index joI, double p) |
Set the probability of joint observation joI: P(joI|jaI,sucSI). | |
void | SetSparse (bool sparse) |
Indicate whether sparse transition and observation models should be used. | |
void | SetTransitionModelPtr (TransitionModelDiscrete *ptr) |
Set the transition model. | |
void | SetTransitionProbability (Index sI, Index jaI, Index sucSI, double p) |
Set the probability of successor state sucSI: P(sucSI|sI,jaI). | |
~MultiAgentDecisionProcessDiscrete () | |
Destructor. | |
![]() | |
double | GetDiscount (Index agentI) const |
Returns the discount parameter for agent agentI. | |
reward_t | GetRewardType (Index agentI) const |
Returns the reward type. | |
POSG () | |
void | SetDiscount (Index agentI, double d) |
Sets the discount parameter of agentI to d. | |
void | SetNrAgents (size_t nrAgents) |
Sets the number of agents. | |
void | SetRewardType (Index agentI, reward_t r) |
Sets the reward type to reward_t r. |
Protected Attributes | |
std::vector< RewardModelMapping * > | _m_p_rModel |
The reward model used by POSGDiscrete is a RewardModelMapping. |
Private Attributes | |
bool | _m_initialized |
Boolean that tracks whether this POSG is initialized. |
POSGDiscrete represents a discrete POSG model.
It implements POSGDiscreteInterface.
It also inherits from MultiAgentDecisionProcessDiscrete and POSG,
and thus implements POSGInterface, MultiAgentDecisionProcessDiscreteInterface, and MultiAgentDecisionProcessInterface.
Definition at line 54 of file POSGDiscrete.h.
POSGDiscrete::POSGDiscrete | ( | std::string | name = "received unspec. by POSGDiscrete" , |
std::string | descr = "received unspec. by POSGDiscrete" , |
||
std::string | pf = "received unspec. by POSGDiscrete" |
||
) |
Default constructor.
Constructor that sets the name, description, and problem file, and subsequently loads this problem file.
Definition at line 32 of file POSGDiscrete.cpp.
References _m_initialized, _m_p_rModel, MultiAgentDecisionProcess::GetNrAgents(), SetInitialized(), and POSG::SetNrAgents().
POSGDiscrete::~POSGDiscrete | ( | ) |
|
pure virtual |
Returns a copy of this class.
Reimplemented from MultiAgentDecisionProcessDiscrete.
void POSGDiscrete::CreateNewRewardModel | ( | Index | agentI, |
size_t | nrS, | ||
size_t | nrJA | ||
) |
Creates a new reward model.
Definition at line 62 of file POSGDiscrete.cpp.
|
inline |
Return the reward for state, joint action indices.
Definition at line 102 of file POSGDiscrete.h.
References _m_p_rModel.
Referenced by GetReward(), and SetReward().
double POSGDiscrete::GetReward | ( | Index | agentI, |
State * | s, | ||
JointAction * | ja | ||
) | const |
Definition at line 90 of file POSGDiscrete.cpp.
References GetReward().
bool POSGDiscrete::SetInitialized | ( | bool | b | ) |
Sets _m_initialized to b.
When setting to true, a verification of member elements is performed. (i.e. a check whether all vectors have the correct size and non-zero entries)
Reimplemented from MultiAgentDecisionProcessDiscrete.
Definition at line 50 of file POSGDiscrete.cpp.
References _m_initialized, and MultiAgentDecisionProcessDiscrete::SetInitialized().
Referenced by POSGDiscrete().
|
inline |
Set the reward for state, joint action indices.
Definition at line 89 of file POSGDiscrete.h.
References _m_p_rModel.
Referenced by SetReward().
void POSGDiscrete::SetReward | ( | Index | agentI, |
Index | sI, | ||
Index | jaI, | ||
Index | sucSI, | ||
double | r | ||
) |
Set the reward for state, joint action , suc. state indices.
Definition at line 98 of file POSGDiscrete.cpp.
References GetReward(), MultiAgentDecisionProcessDiscrete::GetTransitionProbability(), and SetReward().
void POSGDiscrete::SetReward | ( | Index | agentI, |
Index | sI, | ||
Index | jaI, | ||
Index | sucSI, | ||
Index | joI, | ||
double | r | ||
) |
Set the reward for state, joint action, suc.state, joint obs indices.
Definition at line 106 of file POSGDiscrete.cpp.
|
virtual |
Prints some information on the POSGDiscrete.
Reimplemented from MultiAgentDecisionProcessDiscrete.
Definition at line 70 of file POSGDiscrete.cpp.
References _m_initialized, _m_p_rModel, and MultiAgentDecisionProcess::GetNrAgents().
|
private |
Boolean that tracks whether this POSG is initialized.
Definition at line 61 of file POSGDiscrete.h.
Referenced by POSGDiscrete(), SetInitialized(), and SoftPrint().
|
protected |
The reward model used by POSGDiscrete is a RewardModelMapping.
Definition at line 66 of file POSGDiscrete.h.
Referenced by GetReward(), POSGDiscrete(), SetReward(), SoftPrint(), and ~POSGDiscrete().