@InProceedings{LR83,
  replaced-by = {LR86},
  author      = {Frank Thomson Leighton and Ronald L. Rivest},
  title       = {Estimating a Probability Using Finite Memory (Extended Abstract)},
  pages       = {255--269},
  doi         = {10.1007/3-540-12689-9_109},
  booktitle   = {Proceedings of the 1983 International FCT-Conference on Foundations of Computation Theory},
  isbn        = {3-540-12689-9},
  editor      = {Marek Karpinski},
  date        = {1983-08},
  OPTyear     = {1983},
  OPTmonth    = {August},
  series      = {Lecture Notes in Computer Science},
  volume      = {158},
  publisher   = {Springer},
  eventtitle  = {FCT'83},
  eventdate   = {1983-08-21/1983-08-27},
  venue       = {Borgholm, Sweden},
  abstract    = {Let $\{X_i\}_{i=1}^\infty$ be a sequence of independent Bernoulli random variables with probability $p$ that $X_i=1$ and probability $q=1-p$ that $X_i=0$ for all $i\ge 1$. We consider time-invariant finite-memory (i.e., finite-state) estimation procedures for the parameter $p$ which take $X_1,\ldots$ as an input sequence. In particular, we describe an $n$-state deterministic estimation procedure that can estimate $p$ with mean-square error $O(\log n / n)$ and an $n$-state probabilistic estimation procedure that can estimate $p$ with mean-square error $O(1/n)$. We prove that the $O(1/n)$ bound is optimal to within a constant factor. In addition, we show that linear estimation procedures are just as powerful (up to the measure of mean-square error) as arbitrary estimation procedures. The proofs are based on the Markov Chain Tree Theorem.},
}