The original motivation to build a quantum computer came from Feynman, who imagined a machine capable of simulating generic quantum mechanical systems---a task that is believed to be intractable for classical computers. Such a machine could have far-reaching applications in the simulation of many-body quantum physics in condensed-matter, chemical and high-energy systems. Part of Feynman{\textquoteright}s challenge was met by Lloyd, who showed how to approximately decompose the time evolution operator of interacting quantum particles into a short sequence of elementary gates, suitable for operation on a quantum computer. However, this left open the problem of how to simulate the equilibrium and static properties of quantum systems. This requires the preparation of ground and Gibbs states on a quantum computer. For classical systems, this problem is solved by the ubiquitous Metropolis algorithm, a method that has basically acquired a monopoly on the simulation of interacting particles. Here we demonstrate how to implement a quantum version of the Metropolis algorithm. This algorithm permits sampling directly from the eigenstates of the Hamiltonian, and thus evades the sign problem present in classical simulations. A small-scale implementation of this algorithm should be achievable with today{\textquoteright}s technology.

}, url = {http://www.physique.usherbrooke.ca/pages/sites/default/files/poulin/TOVP11a.pdf}, author = {K. Temme and T.J. Osborne and K. Vollbrecht and Poulin, D. and F. Verstraete} } @booklet {PQSV11a, title = {Quantum simulation of time-dependent Hamiltonians and the convenient illusion of Hilbert space}, year = {2011}, author = {Poulin, D. and Angie Quarry and Rolando D. Somma and Frank Verstraete} } @booklet {BDP11a, title = {Universal topological phase of 2D stabilizer codes}, year = {2011}, abstract = {Two topological phases are equivalent if they are connected by a local unitary transformation. In this sense, classifying topological phases amounts to classifying long-range entanglement patterns. We show that all 2D topological stabilizer codes are equivalent to several copies of one universal phase: Kitaev{\textquoteright}s topological code. Error correction benefits from the corresponding local mappings.}, keywords = {Topological QC}, author = {Hector Bombin and Guillaume Duclos-Cianci and Poulin, D.} } @article {BP10b, title = {Coarse grained belief propagation for simulation of interacting quantum systems at all temperatures}, journal = {Physical Review B}, volume = {81}, year = {2010}, pages = {054106}, abstract = {We continue our numerical study of quantum belief propagation initiated in \cite{PB08a}. We demonstrate how the method can be expressed in terms of an effective thermal potential that materializes when the system presents quantum correlations, but is insensitive to classical correlations. The thermal potential provides an efficient means to assess the precision of belief propagation on graphs with no loops. We illustrate these concepts using the one-dimensional quantum Ising model and compare our results with exact solutions. We also use the method to study the transverse field quantum Ising spin glass for which we obtain a phase diagram that is largely in agreement with the one obtained in \cite{LSS07a} using a different approach. 
Finally, we introduce the coarse grained belief propagation (CGBP) algorithm to improve belief propagation at low temperatures. This method combines the reliability of belief propagation at high temperatures with the ability of entanglement renormalization to efficiently describe low energy subspaces of quantum systems with local interactions. With CGBP, thermodynamic properties of quantum systems can be calculated with a high degree of accuracy at all temperatures.}, keywords = {Belief propagation, Simulation}, author = {E. Bilgin and Poulin, D.} } @article {CPFS10a, title = {Efficient quantum state tomography}, journal = {Nature Comm.}, volume = {1}, year = {2010}, pages = {149}, abstract = {Quantum state tomography{\textendash}-deducing quantum states from measured data{\textendash}-is the gold standard for verification and benchmarking of quantum devices. It has been realized in systems with few components, but for larger systems it becomes unfeasible because the number of measurements and the amount of computation required to process them grows exponentially in the system size. Here, we present two tomography schemes that scale much more favourably than direct tomography with system size. One of them requires unitary operations on a constant number of subsystems, whereas the other requires only local measurements together with more elaborate post-processing. Both rely only on a linear number of experimental operations and post-processing that is polynomial in the system size. These schemes can be applied to a wide range of quantum states, in particular those that are well approximated by matrix product states. The accuracy of the reconstructed states can be rigorously certified without any a priori assumptions.}, author = {M. Cramer and M.B. Plenio and S.T. Flammia and R. Somma and D. Gross and S.D. Bartlett and O. Landon-Cardinal and Poulin, D. and Y.-K. 
Liu} } @article {DP10a, title = {Fast decoders for topological quantum codes}, journal = {Physical Review Letters}, volume = {104}, year = {2010}, pages = {050504}, abstract = {We present a family of algorithms, combining real-space renormalization methods and belief propagation, to estimate the free energy of a topologically ordered system in the presence of defects. Such an algorithm is needed to preserve the quantum information stored in the ground space of a topologically ordered system and to decode topological error-correcting codes. For a system of linear size $\ell$, our algorithm runs in time $\log\ell$ compared to $\ell^6$ needed for the minimum-weight perfect matching algorithm previously used in this context and achieves a higher depolarizing error threshold.}, keywords = {Error correction, Topological QC}, author = {Guillaume Duclos-Cianci and Poulin, D.} } @article {BNPV10a, title = {Information preserving structures: a general framework for quantum zero-error information}, journal = {Physical Review A}, volume = {82}, year = {2010}, pages = {062306}, abstract = {Quantum systems carry information. Quantum theory supports at least two distinct kinds of information (classical and quantum), and a variety of different ways to encode and preserve information in physical systems. A system{\textquoteright}s ability to carry information is constrained and defined by the noise in its dynamics. This paper introduces an operational framework, using information-preserving structures to classify all the kinds of information that can be perfectly (i.e., with zero error) preserved by quantum dynamics. We prove that every perfectly preserved code has the same structure as a matrix algebra, and that preserved information can always be corrected. 
We also classify distinct operational criteria for preservation (e.g., {\textquoteleft}{\textquoteleft}noiseless{\textquoteright}{\textquoteright}, {\textquoteleft}{\textquoteleft}unitarily correctible{\textquoteright}{\textquoteright}, etc.) and introduce two new and natural criteria for measurement-stabilized and unconditionally preserved codes. Finally, for several of these operational criteria, we present efficient [polynomial in the state-space dimension] algorithms to find all of a channel{\textquoteright}s information-preserving structures.}, author = {Blume-Kohout, R. and H.K. Ng and Poulin, D. and L. Viola} } @article {P10a, title = {Lieb-Robinson bound and locality for general Markovian quantum dynamics}, journal = {Physical Review Letters}, volume = {104}, year = {2010}, pages = {190401}, abstract = {The Lieb-Robinson bound shows the existence of a maximum speed of signal propagation in discrete quantum mechanical systems with local interactions. This generalizes the concept of relativistic causality beyond field theory, and provides a powerful tool in theoretical condensed matter physics and quantum information science. Here, we extend the scope of this seminal result by considering general Markovian quantum evolution, where we prove that an equivalent bound holds. In addition, we use the generalized bound to demonstrate that correlations in the stationary state of a Markov process decay on a length-scale set by the Lieb-Robinson velocity and the system{\textquoteright}s relaxation time.}, author = {Poulin, D.} } @article {BPT10a, title = {Tradeoffs for reliable quantum information storage in {2D} systems}, journal = {Physical Review Letters}, volume = {104}, year = {2010}, pages = {050503}, abstract = {We ask whether there are fundamental limits on storing quantum information reliably in a bounded volume of space. 
To investigate this question, we study quantum error correcting codes specified by geometrically local commuting constraints on a 2D lattice of finite-dimensional quantum particles. For these 2D systems, we derive a tradeoff between the number of encoded qubits $k$, the distance of the code $d$, and the number of particles $n$. It is shown that $kd^2=O(n)$ where the coefficient in $O(n)$ depends only on the locality of the constraints and dimension of the Hilbert spaces describing individual particles. The analogous tradeoff for the classical information storage is $k\sqrt{d} =O(n)$.}, keywords = {Error correction, Self correcting}, author = {S. Bravyi and Poulin, D. and B.M. Terhal} } @conference {BPT10b, title = {Tradeoffs for reliable quantum information storage in 2D systems}, booktitle = {Quantum Cryptography and Computing}, year = {2010}, pages = {125}, author = {S. Bravyi and Poulin, D. and B.M. Terhal}, editor = {R. Horodecki and S. Ya. Kilin and J. Kowalik} } @article {PW09a, title = {Preparing ground states of quantum many-body systems on a quantum computer}, journal = {Physical Review Letters}, volume = {102}, year = {2009}, pages = {130503}, abstract = {Preparing the ground state of a system of interacting classical particles is an NP-hard problem. Thus, there is in general no better algorithm to solve this problem than exhaustively going through all $N$ configurations of the system to determine the one with lowest energy, requiring a running time proportional to $N$. A quantum computer, if it could be built, could solve this problem in time $\sqrt N$. Here, we present a powerful extension of this result to the case of interacting {\em quantum} particles, demonstrating that a quantum computer can prepare the ground state of a quantum system as efficiently as it does for classical systems.}, keywords = {Complexity, Quantum simulation}, author = {Poulin, D. and Pawel Wocjan} } @article {PTO09a, title = {Quantum serial turbo-codes}, journal = {IEEE Trans. 
Info. Theor.}, volume = {55}, number = {6}, year = {2009}, pages = {2776}, abstract = {We present a theory of quantum serial turbo-codes, describe their iterative decoding algorithm, and study their performances numerically on a depolarization channel. Our construction offers several advantages over quantum LDPC codes. First, the Tanner graph used for decoding is free of 4-cycles that deteriorate the performances of iterative decoding. Secondly, the iterative decoder makes explicit use of the code{\textquoteright}s degeneracy. Finally, there is complete freedom in the code design in terms of length, rate, memory size, and interleaver choice. We define a quantum analogue of a state diagram that provides an efficient way to verify the properties of a quantum convolutional code, and in particular its recursiveness and the presence of catastrophic error propagation. We prove that all recursive quantum convolutional encoder have catastrophic error propagation. In our constructions, the convolutional codes have thus been chosen to be non-catastrophic and non-recursive. While the resulting families of turbo-codes have bounded minimum distance, from a pragmatic point of view the effective minimum distances of the codes that we have simulated are large enough not to degrade the iterative decoding performance up to reasonable word error rates and block sizes. With well chosen constituent convolutional codes, we observe an important reduction of the word error rate as the code length increases.}, keywords = {Error correction, Turbo Codes}, author = {Poulin, D. and J.-P. Tillich and Ollivier, H.} } @article {PW09b, title = {Sampling from the quantum Gibbs state and evaluating partition functions with a quantum computer}, journal = {Physical Review Letters}, volume = {103}, year = {2009}, pages = {220502}, abstract = {We present a quantum algorithm to prepare the thermal Gibbs state of interacting quantum systems. 
This algorithm sets a universal upper bound $D^\alpha$ on the thermalization time of a quantum system, where $D$ is the system{\textquoteright}s Hilbert space dimension and $\alpha \leq \frac 12$ is proportional to the Helmholtz free energy density of the system. We also derive an algorithm to evaluate the partition function of a quantum system in a time proportional to the system{\textquoteright}s thermalization time and inversely proportional to the targeted accuracy squared.}, author = {Poulin, D. and Pawel Wocjan} } @article {PB08a, title = {Belief propagation algorithm for computing correlation functions in finite-temperature quantum many-body systems on loopy graphs}, journal = {Physical Review A}, volume = {77}, year = {2008}, pages = {052318}, abstract = {Belief propagation {\textendash}- a powerful heuristic method to solve inference problems involving a large number of random variables {\textendash}- was recently generalized to quantum theory. Like its classical counterpart, this algorithm is exact on trees when the appropriate independence conditions are met and is expected to provide reliable approximations when operated on loopy graphs. In this paper, we benchmark the performances of loopy quantum belief propagation (QBP) in the context of finite-temperature quantum many-body physics. Our results indicate that QBP provides reliable estimates of the high-temperature correlation function when the typical loop size in the graph is large. As such, it is suitable e.g. for the study of quantum spin glasses on Bethe lattices and the decoding of sparse quantum error correction codes.}, keywords = {Belief propagation, Simulation}, author = {Poulin, D. and E. Bilgin} } @article {PC08a, title = {On the iterative decoding of sparse quantum codes}, journal = {Quant. Info. and Comp.}, volume = {8}, year = {2008}, pages = {986}, abstract = {We address the problem of decoding sparse quantum error correction codes. 
For Pauli channels, this task can be accomplished by a version of the belief propagation algorithm used for decoding sparse classical codes. Quantum codes pose two new challenges however. Firstly, their Tanner graph unavoidably contains small loops which typically undermine the performance of belief propagation. Secondly, sparse quantum codes are by definition highly degenerate. The standard belief propagation algorithm does not exploit this feature, but rather it is impaired by it. We propose heuristic methods to improve belief propagation decoding, specifically targeted at these two problems. While our results exhibit a clear improvement due to the proposed heuristic methods, they also indicate that the main source of errors in the quantum coding scheme remains in the decoding.}, keywords = {Belief propagation, LDPC}, author = {Poulin, D. and Y. Chung} } @article {LP08a, title = {Quantum graphical models and belief propagation}, journal = {Ann. Phys.}, volume = {323}, year = {2008}, pages = {1899}, abstract = {Belief Propagation algorithms acting on Graphical Models of classical probability distributions, such as Markov Networks, Factor Graphs and Bayesian Networks, are amongst the most powerful known methods for deriving probabilistic inferences amongst large numbers of random variables. This paper presents a generalization of these concepts and methods to the quantum case, based on the idea that quantum theory can be thought of as a noncommutative, operator-valued, generalization of classical probability theory. Some novel characterizations of quantum conditional independence are derived, and definitions of Quantum n-Bifactor Networks, Markov Networks, Factor Graphs and Bayesian Networks are proposed. The structure of Quantum Markov Networks is investigated and some partial characterization results are obtained, along the lines of the Hammersley-Clifford theorem. 
A Quantum Belief Propagation algorithm is presented and is shown to converge on 1-Bifactor Networks and Markov Networks when the underlying graph is a tree. The use of Quantum Belief Propagation as a heuristic algorithm in cases where it is not known to converge is discussed. Applications to decoding quantum error correcting codes and to the simulation of many-body quantum systems are described.}, keywords = {Belief propagation, LDPC, Simulation, Turbo Codes}, author = {M.S. Leifer and Poulin, D.} } @article {GP08a, title = {Quantum reference frames and deformed symmetries}, journal = {Phys. Rev. D}, volume = {77}, year = {2008}, pages = {104012}, abstract = {In the context of constrained quantum mechanics, reference systems are used to construct relational observables that are invariant under the action of the symmetry group. Upon measurement of a relational observable, the reference system undergoes an unavoidable measurement {\textquoteleft}{\textquoteleft}back-action" that modifies its properties. In a quantum-gravitational setting, it has been argued that such a back-action may produce effects that are described at an effective level as a form of deformed (or doubly) special relativity. We examine this possibility using a simple constrained system that has been extensively studied in the context of quantum information. While our conclusions support the idea of a symmetry deformation, they also reveal a host of other effects that may be relevant to the context of quantum gravity, and could potentially conceal the symmetry deformation.}, keywords = {Foundations, Gravity, Reference frame}, author = {F. Girelli and Poulin, D.} } @conference {PTO08a, title = {Quantum serial turbo-codes}, booktitle = {ISIT{\textquoteright}08}, year = {2008}, publisher = {IEEE}, organization = {IEEE}, abstract = {We present a theory of quantum serial turbo-codes and study their performance numerically on a depolarization channel. 
These codes can be considered as a generalization of classical serial turbo-codes. As their classical cousins, they can be iteratively decoded and with well chosen constituent convolutional codes, we observe an important reduction of the word error rate as the number of encoded qubits increases. Our construction offers several advantages over quantum LDPC codes. First, the Tanner graph used for decoding can be chosen to be free of 4-cycles that deteriorate the performances of iterative decoding. Secondly, the iterative decoder makes explicit use of the code{\textquoteright}s degeneracy. Finally, there is complete freedom in the code design in terms of length, rate, memory size, and interleaver choice. We address two issues related to the encoding of convolutional codes that are directly relevant for turbo-codes, namely the character of being recursive and non-catastrophic. We define a quantum analogue of a state diagram that provides an efficient way to verify these properties on a given quantum convolutional encoder. Unfortunately, we also prove that all recursive quantum convolutional encoder have catastrophic error propagation. In our constructions, the convolutional codes have thus been chosen to be non-catastrophic and non-recursive. While the resulting families of turbo-codes have bounded minimum distance, from a pragmatic point of view the effective minimum distances of the codes that we have simulated are large enough for not degrading iterative decoding performance up to reasonable word error rates and block sizes.}, keywords = {Error correction, Turbo Codes}, author = {Poulin, D. 
and Jean-Pierre Tillich and Harold Ollivier} } @article {BNPV08a, title = {The structure of preserved information in quantum processes}, journal = {Physical Review Letters}, volume = {100}, year = {2008}, pages = {030501}, abstract = {We introduce a general operational characterization of information-preserving structures (IPS) {\textendash} encompassing noiseless subsystems, decoherence-free subspaces, pointer bases, and error-correcting codes {\textendash} by demonstrating that they are isometric to fixed points of unital quantum processes. Using this, we show that every IPS is a matrix algebra. We further establish a structure theorem for the fixed states and observables of an arbitrary process, which unifies the Schr{\"o}dinger and Heisenberg pictures, places restrictions on physically allowed kinds of information, and provides an efficient algorithm for finding all noiseless and unitarily noiseless subsystems of the process.}, keywords = {Error correction}, author = {Blume-Kohout, R. and H.K. Ng and Poulin, D. and L. Viola} } @article {NP07a, title = {Algebraic and information-theoretic conditions for operator quantum error-correction}, journal = {Physical Review A}, volume = {75}, year = {2007}, pages = {064304}, abstract = {Operator quantum error-correction is a technique for robustly storing quantum information in the presence of noise. It generalizes the standard theory of quantum error-correction, and provides a unified framework for topics such as quantum error-correction, decoherence-free subspaces, and noiseless subsystems. 
This paper develops (a) easily applied algebraic and information-theoretic conditions which characterize when operator quantum error-correction is feasible; (b) a representation theorem for a class of noise processes which can be corrected using operator quantum error-correction; and (c) generalizations of the coherent information and quantum data processing inequality to the setting of operator quantum error-correction.}, keywords = {Error correction, OQEC}, author = {Nielsen, M. A. and Poulin, D.} } @conference {GP07a, title = {Deformed symmetries from quantum relational observables}, booktitle = {J. of Phys.: Conf. Series, 12th Conference on Recent Developments in Gravity}, volume = {68}, number = {012025}, year = {2007}, abstract = {Deformed Special Relativity (DSR) is a candidate phenomenological theory to describe the Quantum Gravitational (QG) semi-classical regime. A possible interpretation of DSR can be derived from the notion of deformed reference frame. Observables in (quantum) General Relativity can be constructed from (quantum) reference frame {\textendash} a physical observable is then a relation between a system of interest and the reference frame. We present a toy model and study an example of such quantum relational observables. We show how the intrinsic quantum nature of the reference frame naturally leads to a deformation of the symmetries, comforting DSR to be a good candidate to describe the QG semi-classical regime.}, keywords = {Gravity, Reference frame}, author = {F. Girelli and Poulin, D.} } @article {PY07a, title = {Dynamics of a Quantum Reference Frame}, journal = {New J. Phys.}, volume = {9}, year = {2007}, pages = {156}, abstract = {We analyze a quantum mechanical gyroscope which is modeled as a large spin and used as a reference against which to measure the angular momenta of spin-1/2 particles. These measurements induce a back-action on the reference which is the central focus of our study. 
We begin by deriving explicit expressions for the quantum channel representing the back-action. Then, we analyze the dynamics incurred by the reference when it is used to sequentially measure particles drawn from a fixed ensemble. We prove that the reference thermalizes with the measured particles and find that generically, the thermal state is reached in time which scales linearly with the size of the reference. This contrasts a recent conclusion of Bartlett et al.\ that this takes a quadratic amount of time when the particles are completely unpolarized. We now understand their result in terms of a simple physical principle based on symmetries and conservation laws. Finally, we initiate the study of the non-equilibrium dynamics of the reference. Here we find that a reference in a coherent state will essentially remain in one when measuring polarized particles, while rotating itself to ultimately align with the polarization of the particles.}, keywords = {Reference frame}, author = {Poulin, D. and J. Yard} } @article {KLPL05a, title = {Operator quantum error correction}, journal = {Quant. Info. and Comp.}, volume = {6}, year = {2006}, pages = {382}, abstract = {This paper is an expanded and more detailed version of the work \cite{KLP04} in which the Operator Quantum Error Correction formalism was introduced. This is a new scheme for the error correction of quantum operations that incorporates the known techniques {\textendash}- i.e. the standard error correction model, the method of decoherence-free subspaces, and the noiseless subsystem method {\textendash}- as special cases, and relies on a generalized mathematical framework for noiseless subsystems that applies to arbitrary quantum operations. We also discuss a number of examples and introduce the notion of {\textquoteleft}{\textquoteleft}unitarily noiseless subsystems{\textquoteright}{\textquoteright}.}, keywords = {Error correction, OQEC}, author = {Kribs, D. W. and R. Laflamme and Poulin, D. 
and Lesosky, M.} } @article {Pou06b, title = {Optimal and Efficient Decoding of Concatenated Quantum Block Codes}, journal = {Physical Review A}, volume = {74}, year = {2006}, pages = {052333}, abstract = {We consider the problem of optimally decoding a quantum error correction code {\textendash}- that is to find the optimal recovery procedure given the outcomes of partial {\textquoteleft}{\textquoteleft}check" measurements on the system. In general, this problem is NP-hard. However, we demonstrate that for concatenated block codes, the optimal decoding can be efficiently computed using a message passing algorithm. We compare the performance of the message passing algorithm to that of the widespread blockwise hard decoding technique. Our Monte Carlo results using the 5 qubit and Steane{\textquoteright}s code on a depolarizing channel demonstrate significant advantages of the message passing algorithms in two respects. 1) Optimal decoding increases by as much as 94\% the error threshold below which the error correction procedure can be used to reliably send information over a noisy channel. 2) For noise levels below these thresholds, the probability of error after optimal decoding is suppressed at a significantly higher rate, leading to a substantial reduction of the error correction overhead.}, keywords = {Error correction}, author = {Poulin, D.} } @article {MP06a, title = {Relational time for systems of oscillators}, journal = {Int. J. Quant. Info.}, volume = {4}, year = {2006}, pages = {151}, abstract = {Using an elementary example based on two simple harmonic oscillators, we show how a relational time may be defined that leads to an approximate Schr{\"o}dinger dynamics for subsystems, with corrections leading to an intrinsic decoherence in the energy eigenstates of the subsystem.}, keywords = {Foundations, Gravity, Quantum Theory, Reference frame}, author = {Milburn, G. J. 
and Poulin, D.} } @article {Pou06a, title = {Toy model for a relational formulation of quantum theory}, journal = {Int. J. of Theor. Phys.}, volume = {45}, year = {2006}, pages = {1229}, abstract = {In the absence of an external frame of reference {\textendash}- i.e. in background independent theories such as general relativity {\textendash}- physical degrees of freedom must describe {\em relations} between systems. Using a simple model, we investigate how such a relational quantum theory naturally arises by promoting reference systems to the status of dynamical entities. Our goal is twofold. First, we demonstrate using elementary quantum theory how any quantum mechanical experiment admits a purely relational description at a fundamental level. Second, we describe how the original {\textquoteleft}{\textquoteleft}non-relational" theory approximately emerges from the fully relational theory when reference systems become semi-classical. Our technique is motivated by a Bayesian approach to quantum mechanics, and relies on the noiseless subsystem method of quantum information science used to protect quantum states against undesired noise. The relational theory naturally predicts a fundamental decoherence mechanism, so an arrow of time emerges from a time-symmetric theory. Moreover, our model circumvents the problem of the {\textquoteleft}{\textquoteleft}collapse of the wave packet" as the probability interpretation is only ever applied to diagonal density operators. Finally, the physical states of the relational theory can be described in terms of {\textquoteleft}{\textquoteleft}spin networks" introduced by Penrose as a combinatorial description of geometry, and widely studied in the loop formulation of quantum gravity. 
Thus, our simple bottom-up approach (starting from the semi-classical limit to derive the fully relational quantum theory) may offer interesting insights on the low energy limit of quantum gravity.}, keywords = {Foundations, Gravity, Measurement, Quantum Theory, Reference frame}, author = {Poulin, D.} } @article {REP+05a, title = {Characterization of Complex Quantum Dynamics with a Scalable NMR Information Processor}, journal = {Physical Review Letters}, volume = {95}, number = {25}, year = {2005}, pages = {250502}, publisher = {APS}, abstract = {We present experimental results on the measurement of fidelity decay under contrasting system dynamics using a nuclear magnetic resonance quantum information processor. The measurements were performed by implementing a scalable circuit in the model of deterministic quantum computation with only one quantum bit. The results show measurable differences between regular and complex behaviour and for complex dynamics are faithful to the expected theoretical decay rate. Moreover, we illustrate how the experimental method can be seen as an efficient way for either extracting coarse-grained information about the dynamics of a large system, or measuring the decoherence rate from engineered environments.}, keywords = {Quantum chaos, Quantum simulation}, url = {http://link.aps.org/abstract/PRL/v95/e250502}, author = {C. A. Ryan and J. Emerson and Poulin, D. and C. Negrevergne and R. Laflamme} } @booklet {PT05a, title = {Comment on {\textquoteleft}{\textquoteleft}Some non-conventional ideas about algorithmic complexity"}, year = {2005}, abstract = {We comment on a recent paper by D{\textquoteright}Abramo [Chaos, Solitons \& Fractals, 25 (2005) 29], focusing on the author{\textquoteright}s statement that an algorithm can produce a list of strings containing at least one string whose algorithmic complexity is greater than that of the entire list. 
We show that this statement, although perplexing, is not as paradoxical as it seems when the definition of algorithmic complexity is applied correctly.}, author = {Poulin, D. and Hugo Touchette} } @article {OPZ05a, title = {Environment as a Witness: Selective Proliferation of Information and Emergence of Objectivity}, journal = {Physical Review A}, volume = {72}, year = {2005}, pages = {042113}, abstract = {We study the role of the information deposited in the environment of an open quantum system in course of the decoherence process. Redundant spreading of information {\textendash}- the fact that some observables of the system can be independently {\textquoteleft}{\textquoteleft}read-off{\textquoteright}{\textquoteright} from many distinct fragments of the environment {\textendash}- is investigated as the key to effective objectivity, the essential ingredient of {\textquoteleft}{\textquoteleft}classical reality{\textquoteright}{\textquoteright}. This focus on the environment as a communication channel through which observers learn about physical systems underscores importance of quantum Darwinism {\textendash}- selective proliferation of information about {\textquoteleft}{\textquoteleft}the fittest states{\textquoteright}{\textquoteright} chosen by the dynamics of decoherence at the expense of their superpositions {\textendash}- as redundancy imposes the existence of preferred observables. We demonstrate that the only observables that can leave multiple imprints in the environment are the familiar pointer observables singled out by environment-induced superselection (einselection) for their predictability. Many independent observers monitoring the environment will therefore agree on properties of the system as they can only learn about preferred observables. 
In this operational sense, the selective spreading of information leads to appearance of an objective {\textquoteleft}{\textquoteleft}classical reality{\textquoteright}{\textquoteright} from within quantum substrate.}, keywords = {Decoherence, Foundations, Quantum Theory}, url = {http://www.arxiv.org/abs/quant-ph/0408125}, author = {Ollivier, H. and Poulin, D. and Zurek, W. H.} } @article {Pou05a, title = {Macroscopic Observables}, journal = {Physical Review A}, volume = {71}, year = {2005}, pages = {22102}, abstract = {We study macroscopic observables defined as the total value of a physical quantity over a collection of quantum systems. We show that previous results obtained for infinite ensemble of identically prepared systems lead to incorrect conclusions for finite ensembles. In particular, exact measurement of a macroscopic observable significantly disturbs the state of any finite ensemble. However, we show how this disturbance can be made arbitrarily small when the measurement are of finite accuracy. We demonstrate a general tradeoff between state disturbance and measurement coarseness as a function of the size of the ensemble. Using this tradeoff, we show that the histories generated by any sequence of finite accuracy macroscopic measurements always generate a consistent family in the absence of large scale entanglement, for sufficiently large ensembles. Hence, macroscopic observables behave {\textquoteleft}{\textquoteleft}classically" provided that their accuracy is coarser than the quantum correlation length-scale of the system. 
The role of these observables is also discussed in the context of NMR quantum information processing and bulk ensemble quantum state tomography.}, keywords = {Decoherence, Emergent, Foundations, Quantum Theory, Renormalization}, author = {Poulin, D.} } @article {Pou05b, title = {Stabilizer formalism for operator quantum error correction}, journal = {Physical Review Letters}, volume = {95}, year = {2005}, pages = {230504}, abstract = {Operator quantum error correction is a recently developed theory that provides a generalized framework for active error correction and passive error avoiding schemes. In this paper, we describe these codes in the stabilizer formalism of standard quantum error correction theory. This is achieved by adding a {\textquoteleft}{\textquoteleft}gauge{\textquoteright}{\textquoteright} group to the standard stabilizer definition of a code that defines an equivalence class between encoded states. Gauge transformations leave the encoded information unchanged; their effect is absorbed by virtual gauge qubits that do not carry useful information. We illustrate the construction by identifying a gauge symmetry in Shor{\textquoteright}s 9-qubit code that allows us to remove 4 of its 8 stabilizer generators, leading to a simpler decoding procedure and a wider class of logical operations without affecting its essential properties. This opens the path to possible improvements of the error threshold of fault-tolerant quantum computing.}, keywords = {Error correction, OQEC}, author = {Poulin, D.} } @article {KLP05a, title = {A Unified and Generalized Approach to Quantum Error Correction}, journal = {Physical Review Letters}, volume = {94}, year = {2005}, pages = {180501}, abstract = {We present a unified approach to quantum error correction, called operator quantum error correction. Our scheme relies on a generalized notion of noiseless subsystem that is investigated here. 
By combining active error correction with this generalized noiseless subsystems method, we arrive at the unified approach which incorporates the known techniques {\textendash}- i.e. the standard error correction model, the method of decoherence-free subspaces, and the noiseless subsystem method {\textendash}- as special cases. Moreover, we demonstrate that the quantum error correction condition from the standard model is a necessary condition for all known methods of quantum error correction.}, keywords = {Error correction, OQEC}, author = {Kribs, D. and R. Laflamme and Poulin, D.} } @article {ELP+04a, title = {Estimation of the Local Density of States on a Quantum Computer}, journal = {Physical Review A}, volume = {69}, year = {2004}, pages = {050305}, abstract = {We report an efficient quantum algorithm for estimating the local density of states (LDOS) on a quantum computer. The LDOS describes the redistribution of energy levels of a quantum system under the influence of a perturbation. Sometimes known as the {\textquoteleft}{\textquoteleft}strength function{\textquoteright}{\textquoteright} from nuclear spectroscopy experiments, the shape of the LDOS is directly related to the survivial probability of unperturbed eigenstates, and has recently been related to the fidelity decay (or {\textquoteleft}{\textquoteleft}Loschmidt echo{\textquoteright}{\textquoteright}) under imperfect motion-reversal. For quantum systems that can be simulated efficiently on a quantum computer, the LDOS estimation algorithm enables an exponential speed-up over direct classical computation.}, keywords = {Quantum chaos, Quantum simulation}, author = {J. Emerson and Lloyd, S. and Poulin, D. 
and Cory, D.} } @article {PBLO04b, title = {Exponential Speedup with a Single Bit of Quantum Information: Measuring the Average Fidelity Decay}, journal = {Physical Review Letters}, volume = {92}, number = {17}, year = {2004}, pages = {177906}, abstract = {We present an efficient quantum algorithm to measure the average fidelity decay of a quantum map under perturbation using a single bit of quantum information. Our algorithm scales only as the complexity of the map under investigation, so for those maps admitting an efficient gate decomposition, it provides an exponential speed up over known classical procedures. Fidelity decay is important in the study of complex dynamical systems, where it is conjectured to be a signature of eigenvector statistics. Our result also illustrates the role of chaos in the process of decoherence.}, keywords = {Quantum chaos, Quantum simulation}, author = {Poulin, D. and Blume-Kohout, R. and R. Laflamme and Ollivier, H.} } @article {HKL+04a, title = {Noiseless subsystems for collective rotation channels in quantum information theory}, journal = {Integ. Eqs. {\&} Oper. Theo.}, year = {2004}, note = {to appear}, abstract = {Collective rotation channels are a fundamental class of channels in quantum computing and quantum information theory. The commutant of the noise operators for such a channel is a c*-algebra which is equal to the set of fixed points for the channel. Finding the precise spatial structure of the commutant algebra for a set of noise operators associated with a channel is a core problem in quantum error prevention. We draw on methods of operator algebras, quantum mechanics and combinatorics to explicitly determine the structure of the commutant for the class of collective rotation channels.}, keywords = {Error correction}, author = {Holbrook, J. A. and Kribs, D. W. and R. 
Laflamme and Poulin, D.} } @article {OPZ04a, title = {Objective properties from subjective quantum states: Environment as a witness}, journal = {Physical Review Letters}, volume = {93}, year = {2004}, pages = {220401}, abstract = {We study the emergence of objective properties in open quantum systems. In our analysis, the environment is promoted from a passive role of reservoir selectively destroying quantum coherence, to an active role of amplifier selectively proliferating information about the system. We show that only preferred pointer states of the system can leave a redundant and therefore easily detectable imprint on the environment. Observers who{\textendash}-as it is almost always the case{\textendash}-discover the state of the system indirectly (by probing a fraction of its environment) will find out only about the corresponding pointer observable. Many observers can act in this fashion independently and without perturbing the system: they will agree about the state of the system. In this operational sense, {\em preferred pointer states exist objectively}.}, keywords = {Decoherence, Foundations, Quantum Theory}, author = {Ollivier, H. and Poulin, D. and Zurek, W. H.} } @article {BGL+04a, title = {Robust Polarization-Based Quantum Key Distribution over a Collective-Noise Channel}, journal = {Physical Review Letters}, volume = {92}, year = {2004}, pages = {017901}, abstract = {We present two polarization-based protocols for quantum key distribution. The protocols encode key bits in noiseless subspaces or subsystems, and so can function over a quantum channel subjected to an arbitrary degree of collective noise, as occurs, for instance, due to rotation of polarizations in an optical fiber. These protocols can be implemented using only entangled photon-pair sources, single-photon rotations, and single-photon detectors. 
Thus, our proposals offer practical and realistic alternatives to existing schemes for quantum key distribution over optical fibers without resorting to interferometry or two-way quantum communication, thereby circumventing, respectively, the need for high precision timing and the threat of Trojan horse attacks.}, keywords = {Crypto, Error correction}, author = {Boileau, J.-C. and Gottesman, D. and R. Laflamme and Poulin, D. and Spekkens, R. W.} } @article {KLP+03a, title = {Comment on {\textquoteright}Absence of a Slater Transition in the Two-Dimensional Hubbard Model{\textquoteright}}, journal = {Physical Review Letters}, volume = {90}, year = {2003}, pages = {099702}, abstract = {In a recent paper, Physical Review Letters 87, 167010/1-4 (2001), Moukouri and Jarrell presented evidence that in the two-dimensional (d=2) Hubbard model at half-filling there is a metal-insulator transition (MIT) at finite temperature even in weak coupling. While we agree with the numerical results of that paper, we arrive at different conclusions: The apparent gap at finite-temperature can be understood, at weak-coupling, as a crossover phenomenon involving large (but not infinite) antiferromagnetic (AFM) correlation length. Phase-space effects on the self-energy in d=2 are crucial, as are the ratio between AFM correlation length and single-particle thermal de Broglie wavelength. In weak coupling

}, keywords = {Condensed matter, Many-Body, Simulation}, author = {B. Kyung and J.S. Landry and Poulin, D. and A.-M. S. Tremblay} } @article {PB03a, title = {Compatibility of quantum states}, journal = {Physical Review A}, volume = {67}, year = {2003}, pages = {010101(R)}, abstract = {We introduce a measure of the compatibility between quantum states{\textendash}-the likelihood that two density matrices describe the same object. Our measure is motivated by two elementary requirements, which lead to a natural definition. We list some properties of this measure, and discuss its relation to the problem of combining two observers{\textquoteright} states of knowledge.}, keywords = {Foundations}, author = {Poulin, D. and Blume-Kohout, R.} } @article {PLMP03a, title = {Testing Integrability with a single bit of quantum information}, journal = {Physical Review A}, volume = {68}, year = {2003}, pages = {022302}, abstract = {We show that deterministic quantum computing with a single bit (DQC1) can determine whether the classical limit of a quantum system is chaotic or integrable using $O(N)$ physical resources, where $N$ is the dimension of the Hilbert space of the system under study. This is a square root improvement over all known classical procedures. Our study relies strictly on the random matrix conjecture. We also present numerical results for the nonlinear kicked top.}, keywords = {Quantum chaos, Quantum simulation}, author = {Poulin, D. and R. Laflamme and Milburn, G. J. and Paz, J.-P.} } @booklet {Pou02a, title = {A rough guide to quantum chaos}, year = {2002}, abstract = {This tutorial offers some insight into the question {\textquoteleft}{\textquoteleft}What is quantum chaos and why is it interesting?{\textquoteright}{\textquoteright}. Its main purpose is to present some signatures of chaos in the quantum world. 
This is {\it not} a technical reference, it contains but a few simple equations and no explicit references; rather, the main body of this manuscript is followed by a reading guide. Some of the mathematical tools used in the field are so cumbersome they often obscure the physical relevance of the problem under investigation. However, after having consulted this tutorial, the technical literature should appear less mysterious and, we hope, one should have a better intuition of what is interesting, and what is superficial!}, keywords = {Quantum chaos}, author = {Poulin, D.} } @article {Pou01a, title = {Classicality of Quantum Information Processing}, journal = {Physical Review A}, volume = {65}, year = {2001}, pages = {42319}, abstract = {The ultimate goal of the classicality program is to quantify the amount of quantumness of certain processes. Here, classicality is studied for a restricted type of process: quantum information processing (QIP). Under special conditions, one can force some qubits of a quantum computer into a classical state without affecting the outcome of the computation. The minimal set of conditions is described and its structure is studied. Some implications of this formalism are the increase of noise robustness, a proof of the quantumness of mixed state quantum computing and a step forward in understanding the very foundation of QIP.}, keywords = {Foundations}, author = {Poulin, D.} } @booklet {TP00a, title = {Aspects num{\'e}riques des simulations du mod{\`e}le de {Hubbard} {\textendash}- {M}onte {C}arlo quantique et m{\'e}thode d{\textquoteright}entropie maximum}, year = {2000}, publisher = {Universit{\'e} de Sherbrooke}, abstract = {Nous pr{\'e}sentons dans ce rapport une introduction compl{\`e}te {\`a} deux algorithmes couramment utilis{\'e}s en physique du solide dans le cadre du probl{\`e}me {\`a} N-corps quantique. 
Le premier est l{\textquoteright}algorithme de Blankenbecler, Scalapino et Sugar (bss) (aussi appel{\'e} algorithme du d{\'e}terminant), d{\'e}velopp{\'e} au d{\'e}but des ann{\'e}es 80 dans le but de calculer num{\'e}riquement les propri{\'e}t{\'e}s thermodynamiques de certains mod{\`e}les d{\textquoteright}{\'e}lectrons fortement corr{\'e}l{\'e}s, dont celui de Hubbard, {\'e}tudi{\'e} ici comme mod{\`e}le pour la supraconductivit{\'e} {\`a} haut Tc. Pour cette partie du rapport, notre {\'e}tude pr{\'e}sente d{\textquoteright}abord le fonctionnement de l{\textquoteright}algorithme pour s{\textquoteright}attarder ensuite {\`a} certains de ses aspects num{\'e}riques, en particulier ceux qui sont probl{\'e}matiques au niveau de la stabilit{\'e} des calculs. Parmi ceux-ci, nous retrouvons les instabilit{\'e}s li{\'e}es aux calculs {\`a} basse temp{\'e}rature, le choix de la d{\'e}composition de Trotter, le calcul des fonctions de Green et des observables {\`a} temps in{\'e}gal, de m{\^e}me que le probl{\`e}me d{\textquoteright}ergodicit{\'e}. En deuxi{\`e}me partie, nous abordons le calcul des propri{\'e}t{\'e}s dynamiques du mod{\`e}le de Hubbard, comme le poids spectral obtenu en prolongeant analytiquement les donn{\'e}es Monte Carlo calcul{\'e}es en temps imaginaire. Pour cette section du rapport, nous pr{\'e}sentons quelques {\'e}l{\'e}ments de la th{\'e}orie de la r{\'e}gularisation et la m{\'e}thode d{\textquoteright}entropie maximum utilis{\'e}s pour r{\'e}soudre le probl{\`e}me num{\'e}riquement mal pos{\'e} que constitue le prolongement analytique des donn{\'e}es Monte Carlo. 
Finalement, pour rendre ce rapport complet en soi, nous avons inclut en annexe une introduction {\`a} quelques outils d{\textquoteright}analyse num{\'e}rique ainsi qu{\textquoteright}un guide de lecture renvoyant {\`a} des r{\'e}f{\'e}rences de base sur la plupart des sujets trait{\'e}s dans ce rapport.}, keywords = {Condensed matter, Many-Body, Simulation}, author = {Hugo Touchette and Poulin, D.} } @article {Moukouri:2000sa, title = {Many-body theory versus simulations for the pseudogap in the Hubbard model}, journal = {Physical Review B}, volume = {61}, number = {12}, year = {2000}, pages = {7887{\textendash}7892}, abstract = {The opening of a critical-fluctuation-induced pseudogap (or precursor pseudogap) in the one-particle spectral weight of the half-filled two-dimensional Hubbard model is discussed. This pseudogap, appearing in our Monte Carlo simulations, may be obtained from many-body techniques that use Green functions and vertex corrections that are at the same level of approximation. Self-consistent theories of the Eliashberg type (such as the fluctuation exchange approximation) use renormalized Green functions and bare vertices in a context where there is no Migdal theorem. They do not find the pseudogap, in quantitative and qualitative disagreement with simulations, suggesting these methods are inadequate for this problem. Differences between precursor pseudogaps and strong-coupling pseudogaps are also discussed.

}, keywords = {Condensed matter, Many-Body, Simulation}, author = {Moukouri, S. and S. Allen and F. Lemay and B. Kyung and Poulin, D. and Vilk, Y.M. and A.-M. S. Tremblay} }