Compare this org source:

===================================================================
# A small :noweb mystery: indentation
#+property: header-args:R :session
#+options: toc:nil
#+author:
#+date:

The structure of a probability problem can be represented by a ~JAGS~
code snippet:

#+name: Struct
#+begin_src R :eval never :exports code
for (i in 1:nObs) {
    for (j in 1:nZ) {
        Z[i,j] ~ dbern(P[i,j])
        logit(P[i,j]) <- alpha + beta[j]*X[i]
    }
}
#+end_src

The same code snippet can be used for simulation:

#+begin_src R :exports code :results none :noweb yes
library(rjags)
library(coda)
## Reproducibility ?
set.seed(813)
Params <- local({
    nObs <- 500
    nZ <- 5
    X <- rnorm(nObs)
    alpha <- rnorm(1,0,3)
    beta <- rnorm(nZ,-1,2)
    list(
        nObs=nObs,
        nZ=nZ,
        X=X,
        alpha=alpha,
        beta=beta)
})
## Wrap model code
M <-
    "model {
<<Struct>>
}"
## Compilation
JM <- jags.model(textConnection(M), data=Params, n.adapt=1, n.chains=1)
## Forward sampling
JS <- coda.samples(JM, "Z", n.iter=1)
#+end_src

and for inference, after adding priors on the parameters:

#+name: Priors
#+begin_src R :eval never :exports code
## Priors
alpha ~ dt(0, 1e-2, 3)
for (j in 1:nZ) {
    beta[j] ~ dt(0,1e-2, 3)
}
#+end_src

#+name: Inference
#+begin_src R :noweb yes :exports code
Data <- list(
    nObs=Params$nObs,
    nZ=Params$nZ,
    X=Params$X,
    Z=matrix(as.matrix(JS), ncol=Params$nZ, byrow=FALSE))
## Wrap inference model
MI <-
    "model {
<<Struct>>
<<Priors>>
}"
## Compilation
JMI <- jags.model(textConnection(MI), Data, n.chains=4)
## Inference sampling
JMS <- coda.samples(JMI, c("alpha", "beta"), n.iter=1000)
#+end_src

#+RESULTS: Inference
===================================================================

With the result of its export to ASCII:

===================================================================
The structure of a probability problem can be represented by a `JAGS'
code snippet:

,----
| for (i in 1:nObs) {
|     for (j in 1:nZ) {
|         Z[i,j] ~ dbern(P[i,j])
|         logit(P[i,j]) <- alpha + beta[j]*X[i]
|     }
| }
`----

The same code snippet can be used for simulation:

,----
| library(rjags)
| library(coda)
| ## Reproducibility ?
| set.seed(813)
| Params <- local({
|     nObs <- 500
|     nZ <- 5
|     X <- rnorm(nObs)
|     alpha <- rnorm(1,0,3)
|     beta <- rnorm(nZ,-1,2)
|     list(
|         nObs=nObs,
|         nZ=nZ,
|         X=X,
|         alpha=alpha,
|         beta=beta)
| })
| ## Wrap model code
| M <-
|     "model {
| for (i in 1:nObs) {
| for (j in 1:nZ) {
| Z[i,j] ~ dbern(P[i,j])
| logit(P[i,j]) <- alpha + beta[j]*X[i]
| }
| }
| }"
| ## Compilation
| JM <- jags.model(textConnection(M), data=Params, n.adapt=1, n.chains=1)
| ## Forward sampling
| JS <- coda.samples(JM, "Z", n.iter=1)
`----

and for inference, after adding priors on the parameters:

,----
| ## Priors
| alpha ~ dt(0, 1e-2, 3)
| for (j in 1:nZ) {
|     beta[j] ~ dt(0,1e-2, 3)
| }
`----

,----
| Data <- list(
|     nObs=Params$nObs,
|     nZ=Params$nZ,
|     X=Params$X,
|     Z=matrix(as.matrix(JS), ncol=Params$nZ, byrow=FALSE))
| ## Wrap inference model
| MI <-
|     "model {
| for (i in 1:nObs) {
| for (j in 1:nZ) {
| Z[i,j] ~ dbern(P[i,j])
| logit(P[i,j]) <- alpha + beta[j]*X[i]
| }
| }
| ## Priors
| alpha ~ dt(0, 1e-2, 3)
| for (j in 1:nZ) {
| beta[j] ~ dt(0,1e-2, 3)
| }
| }"
| ## Compilation
| JMI <- jags.model(textConnection(MI), Data, n.chains=4)
| ## Inference sampling
| JMS <- coda.samples(JMI, c("alpha", "beta"), n.iter=1000)
`----
===================================================================

The indentation is not respected in the included JAGS snippets: the
lines coming from <<Struct>> and <<Priors>> end up flush left inside
the model strings, whereas the standalone blocks keep their relative
indentation. Further attempts at exporting to PDF (with both the
built-in exporter and ox-pandoc) and to DOCX (ox-pandoc) show that the
problem remains the same, but, IIRC, with slight variations in
whitespace.
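For what it's worth, the JAGS machinery should not be needed to
trigger this. If my diagnosis is right, a file as small as the
following sketch (block names are mine, purely illustrative) should
show the same flattening of the included lines on ASCII export:

#+name: Inner
#+begin_src R :eval never :exports code
for (i in 1:3) {
    print(i)
}
#+end_src

#+begin_src R :noweb yes :exports code
f <- function() {
<<Inner>>
}
#+end_src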
That's not serious (just ugly) for R/JAGS; but it might be serious for
Python/Sage, where whitespace is syntactic (see the contrived
illustration in the PS below). Thoughts?

--
Emmanuel Charpentier
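PS: a contrived sketch of the Python concern (block names are
hypothetical, not taken from the file above). Here the source relies
on the indentation of the noweb reference to put the included
statement inside the loop, since Org prefixes each expanded line with
the text preceding the reference:

#+name: Body
#+begin_src python :eval never :exports code
total += i
#+end_src

#+begin_src python :noweb yes :exports code
total = 0
for i in range(10):
    <<Body>>
print(total)
#+end_src

If an exporter flushes the expansion of <<Body>> left, the listing the
reader sees is no longer valid Python (an IndentationError if typed
back in), and with a slightly different layout the statement could
silently land outside the loop instead.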