第 71 章 贝叶斯工作流程

71.1 贝叶斯工作流程

  1. 数据探索和准备
  2. 全概率模型
  3. 先验预测检查,利用先验模拟响应变量
  4. 模型应用到模拟数据,看参数恢复情况
  5. 模型应用到真实数据
  6. 检查抽样效率和模型收敛情况
  7. 模型评估和后验预测检查
  8. 信息准则与交叉验证,以及模型选择

71.2 案例

我们用 Ames 房屋价格数据,演示贝叶斯数据分析的工作流程。

library(tidyverse)
library(tidybayes)
library(rstan)
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())

71.2.1 1) 数据探索和准备

# Load the raw Ames house-price data set (a tibble of 1,460 rows x 81 columns,
# as shown by the printout below).
rawdf <- readr::read_rds("./demo_data/ames_houseprice.rds") 
rawdf
## # A tibble: 1,460 × 81
##      id ms_sub_class ms_zoning lot_frontage lot_area
##   <dbl>        <dbl> <chr>            <dbl>    <dbl>
## 1     1           60 RL                  65     8450
## 2     2           20 RL                  80     9600
## 3     3           60 RL                  68    11250
## 4     4           70 RL                  60     9550
## 5     5           60 RL                  84    14260
## 6     6           50 RL                  85    14115
## # … with 1,454 more rows, and 76 more variables:
## #   street <chr>, alley <chr>, lot_shape <chr>,
## #   land_contour <chr>, utilities <chr>,
## #   lot_config <chr>, land_slope <chr>,
## #   neighborhood <chr>, condition1 <chr>,
## #   condition2 <chr>, bldg_type <chr>,
## #   house_style <chr>, overall_qual <dbl>, …

为了简化,我们只关注房屋价格(sale_price)与房屋占地面积(lot_area)和所在地理位置(neighborhood)的关系,这里需要点准备工作

  • 房屋价格与房屋占地面积这两个变量对数化处理 (why ?)
  • 地理位置变量转换因子类型 (why ?)
  • 房屋价格与房屋占地面积这两个变量标准化处理 (why ?)
# Prepare the modeling data: keep the three variables of interest, drop
# incomplete rows, log-transform the two numeric variables, encode the
# neighborhood as a factor, and finally standardize the numeric variables
# (mean 0, sd 1) so the priors below are on a common scale.
standardize <- function(v) (v - mean(v)) / sd(v)

df <- rawdf %>%
  select(sale_price, lot_area, neighborhood) %>%
  drop_na() %>%
  mutate(
    sale_price   = standardize(log(sale_price)),
    lot_area     = standardize(log(lot_area)),
    neighborhood = as.factor(neighborhood)
  )

head(df)
## # A tibble: 6 × 3
##   sale_price lot_area neighborhood
##        <dbl>    <dbl> <fct>       
## 1      0.560   -0.133 CollgCr     
## 2      0.213    0.113 Veenker     
## 3      0.734    0.420 CollgCr     
## 4     -0.437    0.103 Crawfor     
## 5      1.01     0.878 NoRidge     
## 6     -0.384    0.858 Mitchel
# Scatter plot of standardized log price vs. standardized log lot area.
base_plot <- df %>%
  ggplot(aes(x = lot_area, y = sale_price)) +
  geom_point(colour = "blue")

# Pooled OLS trend line across all neighborhoods.
base_plot +
  geom_smooth(method = lm, se = FALSE, formula = "y ~ x")

# The same relationship fitted separately within each neighborhood,
# motivating the varying-intercept model below.
base_plot +
  geom_smooth(method = lm, se = FALSE, formula = "y ~ x", fullrange = TRUE) +
  facet_wrap(vars(neighborhood))

71.2.2 2) 数据模型

\[ \begin{align} y_i &\sim \operatorname{Normal}(\mu_i, \sigma) \\ \mu_i &= \alpha_{j} + \beta * x_i \\ \alpha_j & \sim \operatorname{Normal}(0, 10)\\ \beta & \sim \operatorname{Normal}(0, 10) \\ \sigma &\sim \operatorname{Exponential}(1) \end{align} \]

如果建立了这样的数学模型,可以马上写出stan代码

# Varying-intercept linear regression in Stan. Prior scales (alpha_sd,
# beta_sd) are passed in as data so the same program serves both the wide
# and narrow prior predictive checks; run_estimation toggles the likelihood
# so run_estimation = 0 samples from the prior only.
stan_program <- "
data {
  int<lower=1> n;                           // number of observations
  int<lower=1> n_neighbour;                 // number of neighborhoods
  int<lower=1> neighbour[n];                // neighborhood index per row
  vector[n] lot;                            // standardized log lot area
  vector[n] price;                          // standardized log sale price

  real alpha_sd;                            // prior sd for the intercepts
  real beta_sd;                             // prior sd for the slope
  int<lower = 0, upper = 1> run_estimation; // 0 = prior predictive only
}
parameters {
  vector[n_neighbour] alpha;                // per-neighborhood intercepts
  real beta;                                // common slope
  real<lower=0> sigma;                      // residual sd
}
model {
  vector[n] mu;

  for (i in 1:n) {
    mu[i] = alpha[neighbour[i]] + beta * lot[i];
  }

  alpha ~ normal(0, alpha_sd);
  beta ~ normal(0, beta_sd);
  sigma ~ exponential(1);

  // When run_estimation == 0 the likelihood is skipped, so the draws come
  // from the priors alone (prior predictive simulation).
  if(run_estimation == 1) {
     target += normal_lpdf(price | mu, sigma);
  }

}
generated quantities {
   vector[n] log_lik;   // pointwise log-likelihood (for loo / WAIC)
   vector[n] y_hat;     // posterior (or prior) predictive draws

   for (j in 1:n) {
     // BUG FIX: the original passed the whole vector `price`, which makes
     // every log_lik[j] equal to the TOTAL log-likelihood over all n
     // observations. Pointwise log-lik needs price[j] only.
     log_lik[j] = normal_lpdf(price[j] | alpha[neighbour[j]] + beta * lot[j], sigma);
     y_hat[j]   = normal_rng(alpha[neighbour[j]] + beta * lot[j], sigma);
   }
}
"

71.2.3 3) 先验预测检查,利用先验模拟响应变量

有个问题,我们这个先验概率怎么来的呢?猜的,因为没有人知道它究竟是什么分布(如果您是这个领域的专家,就不是猜,而叫合理假设)。那到底合不合理,我们需要检验下。这里用到的技术是先验预测检验。怎么做?

  • 首先,模拟先验概率分布
  • 然后,通过先验和模型假定的线性关系,模拟相应的响应变量\(y_i\)(注意,不是真实的数据)
# Assemble the data list for Stan. tidybayes::compose_data() also supplies
# n (the row count) automatically and converts the neighborhood factor into
# 1-based integer codes. run_estimation = 0 turns the likelihood off, so
# sampling yields draws from the wide (sd = 10) priors only.
stan_data <- df %>%
  tidybayes::compose_data(
    n_neighbour    = n_distinct(neighborhood),
    neighbour      = neighborhood,
    price          = sale_price,
    lot            = lot_area,
    alpha_sd       = 10, 
    beta_sd        = 10, 
    run_estimation = 0
  )



# Sample from the priors only (run_estimation = 0 in stan_data). A single
# short chain suffices: 100 post-warmup draws are enough for the visual check.
model_only_prior_sd_10 <- stan(model_code = stan_program, data = stan_data, 
                       chains = 1, iter = 2100, warmup = 2000)
## 
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
## Chain 1: 
## Chain 1: Gradient evaluation took 7.9e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.79 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1: 
## Chain 1: 
## Chain 1: Iteration:    1 / 2100 [  0%]  (Warmup)
## Chain 1: Iteration:  210 / 2100 [ 10%]  (Warmup)
## Chain 1: Iteration:  420 / 2100 [ 20%]  (Warmup)
## Chain 1: Iteration:  630 / 2100 [ 30%]  (Warmup)
## Chain 1: Iteration:  840 / 2100 [ 40%]  (Warmup)
## Chain 1: Iteration: 1050 / 2100 [ 50%]  (Warmup)
## Chain 1: Iteration: 1260 / 2100 [ 60%]  (Warmup)
## Chain 1: Iteration: 1470 / 2100 [ 70%]  (Warmup)
## Chain 1: Iteration: 1680 / 2100 [ 80%]  (Warmup)
## Chain 1: Iteration: 1890 / 2100 [ 90%]  (Warmup)
## Chain 1: Iteration: 2001 / 2100 [ 95%]  (Sampling)
## Chain 1: Iteration: 2100 / 2100 [100%]  (Sampling)
## Chain 1: 
## Chain 1:  Elapsed Time: 7.03 seconds (Warm-up)
## Chain 1:                0.336 seconds (Sampling)
## Chain 1:                7.366 seconds (Total)
## Chain 1:
# For each prior draw of (alpha[5], beta), tabulate the implied regression
# line over x in [-3, 3].
x_grid <- seq(from = -3, to = 3, length.out = 200)

prior_draws_wide <- model_only_prior_sd_10 %>%
  as.data.frame() %>%
  select(`alpha[5]`, beta)

dt_wide <- map2(
  prior_draws_wide[["alpha[5]"]], prior_draws_wide$beta,
  function(a, b) tibble(x = x_grid, y = a + b * x)
)

# Overlay one faint line per prior draw; the sd = 10 priors admit wildly
# steep lines, i.e. they are implausibly wide for standardized data.
ggplot() +
  map(
    dt_wide,
    ~ geom_line(data = ., aes(x = x, y = y), alpha = 0.2)
  )
# Same data list as before, but with much tighter priors (sd = 1); the
# likelihood is still off (run_estimation = 0), so this is again a prior
# predictive simulation.
stan_data <- df %>%
  tidybayes::compose_data(
    n_neighbour    = n_distinct(neighborhood),
    neighbour      = neighborhood,
    price          = sale_price,
    lot            = lot_area,
    alpha_sd       = 1, 
    beta_sd        = 1, 
    run_estimation = 0
  )



# Sample from the narrow (sd = 1) priors only; one short chain again gives
# 100 post-warmup draws for the visual prior predictive check.
model_only_prior_sd_1 <- stan(model_code = stan_program, data = stan_data, 
                       chains = 1, iter = 2100, warmup = 2000)
## 
## SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
## Chain 1: 
## Chain 1: Gradient evaluation took 0.000157 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.57 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1: 
## Chain 1: 
## Chain 1: Iteration:    1 / 2100 [  0%]  (Warmup)
## Chain 1: Iteration:  210 / 2100 [ 10%]  (Warmup)
## Chain 1: Iteration:  420 / 2100 [ 20%]  (Warmup)
## Chain 1: Iteration:  630 / 2100 [ 30%]  (Warmup)
## Chain 1: Iteration:  840 / 2100 [ 40%]  (Warmup)
## Chain 1: Iteration: 1050 / 2100 [ 50%]  (Warmup)
## Chain 1: Iteration: 1260 / 2100 [ 60%]  (Warmup)
## Chain 1: Iteration: 1470 / 2100 [ 70%]  (Warmup)
## Chain 1: Iteration: 1680 / 2100 [ 80%]  (Warmup)
## Chain 1: Iteration: 1890 / 2100 [ 90%]  (Warmup)
## Chain 1: Iteration: 2001 / 2100 [ 95%]  (Sampling)
## Chain 1: Iteration: 2100 / 2100 [100%]  (Sampling)
## Chain 1: 
## Chain 1:  Elapsed Time: 7.485 seconds (Warm-up)
## Chain 1:                0.346 seconds (Sampling)
## Chain 1:                7.831 seconds (Total)
## Chain 1:
# Same construction as dt_wide, now from the narrow (sd = 1) prior draws:
# one implied regression line per draw of (alpha[5], beta).
x_grid <- seq(from = -3, to = 3, length.out = 200)

prior_draws_narrow <- model_only_prior_sd_1 %>%
  as.data.frame() %>%
  select(`alpha[5]`, beta)

dt_narrow <- map2(
  prior_draws_narrow[["alpha[5]"]], prior_draws_narrow$beta,
  function(a, b) tibble(x = x_grid, y = a + b * x)
)

# With sd = 1 priors the simulated lines stay on a plausible scale for
# standardized data.
ggplot() +
  map(
    dt_narrow,
    ~ geom_line(data = ., aes(x = x, y = y), alpha = 0.2)
  )

71.2.4 4) 模型应用到模拟数据,看参数恢复情况

# Take ONE random prior draw (n = 1) of all parameters and of the prior
# predictive outcome y_hat; it will serve as the known "truth" for the
# fake-data parameter-recovery check below.
df_random_draw <- model_only_prior_sd_1 %>% 
  tidybayes::gather_draws(alpha[i], beta, sigma, y_hat[i], n = 1)

# Keep only the parameters; scalar ones (beta, sigma) have i == NA, indexed
# ones get an "_i" suffix so each row is uniquely labelled.
true_parameters <- df_random_draw %>% 
  filter(.variable %in% c("alpha", "beta", "sigma")) %>%
  mutate(parameters = if_else(is.na(i), .variable, str_c(.variable, "_", i)))


# The simulated outcome vector generated by that single prior draw.
y_sim <- df_random_draw %>% 
  filter(.variable == "y_hat") %>% 
  pull(.value)

模拟的数据y_sim,导入模型作为响应变量,

# Same data list, but the response is now the SIMULATED outcome y_sim and
# run_estimation = 1 switches the likelihood on.
stan_data <- df %>%
  tidybayes::compose_data(
    n_neighbour    = n_distinct(neighborhood),
    neighbour      = neighborhood,
    price          = y_sim,      ##  simulated data, not the real response
    lot            = lot_area,
    alpha_sd       = 1, 
    beta_sd        = 1, 
    run_estimation = 1
  )

# Fit to the fake data (rstan defaults: 4 chains, iter = 2000); if the model
# is well specified, the posterior should recover true_parameters.
model_on_fake_dat <- stan(model_code = stan_program, data = stan_data)

看参数恢复的如何

# Posterior densities for every parameter, with the true (simulated-from)
# values overlaid in red; good recovery means each red line falls near the
# bulk of the corresponding density.
recovery_draws <- model_on_fake_dat %>%
  tidybayes::gather_draws(alpha[i], beta, sigma) %>%
  ungroup() %>%
  mutate(parameters = if_else(is.na(i), .variable, str_c(.variable, "_", i)))

ggplot(recovery_draws, aes(x = .value)) +
  geom_density() +
  geom_vline(
    data = true_parameters,
    aes(xintercept = .value),
    color = "red"
  ) +
  facet_wrap(vars(parameters), ncol = 5, scales = "free")

如果觉得上面的过程很麻烦,可以直接用bayesplot::mcmc_recover_hist()

# Shortcut for the same recovery check: extract the draws as a matrix
# (one column per parameter) and let bayesplot draw histogram + true value.
# NOTE(review): assumes true_parameters rows are ordered alpha[1..25],
# beta, sigma to match the matrix columns -- verify.
posterior_alpha_beta <- 
  as.matrix(model_on_fake_dat, pars = c('alpha', 'beta', 'sigma'))

bayesplot::mcmc_recover_hist(posterior_alpha_beta, true = true_parameters$.value)

71.2.5 5) 模型应用到真实数据

应用到真实数据

# Final data list: the REAL response (sale_price) with the likelihood on.
stan_data <- df %>%
  tidybayes::compose_data(
    n_neighbour    = n_distinct(neighborhood),
    neighbour      = neighborhood,
    price          = sale_price,      ##  the real data this time
    lot            = lot_area,
    alpha_sd       = 1, 
    beta_sd        = 1, 
    run_estimation = 1
  )

# Fit to the observed data with rstan defaults (4 chains, iter = 2000,
# warmup = 1000).
model <- stan(model_code = stan_program, data = stan_data)

71.2.6 6) 检查抽样效率和模型收敛情况

  • 检查traceplot
rstan::traceplot(model)
  • 检查neff 和 Rhat
# Posterior summary: check n_eff (effective sample size) and Rhat
# (should be close to 1 at convergence) for every parameter.
print(model,
  pars = c("alpha", "beta", "sigma"),
  probs = c(0.025, 0.50, 0.975),
  digits_summary = 3
)
## Inference for Stan model: anon_model.
## 4 chains, each with iter=2000; warmup=1000; thin=1; 
## post-warmup draws per chain=1000, total post-warmup draws=4000.
## 
##             mean se_mean    sd   2.5%    50%  97.5%
## alpha[1]   1.002   0.002 0.154  0.700  1.001  1.296
## alpha[2]   0.566   0.004 0.388 -0.179  0.562  1.320
## alpha[3]  -0.102   0.002 0.164 -0.426 -0.101  0.200
## alpha[4]  -0.687   0.001 0.079 -0.842 -0.687 -0.534
## alpha[5]   0.003   0.002 0.121 -0.237  0.002  0.244
## alpha[6]   0.329   0.001 0.050  0.232  0.329  0.425
## alpha[7]   0.339   0.001 0.086  0.171  0.339  0.514
## alpha[8]  -0.779   0.001 0.063 -0.899 -0.778 -0.657
## alpha[9]   0.215   0.001 0.068  0.081  0.216  0.341
## alpha[10] -1.329   0.001 0.098 -1.517 -1.328 -1.141
## alpha[11] -0.409   0.002 0.159 -0.717 -0.409 -0.100
## alpha[12] -0.320   0.001 0.085 -0.486 -0.320 -0.152
## alpha[13] -0.440   0.000 0.041 -0.520 -0.441 -0.361
## alpha[14]  1.368   0.001 0.096  1.179  1.369  1.560
## alpha[15]  0.317   0.002 0.205 -0.083  0.318  0.721
## alpha[16]  1.412   0.001 0.070  1.277  1.412  1.546
## alpha[17]  0.100   0.001 0.073 -0.042  0.100  0.238
## alpha[18] -0.684   0.001 0.057 -0.792 -0.684 -0.571
## alpha[19] -0.596   0.001 0.070 -0.734 -0.596 -0.459
## alpha[20]  0.119   0.001 0.081 -0.038  0.120  0.274
## alpha[21]  0.870   0.001 0.067  0.736  0.871  1.004
## alpha[22]  1.420   0.001 0.121  1.178  1.419  1.663
## alpha[23] -0.361   0.001 0.118 -0.594 -0.360 -0.133
## alpha[24]  0.505   0.001 0.103  0.305  0.506  0.702
## alpha[25]  0.517   0.002 0.182  0.168  0.516  0.865
## beta       0.347   0.000 0.021  0.306  0.347  0.389
## sigma      0.607   0.000 0.011  0.586  0.607  0.630
##           n_eff  Rhat
## alpha[1]   6885 1.000
## alpha[2]   8576 1.000
## alpha[3]   6391 0.999
## alpha[4]   8428 1.000
## alpha[5]   6341 0.999
## alpha[6]   8628 0.999
## alpha[7]   8674 1.000
## alpha[8]   8823 1.000
## alpha[9]   6789 0.999
## alpha[10]  9389 0.999
## alpha[11]  5466 1.000
## alpha[12]  7407 1.000
## alpha[13]  6905 1.000
## alpha[14]  7713 1.000
## alpha[15]  6962 0.999
## alpha[16]  8396 0.999
## alpha[17]  7733 0.999
## alpha[18]  8273 1.000
## alpha[19]  8086 0.999
## alpha[20]  9307 1.000
## alpha[21]  7885 0.999
## alpha[22]  7681 0.999
## alpha[23]  7996 0.999
## alpha[24]  7145 1.000
## alpha[25]  8519 1.000
## beta       3814 1.000
## sigma      8300 1.000
## 
## Samples were drawn using NUTS(diag_e) at Sun May  8 10:17:12 2022.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
  • 检查posterior sample
# Half-eye summaries (density + median/interval) of the posterior for every
# parameter, one row per parameter label.
posterior_long <- model %>%
  tidybayes::gather_draws(alpha[i], beta, sigma) %>%
  ungroup() %>%
  mutate(parameters = if_else(is.na(i), .variable, str_c(.variable, "_", i)))

ggplot(posterior_long, aes(x = .value, y = parameters)) +
  ggdist::stat_halfeye()

事实上,bayesplot宏包很强大也很好用

# Density overlays and trace plots side by side for selected parameters.
bayesplot::mcmc_combo(
  as.array(model),
  combo = c("dens_overlay", "trace"),
  pars = c('alpha[1]', 'beta', 'sigma')
 ) 

71.2.7 7) 模型评估和后验预测检查

# Posterior predictive check: the density of the observed outcome should
# look like a typical member of the replicated-data densities.
yrep <- extract(model)[["y_hat"]]

# 300 randomly selected posterior draws keep the overlay plot readable.
draw_ids <- sample(nrow(yrep), 300)
bayesplot::ppc_dens_overlay(as.vector(df$sale_price), yrep[draw_ids, ])

71.3 Conclusion

71.4 作业

  • 前面的模型只有变化的截距(即不同的商圈有不同的截距)斜率是固定的,要求:增加一个变化的斜率

\[ \begin{align} y_i &\sim \operatorname{Normal}(\mu_i, \sigma) \\ \mu_i &= \alpha_{j} + \beta_{j} * x_i \\ \alpha_j & \sim \operatorname{Normal}(0, 1)\\ \beta_j & \sim \operatorname{Normal}(0, 1) \\ \sigma &\sim \operatorname{Exponential}(1) \end{align} \]