Dear all,
I’m trying to implement the ALCOVE model (an attention-learning model for category learning) using PyTensor operations. The model code is shown below.
with pm.Model() as model:
    # ---------- Priors ----------
    c = pm.Uniform("c", lower=0, upper=10)      # specificity of the similarity gradient
    phi = pm.Uniform("phi", lower=0, upper=10)  # response-mapping (decision) scaling
    alr = pm.Beta("alr", alpha=1, beta=1)       # attention learning rate
    wlr = pm.Beta("wlr", alpha=1, beta=1)       # association-weight learning rate

    # ---------- ALCOVE simulation: per-block response probabilities ----------
    # This line was commented out in the original post, so `p_block` was
    # undefined (or a stale, non-symbolic value) when the likelihood was built.
    # block_probs_from_theta must construct a *symbolic* PyTensor graph from
    # the RV tensors c/phi/alr/wlr — no `.eval()` calls and no references to
    # the RVs from inside a separately compiled function — otherwise PyMC
    # raises "Random variables detected in the logp graph".
    p_block = pm.Deterministic("p_block", block_probs_from_theta(c, phi, alr, wlr))

    # ---------- Binomial likelihood ----------
    # n_block / y_block are assumed to be observed trial counts and successes
    # per block, defined earlier in the notebook — TODO confirm shapes match
    # p_block.
    y = pm.Binomial("y", n=n_block, p=p_block, observed=y_block)

    idata = pm.sample()

print(az.summary(idata, var_names=["c", "phi", "alr", "wlr", "p_block"]))
az.plot_trace(idata, var_names=["c", "phi", "alr", "wlr"])
However, I get the following error:
ValueError Traceback (most recent call last) Cell In[2], line 80 75 #print(p_block.eval().shape) 76 77 #########Binomial Likelihood######### 78 y = pm.Binomial("y", n=n_block, p=p_block, observed = y_block) ---> 80 idata = pm.sample() 82 print(az.summary(idata, var_names=["c", "phi", "alr", "wlr", "p_block"])) 83 az.plot_trace(idata, var_names=["c", "phi", "alr", "wlr"]) File ~\anaconda3\envs\pymc_env\lib\site-packages\pymc\sampling\mcmc.py:679, in sample(draws, tune, chains, cores, random_seed, progressbar, step, var_names, nuts_sampler, initvals, init, jitter_max_retries, n_init, trace, discard_tuned_samples, compute_convergence_checks, keep_warning_stat, return_inferencedata, idata_kwargs, nuts_sampler_kwargs, callback, mp_ctx, model, **kwargs) 676 auto_nuts_init = False 678 initial_points = None --> 679 step = assign_step_methods(model, step, methods=pm.STEP_METHODS, step_kwargs=kwargs) 681 if nuts_sampler != "pymc": 682 if not isinstance(step, NUTS): File ~\anaconda3\envs\pymc_env\lib\site-packages\pymc\sampling\mcmc.py:210, in assign_step_methods(model, step, methods, step_kwargs) 208 methods_list: list[type[BlockedStep]] = list(methods or pm.STEP_METHODS) 209 selected_steps: dict[type[BlockedStep], list] = {} --> 210 model_logp = model.logp() 212 for var in model.value_vars: 213 if var not in assigned_vars: 214 # determine if a gradient can be computed File ~\anaconda3\envs\pymc_env\lib\site-packages\pymc\model\core.py:717, in Model.logp(self, vars, jacobian, sum) 715 rv_logps: list[TensorVariable] = [] 716 if rvs: --> 717 rv_logps = transformed_conditional_logp( 718 rvs=rvs, 719 rvs_to_values=self.rvs_to_values, 720 rvs_to_transforms=self.rvs_to_transforms, 721 jacobian=jacobian, 722 ) 723 assert isinstance(rv_logps, list) 725 # Replace random variables by their value variables in potential terms File ~\anaconda3\envs\pymc_env\lib\site-packages\pymc\logprob\basic.py:631, in transformed_conditional_logp(rvs, rvs_to_values, rvs_to_transforms, jacobian, 
**kwargs) 629 rvs_in_logp_expressions = _find_unallowed_rvs_in_graph(logp_terms_list) 630 if rvs_in_logp_expressions: --> 631 raise ValueError(RVS_IN_JOINT_LOGP_GRAPH_MSG % rvs_in_logp_expressions) 633 return logp_terms_list ValueError: Random variables detected in the logp graph: {wlr, phi, c, alr}. This can happen when DensityDist logp or Interval transform functions reference nonlocal variables, or when not all rvs have a corresponding value variable.
All model computations are defined inside a function called block_probs_from_theta.
If I manually assign constant values to the parameters (e.g., c=1.0, phi=1.0, alr=0.5, wlr=0.5), the function runs fine and produces the expected output.
But when I use these parameters as PyMC random variables inside the model, the sampling fails.
What does this error mean, and how can I properly connect my PyTensor function with the PyMC model so that sampling works?