From 96a88ceec8a373582a5586dadf724af01b8e0fa9 Mon Sep 17 00:00:00 2001
From: Ben Vincent
Date: Fri, 18 Nov 2016 17:01:29 +0000
Subject: [PATCH] #147 addition of new discount function model

---
 .../DF_ExponentialPower.m                    | 113 ++++++++++++++++++
 .../bens_new_model/ExponentialPower.m        | 113 ++++++++++++++++++
 .../ModelHierarchicalExpPower.m              |  36 ++++++
 .../bens_new_model/ModelMixedExpPower.m      |  29 +++++
 .../bens_new_model/ModelSeparateExpPower.m   |  32 +++++
 .../ModelClasses/bens_new_model/README.md    |   5 +
 .../Hyperbolic1MagEffect.m                   |   0
 .../ModelHierarchicalME.m                    |   0
 .../ModelHierarchicalMEUpdated.m             |   0
 .../ModelHierarchicalME_MVNORM.m             |   0
 .../ModelMixedME.m                           |   0
 .../ModelSeparateME.m                        |   0
 .../plotExpPowerclusters.m                   |  85 +++++++++++++
 ddToolbox/models/separateExpPower.jags       |  30 +++++
 14 files changed, 443 insertions(+)
 create mode 100644 ddToolbox/DeterministicFunction/DF_ExponentialPower.m
 create mode 100644 ddToolbox/ModelClasses/bens_new_model/ExponentialPower.m
 create mode 100644 ddToolbox/ModelClasses/bens_new_model/ModelHierarchicalExpPower.m
 create mode 100644 ddToolbox/ModelClasses/bens_new_model/ModelMixedExpPower.m
 create mode 100644 ddToolbox/ModelClasses/bens_new_model/ModelSeparateExpPower.m
 create mode 100644 ddToolbox/ModelClasses/bens_new_model/README.md
 rename ddToolbox/ModelClasses/{hyperbolic_magnitide_effect_models => hyperbolic_magnitude_effect_models}/Hyperbolic1MagEffect.m (100%)
 rename ddToolbox/ModelClasses/{hyperbolic_magnitide_effect_models => hyperbolic_magnitude_effect_models}/ModelHierarchicalME.m (100%)
 rename ddToolbox/ModelClasses/{hyperbolic_magnitide_effect_models => hyperbolic_magnitude_effect_models}/ModelHierarchicalMEUpdated.m (100%)
 rename ddToolbox/ModelClasses/{hyperbolic_magnitide_effect_models => hyperbolic_magnitude_effect_models}/ModelHierarchicalME_MVNORM.m (100%)
 rename ddToolbox/ModelClasses/{hyperbolic_magnitide_effect_models => hyperbolic_magnitude_effect_models}/ModelMixedME.m (100%)
 rename ddToolbox/ModelClasses/{hyperbolic_magnitide_effect_models => hyperbolic_magnitude_effect_models}/ModelSeparateME.m (100%)
 create mode 100644 ddToolbox/discounting-plot-functions/plotExpPowerclusters.m
 create mode 100644 ddToolbox/models/separateExpPower.jags

diff --git a/ddToolbox/DeterministicFunction/DF_ExponentialPower.m b/ddToolbox/DeterministicFunction/DF_ExponentialPower.m
new file mode 100644
index 00000000..117dbb43
--- /dev/null
+++ b/ddToolbox/DeterministicFunction/DF_ExponentialPower.m
@@ -0,0 +1,113 @@
+classdef DF_ExponentialPower < DiscountFunction
+    %DF_ExponentialPower The 2-parameter exponential power discount function, exp(-k*delay^tau)
+
+    properties (Dependent)
+
+    end
+
+    methods (Access = public)
+
+        function obj = DF_ExponentialPower(varargin)
+            obj = obj@DiscountFunction();
+
+            obj.theta.k = Stochastic('k');
+            obj.theta.tau = Stochastic('tau');
+
+            % Input parsing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+            p = inputParser;
+            p.StructExpand = false;
+            p.addParameter('samples',struct(), @isstruct)
+            p.parse(varargin{:});
+
+            fieldnames = fields(p.Results.samples);
+            % Add any provided samples
+            for n = 1:numel(fieldnames)
+                obj.theta.(fieldnames{n}).addSamples( p.Results.samples.(fieldnames{n}) );
+            end
+            % ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+        end
+
+
+        function plot(obj)
+            x = [1:365];
+
+            % don't plot if we've been given NaN's
+            if any(isnan(obj.theta.k.samples))
+                warning('Not plotting due to NaN''s')
+                return
+            end
+
+            % TODO
+            discountFraction = obj.eval(x, 'nExamples', 100);
+
+            try
+                plot(x, discountFraction, '-', 'Color',[0.5 0.5 0.5 0.1])
+            catch
+                % backward compatibility
+                plot(x, discountFraction, '-', 'Color',[0.5 0.5 0.5])
+            end
+
+            xlabel('delay $D^B$', 'interpreter','latex')
+            ylabel('discount factor', 'interpreter','latex')
+            set(gca,'Xlim', [0 max(x)])
+            box off
+            axis square
+
+            % ~~~~~~~~~~~~~
+            obj.data.plot()
+            % ~~~~~~~~~~~~~
+        end
+
+
+
+
+% function discountFraction = eval(obj, x, varargin)
+%     % evaluate the discount fraction :
+%     % - at the delays (x.delays)
+%     % - given the obj.parameters
+%
+%     p = inputParser;
+%     p.addRequired('x', @isnumeric);
+%     p.addParameter('nExamples', [], @isscalar);
+%     p.parse(x, varargin{:});
+%
+%     n_samples_requested = p.Results.nExamples;
+%     n_samples_got = numel(obj.theta.k.samples);
+%     n_samples_to_get = min([n_samples_requested n_samples_got]);
+%     if ~isempty(n_samples_requested)
+%         % shuffle the deck and pick the top nExamples
+%         shuffledExamples = randperm(n_samples_to_get);
+%         ExamplesToPlot = shuffledExamples([1:n_samples_to_get]);
+%     else
+%         ExamplesToPlot = 1:n_samples_to_get;
+%     end
+%
+%     if verLessThan('matlab','9.1')
+%         discountFraction = (bsxfun(@times,...
+%             exp( - obj.theta.k.samples(ExamplesToPlot)),...
+%             x) );
+%     else
+%         % use new array broadcasting in 2016b
+%         discountFraction = exp( - obj.theta.k.samples(ExamplesToPlot) .* x );
+%     end
+% end
+
+    end
+
+    methods (Static, Access = protected)
+
+        function y = function_evaluation(x, theta, ExamplesToPlot)
+            k = theta.k.samples(ExamplesToPlot);
+            tau = theta.tau.samples(ExamplesToPlot);
+            if verLessThan('matlab','9.1')
+                y = exp( -bsxfun(@times, k, bsxfun(@power, x, tau)) ); % bsxfun equivalent of exp(-k .* x.^tau)
+            else
+                % use new array broadcasting in 2016b
+                y = exp( - k .* x.^tau );
+            end
+        end
+
+    end
+
+end
diff --git a/ddToolbox/ModelClasses/bens_new_model/ExponentialPower.m b/ddToolbox/ModelClasses/bens_new_model/ExponentialPower.m
new file mode 100644
index 00000000..dc991d6f
--- /dev/null
+++ b/ddToolbox/ModelClasses/bens_new_model/ExponentialPower.m
@@ -0,0 +1,113 @@
+classdef (Abstract) ExponentialPower < Parametric
+
+    properties (Access = private)
+        getDiscountRate % function handle
+    end
+
+    methods (Access = public)
+
+        function obj = ExponentialPower(data, varargin)
+            obj = obj@Parametric(data, varargin{:});
+
+            obj.dfClass = @DF_ExponentialPower;
+
+            % Create variables
+            obj.varList.participantLevel = {'k','tau','alpha','epsilon'};
+            obj.varList.monitored = {'k','tau','alpha','epsilon', 'Rpostpred', 'P', 'VA', 'VB'};
+
+            %% Plotting
+            obj.plotFuncs.clusterPlotFunc = @plotExpPowerclusters;
+
+        end
+
+        function conditionalDiscountRates(obj, reward, plotFlag)
+            error('Not applicable to this model')
+        end
+
+        function conditionalDiscountRates_GroupLevel(obj, reward, plotFlag)
+            error('Not applicable to this model')
+        end
+
+        function experimentPlot(obj)
+
+            names = obj.data.getIDnames('all');
+
+            for ind = 1:numel(names)
+                fh = figure('Name', ['participant: ' names{ind}]);
+                latex_fig(12, 10, 3)
+
+                %% Set up psychometric function
+                psycho = PsychometricFunction('samples', obj.coda.getSamplesAtIndex(ind,{'alpha','epsilon'}));
+
+                %% plot bivariate distribution of alpha, epsilon
+                subplot(1,4,1)
+                samples = obj.coda.getSamplesAtIndex(ind,{'alpha','epsilon'});
+                mcmc.BivariateDistribution(...
+                    samples.epsilon(:),...
+                    samples.alpha(:),...
+                    'xLabel','error rate, $\epsilon$',...
+                    'ylabel','comparison acuity, $\alpha$',...
+                    'pointEstimateType',obj.pointEstimateType,...
+                    'plotStyle', 'hist',...
+                    'axisSquare', true);
+
+                %% Plot the psychometric function
+                subplot(1,4,2)
+                psycho.plot()
+
+                %% Set up discount function
+                ksamples = obj.coda.getSamplesAtIndex(ind,{'k','tau'});
+                % don't plot if we don't have any samples. This is expected
+                % to happen if we are currently looking at the group-level
+                % unobserved participant and we are analysing a model
+                % without group level inferences (ie the mixed or separate
+                % models)
+                discountFunction = DF_ExponentialPower('samples', ksamples );
+                % add data: TODO: streamline this on object creation ~~~~~
+                % NOTE: we don't have data for group-level
+                data_struct = obj.data.getExperimentData(ind);
+                data_object = DataFile(data_struct);
+                discountFunction.data = data_object;
+                % ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+                % TODO: this checking needs to be implemented in a
+                % smoother, more robust way
+                if ~isempty(ksamples) || ~any(isnan(ksamples))
+                    %% plot distribution of (k, tau)
+                    subplot(1,4,3)
+                    %discountFunction.plotParameters()
+                    samples = obj.coda.getSamplesAtIndex(ind,{'k','tau'});
+                    mcmc.BivariateDistribution(...
+                        samples.k(:),...
+                        samples.tau(:),...
+                        'xLabel','discount rate, $k$',...
+                        'ylabel','time exponent, $\tau$',...
+                        'pointEstimateType',obj.pointEstimateType,...
+                        'plotStyle', 'hist',...
+                        'axisSquare', true);
+
+                    %% plot discount function
+                    subplot(1,4,4)
+                    discountFunction.plot()
+                end
+
+
+                if obj.shouldExportPlots
+                    myExport(obj.savePath, 'expt',...
+                        'prefix', names{ind},...
+                        'suffix', obj.modelFilename,...
+                        'formats', {'png'});
+                end
+
+                close(fh)
+            end
+        end
+
+    end
+
+
+    methods (Abstract)
+        initialiseChainValues
+    end
+
+end
diff --git a/ddToolbox/ModelClasses/bens_new_model/ModelHierarchicalExpPower.m b/ddToolbox/ModelClasses/bens_new_model/ModelHierarchicalExpPower.m
new file mode 100644
index 00000000..9291e435
--- /dev/null
+++ b/ddToolbox/ModelClasses/bens_new_model/ModelHierarchicalExpPower.m
@@ -0,0 +1,36 @@
+classdef ModelHierarchicalExpPower < ExponentialPower
+    %ModelHierarchicalExpPower A model to estimate discounting according to the 2-parameter exponential power discount function.
+    % All parameters are estimated hierarchically.
+
+    methods (Access = public)
+
+        function obj = ModelHierarchicalExpPower(data, varargin)
+            obj = obj@ExponentialPower(data, varargin{:});
+            obj.modelFilename = 'hierarchicalExpPower';
+            obj = obj.addUnobservedParticipant('GROUP');
+
+            % MUST CALL THIS METHOD AT THE END OF ALL MODEL-SUBCLASS CONSTRUCTORS
+            obj = obj.conductInference();
+        end
+
+    end
+
+
+    methods
+
+        function initialParams = initialiseChainValues(obj, nchains)
+            % Generate initial values of the root nodes
+            for chain = 1:nchains
+                initialParams(chain).groupKmu = normrnd(0.001,0.1);
+                initialParams(chain).groupKsigma = rand*5;
+                initialParams(chain).groupW = rand;
+                initialParams(chain).groupALPHAmu = rand*10;
+                initialParams(chain).groupALPHAsigma = rand*5;
+
+                % TODO: prior over group-level tau parameters
+            end
+        end
+
+    end
+
+end
diff --git a/ddToolbox/ModelClasses/bens_new_model/ModelMixedExpPower.m b/ddToolbox/ModelClasses/bens_new_model/ModelMixedExpPower.m
new file mode 100644
index 00000000..7211a5d0
--- /dev/null
+++ b/ddToolbox/ModelClasses/bens_new_model/ModelMixedExpPower.m
@@ -0,0 +1,29 @@
+classdef ModelMixedExpPower < ExponentialPower
+    %ModelMixedExpPower
+
+    methods (Access = public)
+        function obj = ModelMixedExpPower(data, varargin)
+            obj = obj@ExponentialPower(data, varargin{:});
+            obj.modelFilename = 'mixedExpPower';
+            obj = obj.addUnobservedParticipant('GROUP');
+
+            % MUST CALL THIS METHOD AT THE END OF ALL MODEL-SUBCLASS CONSTRUCTORS
+            obj = obj.conductInference();
+        end
+    end
+
+    methods
+
+        function initialParams = initialiseChainValues(obj, nchains)
+            % Generate initial values of the root nodes
+            nExperimentFiles = obj.data.getNExperimentFiles();
+            for chain = 1:nchains
+                initialParams(chain).groupW = rand;
+                initialParams(chain).groupALPHAmu = rand*100;
+                initialParams(chain).groupALPHAsigma = rand*100;
+            end
+        end
+
+    end
+
+end
diff --git a/ddToolbox/ModelClasses/bens_new_model/ModelSeparateExpPower.m b/ddToolbox/ModelClasses/bens_new_model/ModelSeparateExpPower.m
new file mode 100644
index 00000000..e06eeaf2
--- /dev/null
+++ b/ddToolbox/ModelClasses/bens_new_model/ModelSeparateExpPower.m
@@ -0,0 +1,32 @@
+classdef ModelSeparateExpPower < ExponentialPower
+
+
+    methods (Access = public)
+
+        function obj = ModelSeparateExpPower(data, varargin)
+            obj = obj@ExponentialPower(data, varargin{:});
+            obj.modelFilename = 'separateExpPower';
+
+            % MUST CALL THIS METHOD AT THE END OF ALL MODEL-SUBCLASS CONSTRUCTORS
+            obj = obj.conductInference();
+        end
+
+    end
+
+
+    methods
+
+        function initialParams = initialiseChainValues(obj, nchains)
+            % Generate initial values of the root nodes
+            nExperimentFiles = obj.data.getNExperimentFiles();
+            for chain = 1:nchains
+                initialParams(chain).k = unifrnd(0, 0.5, [nExperimentFiles,1]);
+                initialParams(chain).tau = unifrnd(0.01, 2, [nExperimentFiles,1]);
+                initialParams(chain).epsilon = 0.1 + rand([nExperimentFiles,1])/10;
+                initialParams(chain).alpha = abs(normrnd(0.01,10,[nExperimentFiles,1]));
+            end
+        end
+
+    end
+
+end
diff --git a/ddToolbox/ModelClasses/bens_new_model/README.md b/ddToolbox/ModelClasses/bens_new_model/README.md
new file mode 100644
index 00000000..3d925b4a
--- /dev/null
+++ b/ddToolbox/ModelClasses/bens_new_model/README.md
@@ -0,0 +1,5 @@
+# Ben's new discounting function
+
+As far as I know this new discount function is novel. The discount fraction is described by
+
+discount fraction = exp(-k * delay^tau)
\ No newline at end of file
diff --git a/ddToolbox/ModelClasses/hyperbolic_magnitide_effect_models/Hyperbolic1MagEffect.m b/ddToolbox/ModelClasses/hyperbolic_magnitude_effect_models/Hyperbolic1MagEffect.m
similarity index 100%
rename from ddToolbox/ModelClasses/hyperbolic_magnitide_effect_models/Hyperbolic1MagEffect.m
rename to ddToolbox/ModelClasses/hyperbolic_magnitude_effect_models/Hyperbolic1MagEffect.m
diff --git a/ddToolbox/ModelClasses/hyperbolic_magnitide_effect_models/ModelHierarchicalME.m b/ddToolbox/ModelClasses/hyperbolic_magnitude_effect_models/ModelHierarchicalME.m
similarity index 100%
rename from ddToolbox/ModelClasses/hyperbolic_magnitide_effect_models/ModelHierarchicalME.m
rename to ddToolbox/ModelClasses/hyperbolic_magnitude_effect_models/ModelHierarchicalME.m
diff --git a/ddToolbox/ModelClasses/hyperbolic_magnitide_effect_models/ModelHierarchicalMEUpdated.m b/ddToolbox/ModelClasses/hyperbolic_magnitude_effect_models/ModelHierarchicalMEUpdated.m
similarity index 100%
rename from ddToolbox/ModelClasses/hyperbolic_magnitide_effect_models/ModelHierarchicalMEUpdated.m
rename to ddToolbox/ModelClasses/hyperbolic_magnitude_effect_models/ModelHierarchicalMEUpdated.m
diff --git a/ddToolbox/ModelClasses/hyperbolic_magnitide_effect_models/ModelHierarchicalME_MVNORM.m b/ddToolbox/ModelClasses/hyperbolic_magnitude_effect_models/ModelHierarchicalME_MVNORM.m
similarity index 100%
rename from ddToolbox/ModelClasses/hyperbolic_magnitide_effect_models/ModelHierarchicalME_MVNORM.m
rename to ddToolbox/ModelClasses/hyperbolic_magnitude_effect_models/ModelHierarchicalME_MVNORM.m
diff --git a/ddToolbox/ModelClasses/hyperbolic_magnitide_effect_models/ModelMixedME.m b/ddToolbox/ModelClasses/hyperbolic_magnitude_effect_models/ModelMixedME.m
similarity index 100%
rename from ddToolbox/ModelClasses/hyperbolic_magnitide_effect_models/ModelMixedME.m
rename to ddToolbox/ModelClasses/hyperbolic_magnitude_effect_models/ModelMixedME.m
diff --git a/ddToolbox/ModelClasses/hyperbolic_magnitide_effect_models/ModelSeparateME.m b/ddToolbox/ModelClasses/hyperbolic_magnitude_effect_models/ModelSeparateME.m
similarity index 100%
rename from ddToolbox/ModelClasses/hyperbolic_magnitide_effect_models/ModelSeparateME.m
rename to ddToolbox/ModelClasses/hyperbolic_magnitude_effect_models/ModelSeparateME.m
diff --git a/ddToolbox/discounting-plot-functions/plotExpPowerclusters.m b/ddToolbox/discounting-plot-functions/plotExpPowerclusters.m
new file mode 100644
index 00000000..fcaf88ae
--- /dev/null
+++ b/ddToolbox/discounting-plot-functions/plotExpPowerclusters.m
@@ -0,0 +1,85 @@
+function plotExpPowerclusters(mcmcContainer, data, col, modelType, plotOptions)
+
+% TODO:
+% remove input:
+% - mcmcContainer, just pass in the data
+% - data, just pass in
+% rename to plotBivariateDistributions
+
+
+% plot posteriors over (k,tau) for all participants, as contour plots
+
+probMass = 0.5;
+
+figure(12), clf
+
+% build samples
+for p = 1:data.getNExperimentFiles()
+    k(:,p) = mcmcContainer.getSamplesFromExperimentAsMatrix(p, {'k'});
+    tau(:,p) = mcmcContainer.getSamplesFromExperimentAsMatrix(p, {'tau'});
+end
+
+%% plot all actual participants
+mcBivariateParticipants = mcmc.BivariateDistribution(...
+    k(:,[1:data.getNRealExperimentFiles()]),...
+    tau(:,[1:data.getNRealExperimentFiles()]),...
+    'xLabel','k',...
+    'yLabel','tau',...
+    'plotStyle','contour',...
+    'probMass',probMass,...
+    'pointEstimateType','mode',...
+    'patchProperties',definePlotOptions4Participant(col));
+
+% TODO: enable this functionality in BivariateDistribution
+% % plot numbers
+% for p = 1:data.getNExperimentFiles()
+%     text(mcBivariate.mode(1),mcBivariate.mode(2),...
+%         sprintf('%d',p),...
+%         'HorizontalAlignment','center',...
+%         'VerticalAlignment','middle',...
+%         'FontSize',9,...
+%         'Color',col)
+% end
+
+% keep axes zoomed in on all participants
+axis tight
+participantAxisBounds = axis;
+
+%% plot unobserved participant (ie group level) if they exist
+k_group = k(:,data.getNExperimentFiles());
+tau_group = tau(:,data.getNExperimentFiles());
+if ~any(isnan(k(:,end))) && ~any(isnan(k_group)) && ~any(isnan(tau_group)) % do we have (k,tau) samples for the group-level?
+    if data.isUnobservedPartipantPresent()
+        mcBivariateGroup = mcmc.BivariateDistribution(...
+            k_group,...
+            tau_group,... %xLabel',variableNames{1},'yLabel',variableNames{2},...
+            'plotStyle','contour',...
+            'probMass',probMass,...
+            'pointEstimateType', plotOptions.pointEstimateType,...
+            'patchProperties', definePlotOptions4Group(col));
+    end
+end
+
+axis(participantAxisBounds)
+set(gca,'XAxisLocation','origin',...
+    'YAxisLocation','origin')
+drawnow
+
+if plotOptions.shouldExportPlots
+    myExport(plotOptions.savePath, 'summary_plot',...
+        'suffix', modelType,...
+        'formats', {'png'})
+end
+
+    function plotOpts = definePlotOptions4Participant(col)
+        plotOpts = {'FaceAlpha', 0.1,...
+            'FaceColor', col,...
+            'LineStyle', 'none'};
+    end
+
+    function plotOpts = definePlotOptions4Group(col)
+        plotOpts = {'FaceColor', 'none',...
+            'EdgeColor', col,...
+            'LineWidth', 2};
+    end
+end
diff --git a/ddToolbox/models/separateExpPower.jags b/ddToolbox/models/separateExpPower.jags
new file mode 100644
index 00000000..2dcafe5a
--- /dev/null
+++ b/ddToolbox/models/separateExpPower.jags
@@ -0,0 +1,30 @@
+model{
+
+# TODO: CURRENTLY GUESSTIMATING THESE HYPERPARAMETERS!
+K_MEAN <- 0
+K_PRECISION <- 1/(0.01)
+
+
+for (p in 1:(nRealExperimentFiles)){
+    k[p] ~ dnorm(K_MEAN, K_PRECISION)
+    tau[p] ~ dnorm(1, 1/1^2) T(0,)
+    epsilon[p] ~ dbeta(1.1 , 10.9) T(,0.5)
+    alpha[p] ~ dexp(0.01)
+}
+
+# neither phi() nor exp() can be vectorised
+
+for (t in 1:length(ID)) {
+    # calculate present subjective value for each reward
+    VA[t] <- A[t] * (exp( -k[ID[t]] * (DA[t]^tau[ID[t]]) ) )
+    VB[t] <- B[t] * (exp( -k[ID[t]] * (DB[t]^tau[ID[t]]) ) )
+
+    # Psychometric function
+    P[t] <- epsilon[ID[t]] + (1-2*epsilon[ID[t]]) * phi( (VB[t]-VA[t]) / alpha[ID[t]] )
+
+    # response likelihood
+    R[t] ~ dbern(P[t])         # likelihood of actual response
+    Rpostpred[t] ~ dbern(P[t]) # posterior predicted response
+}
+
+}
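Aside (not part of the patch, and not ddToolbox API): a minimal MATLAB sketch of the deterministic part of separateExpPower.jags above, i.e. the exponential power discount fraction exp(-k * delay^tau) and the lapse-rate psychometric function, evaluated for a single hypothetical trial. All parameter and reward values are invented purely for illustration.

    % illustrative (made-up) parameter values
    k       = 0.01;   % discount rate
    tau     = 0.9;    % time exponent
    epsilon = 0.05;   % lapse / error rate
    alpha   = 2;      % comparison acuity

    A = 50;   DA = 0;    % sooner reward and its delay
    B = 100;  DB = 30;   % later reward and its delay

    % present subjective values, using discount fraction = exp(-k * delay^tau)
    VA = A * exp(-k * DA^tau);
    VB = B * exp(-k * DB^tau);

    % psychometric function: P(choose delayed) = epsilon + (1-2*epsilon)*Phi((VB-VA)/alpha)
    Phi = @(z) 0.5 * (1 + erf(z ./ sqrt(2)));   % standard normal CDF
    P_chooseDelayed = epsilon + (1 - 2*epsilon) * Phi((VB - VA) / alpha);

With these made-up numbers the discounted value of the delayed reward is about 81, which exceeds the immediate 50, so the predicted probability of choosing the delayed reward is roughly 0.95.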