x_out optional even for multi-task and bug fix with entropy related posterior quantities
MarcusMNoack committed Nov 27, 2024
1 parent 4910147 commit c06d12b
Showing 12 changed files with 146 additions and 146 deletions.
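The user-facing effect of the x_out change is visible in the notebook hunks below: multi-task posterior calls no longer require an explicit x_out. A minimal sketch, assuming my_gp2 is an already trained two-task fvgp model as set up in MultiTaskTest.ipynb; the prediction grid here is illustrative and stands in for the notebook's x_pred1d:

```python
import numpy as np

# illustrative 1d prediction grid (the notebook builds its own x_pred1d)
x_pred = np.linspace(0., 1., 50).reshape(50, 1)

# before this commit, multi-task posterior calls required x_out explicitly
mean = my_gp2.posterior_mean(x_pred=x_pred, x_out=np.array([0, 1]))["f(x)"]

# after this commit, x_out can be omitted; the notebook then plots
# mean[:, 0] and mean[:, 1], i.e. one column per task
mean = my_gp2.posterior_mean(x_pred=x_pred)["f(x)"]
```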
34 changes: 26 additions & 8 deletions docs/source/examples/MultiTaskTest.ipynb
@@ -16,8 +16,9 @@
"metadata": {},
"outputs": [],
"source": [
"##first install the newest version of fvgp\n",
"#!pip install fvgp~=4.5.3"
"##first, install the newest version of fvgp\n",
"#!pip install fvgp~=4.5.6\n",
"#!pip install torch"
]
},
{
@@ -133,9 +134,10 @@
"outputs": [],
"source": [
"#mean and standard deviation\n",
"mean = my_gp2.posterior_mean(x_pred=x_pred1d.reshape(50,1), x_out=np.array([0,1]))[\"f(x)\"]\n",
"mean = my_gp2.posterior_mean(x_pred=x_pred1d.reshape(50,1))[\"f(x)\"]\n",
"std = np.sqrt(my_gp2.posterior_covariance(x_pred=x_pred1d.reshape(50,1), x_out=np.array([0,1]))[\"v(x)\"])\n",
"\n",
"\n",
"plt.plot(x_pred1d.reshape(50,1),mean[:,0], label = \"mean task 1\")\n",
"plt.plot(x_pred1d.reshape(50,1),mean[:,1], label = \"mean task 2\")\n",
"plt.scatter(x_data,y_data1) \n",
@@ -411,9 +413,9 @@
"\n",
"\n",
"bounds = np.zeros((n.number_of_hps+2,2))\n",
"bounds[0] = np.array([0.001,10.])\n",
"bounds[1] = np.array([0.001,10.])\n",
"bounds[2:] = np.array([-1,1])\n",
"bounds[0] = np.array([0.1,10.])\n",
"bounds[1] = np.array([0.1,10.])\n",
"bounds[2:] = np.array([0.01,1])\n",
"my_gp2.train(hyperparameter_bounds=bounds,max_iter = 2)"
]
},
@@ -508,13 +510,29 @@
"fig.show()\n",
"\n"
]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "811a3337-a377-44bd-85b7-9a699c299cd0",
+"metadata": {},
+"outputs": [],
+"source": []
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "069a28fb-fecf-42e9-bc27-16fcccec3080",
+"metadata": {},
+"outputs": [],
+"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": "fvgp_dev",
"language": "python",
"name": "python3"
"name": "fvgp_dev"
},
"language_info": {
"codemirror_mode": {
5 changes: 3 additions & 2 deletions docs/source/examples/NonEuclideanInputSpaces.ipynb
@@ -19,7 +19,7 @@
"outputs": [],
"source": [
"#install the newest version of fvgp\n",
"#!pip install fvgp~=4.5.3"
"#!pip install fvgp~=4.5.6"
]
},
{
Expand Down Expand Up @@ -214,7 +214,8 @@
"outputs": [],
"source": [
"x_pred = [\"dwed\",\"dwe\"]\n",
"my_gp2.posterior_mean(x_pred, x_out = np.array([0,1,2,3]))"
"my_gp2.posterior_mean(x_pred, x_out = np.array([0,1,2,3]))\n",
"my_gp2.posterior_mean(x_pred)"
]
},
{
8 changes: 3 additions & 5 deletions docs/source/examples/SingleTaskTest.ipynb
@@ -13,7 +13,7 @@
"id": "a1b1c026",
"metadata": {},
"source": [
"This is the new test for fvgp version 4.3.0 and later."
"This is the new test for fvgp version 4.5.5 and later."
]
},
{
@@ -23,7 +23,7 @@
"metadata": {},
"outputs": [],
"source": [
"#!pip install fvgp~=4.5.3"
"#!pip install fvgp~=4.5.5"
]
},
{
@@ -170,7 +170,7 @@
" [0.001,0.1], #noise\n",
" [0.01,1.] #mean\n",
" ])\n",
"my_gp1.update_gp_data(x_data, y_data, noise_variances_new=np.ones(y_data.shape) * 0.01)\n",
"my_gp1.update_gp_data(x_data, y_data, noise_variances_new=np.ones(y_data.shape) * 0.01) #this is just for testing, not needed\n",
"print(\"Standard Training\")\n",
"my_gp1.train(hyperparameter_bounds=hps_bounds)\n",
"print(\"Global Training\")\n",
@@ -316,15 +316,13 @@
"my_gp1.gp_entropy(x_test)\n",
"my_gp1.gp_entropy_grad(x_test, 0)\n",
"my_gp1.gp_kl_div(x_test, np.ones((len(x_test))), np.identity((len(x_test))))\n",
"my_gp1.gp_kl_div_grad(x_test, np.ones((len(x_test))), np.identity((len(x_test))), 0)\n",
"my_gp1.gp_relative_information_entropy(x_test)\n",
"my_gp1.gp_relative_information_entropy_set(x_test)\n",
"my_gp1.posterior_covariance(x_test)\n",
"my_gp1.posterior_covariance_grad(x_test)\n",
"my_gp1.posterior_mean(x_test)\n",
"my_gp1.posterior_mean_grad(x_test)\n",
"my_gp1.posterior_probability(x_test, np.ones((len(x_test))), np.identity((len(x_test))))\n",
"my_gp1.posterior_probability_grad(x_test, np.ones((len(x_test))), np.identity((len(x_test))),0)\n",
"\n"
]
},
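The hunk above exercises the entropy-related posterior quantities named in the commit message; the notebook drops its calls to gp_kl_div_grad and posterior_probability_grad (the latter method is removed from fvgp/gp.py further down). A sketch of the surviving calls, assuming my_gp1 is the trained single-task GP from this notebook and using an illustrative x_test:

```python
import numpy as np

# illustrative stand-in for the notebook's x_test
x_test = np.linspace(0., 1., 10).reshape(-1, 1)

# GP entropy at x_test and its gradient with respect to direction 0
my_gp1.gp_entropy(x_test)
my_gp1.gp_entropy_grad(x_test, 0)

# KL divergence against a comparison Gaussian with a mean vector of ones
# and identity covariance, mirroring the notebook call
my_gp1.gp_kl_div(x_test, np.ones(len(x_test)), np.identity(len(x_test)))

# relative-information-entropy quantities exercised in the notebook
my_gp1.gp_relative_information_entropy(x_test)
my_gp1.gp_relative_information_entropy_set(x_test)
```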
3 changes: 2 additions & 1 deletion docs/source/examples/gp2ScaleTest.ipynb
@@ -19,7 +19,8 @@
"outputs": [],
"source": [
"##first install the newest version of fvgp\n",
"#!pip install fvgp~=4.5.3"
"#!pip install fvgp~=4.5.6\n",
"#!pip install imate"
]
},
{
21 changes: 15 additions & 6 deletions examples/MultiTaskTest.ipynb
@@ -17,7 +17,8 @@
"outputs": [],
"source": [
"##first, install the newest version of fvgp\n",
"#!pip install fvgp~=4.5.5"
"#!pip install fvgp~=4.5.6\n",
"#!pip install torch"
]
},
{
@@ -412,9 +413,9 @@
"\n",
"\n",
"bounds = np.zeros((n.number_of_hps+2,2))\n",
"bounds[0] = np.array([0.001,10.])\n",
"bounds[1] = np.array([0.001,10.])\n",
"bounds[2:] = np.array([-1,1])\n",
"bounds[0] = np.array([0.1,10.])\n",
"bounds[1] = np.array([0.1,10.])\n",
"bounds[2:] = np.array([0.01,1])\n",
"my_gp2.train(hyperparameter_bounds=bounds,max_iter = 2)"
]
},
@@ -517,13 +518,21 @@
"metadata": {},
"outputs": [],
"source": []
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"id": "069a28fb-fecf-42e9-bc27-16fcccec3080",
+"metadata": {},
+"outputs": [],
+"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": "fvgp_dev",
"language": "python",
"name": "python3"
"name": "fvgp_dev"
},
"language_info": {
"codemirror_mode": {
5 changes: 3 additions & 2 deletions examples/NonEuclideanInputSpaces.ipynb
@@ -19,7 +19,7 @@
"outputs": [],
"source": [
"#install the newest version of fvgp\n",
"#!pip install fvgp~=4.5.3"
"#!pip install fvgp~=4.5.6"
]
},
{
@@ -214,7 +214,8 @@
"outputs": [],
"source": [
"x_pred = [\"dwed\",\"dwe\"]\n",
"my_gp2.posterior_mean(x_pred, x_out = np.array([0,1,2,3]))"
"my_gp2.posterior_mean(x_pred, x_out = np.array([0,1,2,3]))\n",
"my_gp2.posterior_mean(x_pred)"
]
},
{
4 changes: 1 addition & 3 deletions examples/SingleTaskTest.ipynb
@@ -170,7 +170,7 @@
" [0.001,0.1], #noise\n",
" [0.01,1.] #mean\n",
" ])\n",
"my_gp1.update_gp_data(x_data, y_data, noise_variances_new=np.ones(y_data.shape) * 0.01)\n",
"my_gp1.update_gp_data(x_data, y_data, noise_variances_new=np.ones(y_data.shape) * 0.01) #this is just for testing, not needed\n",
"print(\"Standard Training\")\n",
"my_gp1.train(hyperparameter_bounds=hps_bounds)\n",
"print(\"Global Training\")\n",
@@ -316,15 +316,13 @@
"my_gp1.gp_entropy(x_test)\n",
"my_gp1.gp_entropy_grad(x_test, 0)\n",
"my_gp1.gp_kl_div(x_test, np.ones((len(x_test))), np.identity((len(x_test))))\n",
"my_gp1.gp_kl_div_grad(x_test, np.ones((len(x_test))), np.identity((len(x_test))), 0)\n",
"my_gp1.gp_relative_information_entropy(x_test)\n",
"my_gp1.gp_relative_information_entropy_set(x_test)\n",
"my_gp1.posterior_covariance(x_test)\n",
"my_gp1.posterior_covariance_grad(x_test)\n",
"my_gp1.posterior_mean(x_test)\n",
"my_gp1.posterior_mean_grad(x_test)\n",
"my_gp1.posterior_probability(x_test, np.ones((len(x_test))), np.identity((len(x_test))))\n",
"my_gp1.posterior_probability_grad(x_test, np.ones((len(x_test))), np.identity((len(x_test))),0)\n",
"\n"
]
},
3 changes: 2 additions & 1 deletion examples/gp2ScaleTest.ipynb
@@ -19,7 +19,8 @@
"outputs": [],
"source": [
"##first install the newest version of fvgp\n",
"#!pip install fvgp~=4.5.3"
"#!pip install fvgp~=4.5.6\n",
"#!pip install imate"
]
},
{
2 changes: 0 additions & 2 deletions fvgp/fvgp.py
@@ -266,8 +266,6 @@ class provides all the methods described for the GP (:py:class:`fvgp.GP`) class.
:py:meth:`fvgp.GP.posterior_probability`
-:py:meth:`fvgp.GP.posterior_probability_grad`
Validation Methods:
:py:meth:`fvgp.GP.crps`
31 changes: 1 addition & 30 deletions fvgp/gp.py
@@ -1232,7 +1232,7 @@ def posterior_probability(self, x_pred, comp_mean, comp_cov, x_out=None):
comp_mean: np.ndarray
A vector of mean values, same length as x_pred.
comp_cov: np.nparray
-Covariance matrix, in R^{len(x_pred) times len(x_pred)}
+Covariance matrix, in R^{len(x_pred) x len(x_pred)}
x_out : np.ndarray, optional
Output coordinates in case of multi-task GP use; a numpy array of size (N),
where N is the number evaluation points in the output direction.
@@ -1245,35 +1245,6 @@ def posterior_probability(self, x_pred, comp_mean, comp_cov, x_out=None):
"""
return self.posterior.posterior_probability(x_pred, comp_mean, comp_cov, x_out=x_out)

-def posterior_probability_grad(self, x_pred, comp_mean, comp_cov, direction, x_out=None):
-"""
-Function to compute the gradient of the probability of a probabilistic quantity of interest,
-given the GP posterior at a given point.
-Parameters
-----------
-x_pred : np.ndarray or list
-A numpy array of shape (V x D), interpreted as an array of input point positions, or a list for
-GPs on non-Euclidean input spaces.
-comp_mean: np.ndarray
-A vector of mean values, same length as x_pred.
-comp_cov: np.nparray
-Covariance matrix, in R^{len(x_pred) times len(x_pred)}
-direction : int
-The direction to compute the gradient in.
-x_out : np.ndarray, optional
-Output coordinates in case of multi-task GP use; a numpy array of size (N),
-where N is the number evaluation points in the output direction.
-Usually this is np.ndarray([0,1,2,...]).
-Return
-------
-Solution : dict
-The gradient of the probability of a probabilistic quantity of interest,
-given the GP posterior at a given point.
-"""
-return self.posterior.posterior_probability_grad(x_pred, comp_mean, comp_cov, direction, x_out=x_out)

####################################################################################
####################################################################################
#######################VALIDATION###################################################
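For reference, the posterior_probability signature kept above matches the call exercised in SingleTaskTest.ipynb, while its gradient counterpart posterior_probability_grad is removed by this commit. A minimal usage sketch, assuming my_gp1 is a trained single-task fvgp.GP and with an illustrative x_test standing in for the notebook's:

```python
import numpy as np

# illustrative test inputs (the notebook builds its own x_test)
x_test = np.linspace(0., 1., 20).reshape(-1, 1)

# comparison distribution: a mean vector of length len(x_test) and a
# len(x_test) x len(x_test) covariance matrix, per the docstring above
comp_mean = np.ones(len(x_test))
comp_cov = np.identity(len(x_test))

# probability of that comparison distribution given the GP posterior
res = my_gp1.posterior_probability(x_test, comp_mean, comp_cov)
```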