From 694f59386c51760a3d066b7204545e28065e7075 Mon Sep 17 00:00:00 2001
From: marcolivierarsenault
Date: Sun, 3 Nov 2024 17:56:20 +0000
Subject: [PATCH] Update gh-pages to output generated at 843f699

---
 feed.xml                   | 4 ++--
 lossless-triplet-loss.html | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/feed.xml b/feed.xml
index 964cc5d..285f080 100644
--- a/feed.xml
+++ b/feed.xml
@@ -5,8 +5,8 @@
     Data Blog by Marc-Olivier Arsenault
     https://coffeeanddata.ca/
-    Wed, 25 Sep 2024 01:59:41 +0000
-    Wed, 25 Sep 2024 01:59:41 +0000
+    Sun, 03 Nov 2024 17:56:13 +0000
+    Sun, 03 Nov 2024 17:56:13 +0000
     Jekyll v4.3.4

diff --git a/lossless-triplet-loss.html b/lossless-triplet-loss.html
index b93a1cf..825e9f1 100644
--- a/lossless-triplet-loss.html
+++ b/lossless-triplet-loss.html
@@ -139,7 +139,7 @@ THE PROBLEM

     loss = K.maximum(basic_loss,0.0)
     return loss
-    
+
 def create_base_network(in_dims, out_dims):
     """
     Base network to be shared.
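For context, the clipped loss in the hunk above is the tail of a standard Keras triplet loss. A minimal sketch of the whole function, assuming the usual [anchor | positive | negative] concatenated prediction and a margin alpha (the slicing and the 0.4 default are assumptions, not shown in the patch):

from tensorflow.keras import backend as K

def triplet_loss(y_true, y_pred, alpha=0.4):
    # split the concatenated prediction back into the three embeddings
    third = y_pred.shape.as_list()[-1] // 3
    anchor = y_pred[:, 0:third]
    positive = y_pred[:, third:2 * third]
    negative = y_pred[:, 2 * third:3 * third]
    # squared Euclidean distances to the anchor
    pos_dist = K.sum(K.square(anchor - positive), axis=1)
    neg_dist = K.sum(K.square(anchor - negative), axis=1)
    basic_loss = pos_dist - neg_dist + alpha
    # the clamp at the heart of THE PROBLEM: once the margin is met,
    # every example is flattened to 0 and stops producing gradient
    loss = K.maximum(basic_loss, 0.0)
    return loss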

@@ -155,7 +155,7 @@ THE PROBLEM
     model.add(BatchNormalization())
     return model
-    
+
 in_dims = (N_MINS, n_feat)
 out_dims = N_FACTORS
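The hunk above ends the shared encoder and sets its input/output dimensions. A minimal sketch of how such a base network is typically wired into a triplet model; the layer choices before the BatchNormalization, the Input/concatenate wiring, and the placeholder values for N_MINS, n_feat, and N_FACTORS are all assumptions, not shown in the patch:

from tensorflow.keras.layers import (Input, LSTM, Dense,
                                     BatchNormalization, concatenate)
from tensorflow.keras.models import Model, Sequential

N_MINS, n_feat, N_FACTORS = 10, 5, 3  # placeholder values

def create_base_network(in_dims, out_dims):
    """Base network to be shared (hypothetical layer choices)."""
    model = Sequential()
    model.add(LSTM(32, input_shape=in_dims))
    # sigmoid bounds the embedding dimensions; the trailing
    # BatchNormalization mirrors the hunk's context lines
    model.add(Dense(out_dims, activation='sigmoid'))
    model.add(BatchNormalization())
    return model

in_dims = (N_MINS, n_feat)
out_dims = N_FACTORS

base = create_base_network(in_dims, out_dims)
anchor_in = Input(shape=in_dims)
positive_in = Input(shape=in_dims)
negative_in = Input(shape=in_dims)
# one encoder reused three times, so all three branches share weights;
# the concatenated output is what the loss slices back apart
merged = concatenate([base(anchor_in), base(positive_in), base(negative_in)])
model = Model(inputs=[anchor_in, positive_in, negative_in], outputs=merged)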

@@ -240,7 +240,7 @@ OTHER LOSSES
-Another famous loss function, the contrastive loss described by Yann LeCun and his team in their paper Dimensionality Reduction by Learning an Invariant Mapping, also maxes out the negative term, which creates the same issue.
+Another famous loss function, the contrastive loss described by Yann LeCun and his team in their paper Dimensionality Reduction by Learning an Invariant Mapping, also maxes out the negative term, which creates the same issue.

 The Contrastive Loss Function, (LeCun)
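The caption above stands in for a formula image in the post. For reference, the contrastive loss as defined by Hadsell, Chopra and LeCun in that paper, with Y = 0 for similar pairs, Y = 1 for dissimilar pairs, D_W the learned distance, and m the margin:

% LaTeX transcription of the referenced formula; the max(0, m - D_W)
% factor is the same kind of clamp the paragraph above objects to
L(W, Y, \vec{X}_1, \vec{X}_2) =
  (1 - Y)\,\frac{1}{2}\,(D_W)^2
  + Y\,\frac{1}{2}\,\left(\max(0,\; m - D_W)\right)^2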

@@ -323,7 +323,7 @@ FIRST RESULTS
     # distance between the anchor and the negative
     neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor,negative)),1)
-    #Non Linear Values 
+    #Non Linear Values
     # -ln(-x/N+1)
     pos_dist = -tf.log(-tf.divide((pos_dist),beta)+1+epsilon)
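For context, the hunk above sits inside the post's lossless triplet loss, where the -ln(-x/N+1) mapping replaces the max() clamp. A minimal sketch of the full function reconstructed around those lines; the slicing layout and the beta = N default follow the surrounding post, and tf.math.log stands in for the TF1-era tf.log:

import tensorflow as tf

def lossless_triplet_loss(y_true, y_pred, N=3, beta=None, epsilon=1e-8):
    # beta defaults to N, the embedding size
    if beta is None:
        beta = N
    # y_pred is assumed to be [anchor | positive | negative], each of
    # size N, with embeddings bounded so distances stay below N
    anchor = y_pred[:, 0:N]
    positive = y_pred[:, N:2 * N]
    negative = y_pred[:, 2 * N:3 * N]
    # squared Euclidean distances to the anchor
    pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
    neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
    # non-linear mapping -ln(-x/beta + 1): unlike K.maximum(..., 0.0),
    # it never flattens to a constant, so the gradient is preserved
    pos_dist = -tf.math.log(-tf.divide(pos_dist, beta) + 1 + epsilon)
    neg_dist = -tf.math.log(-tf.divide(N - neg_dist, beta) + 1 + epsilon)
    return neg_dist + pos_dist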