diff --git a/lib/Onyx.js b/lib/Onyx.js
index a9993a77..6e66250a 100644
--- a/lib/Onyx.js
+++ b/lib/Onyx.js
@@ -909,6 +909,24 @@ function hasPendingMergeForKey(key) {
     return Boolean(mergeQueue[key]);
 }
 
+/**
+ * We generally want to remove top-level nullish values from objects written to disk and cache, because it decreases the amount of data stored in memory and on disk.
+ * On native, when merging an existing value with new changes, SQLite will use JSON_PATCH, which removes top-level nullish values.
+ * To be consistent with the behaviour for merge, we'll also want to remove nullish values for "set" operations.
+ * On web, IndexedDB will keep the top-level keys along with a null value and this uses up storage and memory.
+ * This method will ensure that keys for null values are removed before an object is written to disk and cache so that all platforms are storing the data in the same efficient way.
+ * @private
+ * @param {*} value
+ * @returns {*}
+ */
+function removeNullObjectValues(value) {
+    if (_.isArray(value) || !_.isObject(value)) {
+        return value;
+    }
+
+    return _.omit(value, objectValue => _.isNull(objectValue));
+}
+
 /**
  * Write a value to our store with the given key
  *
@@ -926,18 +944,20 @@ function set(key, value) {
         Logger.logAlert(`Onyx.set() called after Onyx.merge() for key: ${key}. It is recommended to use set() or merge() not both.`);
     }
 
-    const hasChanged = cache.hasValueChanged(key, value);
+    const valueWithNullRemoved = removeNullObjectValues(value);
+
+    const hasChanged = cache.hasValueChanged(key, valueWithNullRemoved);
 
     // This approach prioritizes fast UI changes without waiting for data to be stored in device storage.
-    broadcastUpdate(key, value, hasChanged, 'set');
+    broadcastUpdate(key, valueWithNullRemoved, hasChanged, 'set');
 
     // If the value has not changed, calling Storage.setItem() would be redundant and a waste of performance, so return early instead.
     if (!hasChanged) {
         return Promise.resolve();
     }
 
-    return Storage.setItem(key, value)
-        .catch(error => evictStorageAndRetry(error, set, key, value));
+    return Storage.setItem(key, valueWithNullRemoved)
+        .catch(error => evictStorageAndRetry(error, set, key, valueWithNullRemoved));
 }
 
 /**
@@ -1034,19 +1054,21 @@ function merge(key, changes) {
         .then((existingValue) => {
             try {
                 // We first only merge the changes, so we can provide these to the native implementation (SQLite uses only delta changes in "JSON_PATCH" to merge)
-                const batchedChanges = applyMerge(undefined, mergeQueue[key]);
+                let batchedChanges = applyMerge(undefined, mergeQueue[key]);
 
                 // Clean up the write queue so we
                 // don't apply these changes again
                 delete mergeQueue[key];
 
                 // After that we merge the batched changes with the existing value
-                let modifiedData = applyMerge(existingValue, [batchedChanges]);
-
-                // For objects, the key for null values needs to be removed from the object to ensure the value will get removed from storage completely.
-                // On native, SQLite will remove top-level keys that are null. To be consistent, we remove them on web too.
-                if (!_.isArray(modifiedData) && _.isObject(modifiedData)) {
-                    modifiedData = _.omit(modifiedData, value => _.isNull(value));
+                const modifiedData = removeNullObjectValues(applyMerge(existingValue, [batchedChanges]));
+
+                // On native platforms we use SQLite which utilises JSON_PATCH to merge changes.
+                // JSON_PATCH generally removes top-level nullish values from the stored object.
+                // When there is no existing value though, SQLite will just insert the changes as a new value and thus the top-level nullish values won't be removed.
+                // Therefore we need to remove nullish values from the `batchedChanges` which are sent to the SQLite, if no existing value is present.
+                if (!existingValue) {
+                    batchedChanges = removeNullObjectValues(batchedChanges);
                 }
 
                 const hasChanged = cache.hasValueChanged(key, modifiedData);
@@ -1376,7 +1398,6 @@ const Onyx = {
     multiSet,
     merge,
     mergeCollection,
-    hasPendingMergeForKey,
     update,
     clear,
     getAllKeys,
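A minimal sketch (not part of the patch) of how the new removeNullObjectValues helper behaves, assuming underscore.js is bound to `_` as it is in Onyx.js: only top-level null keys are dropped, so arrays, primitives, and nested nulls pass through untouched.

    const _ = require('underscore');

    // Copied from the patch above, for illustration only.
    function removeNullObjectValues(value) {
        if (_.isArray(value) || !_.isObject(value)) {
            return value;
        }

        return _.omit(value, objectValue => _.isNull(objectValue));
    }

    removeNullObjectValues({a: 1, b: null, c: {d: null}}); // => {a: 1, c: {d: null}} (nested null is kept)
    removeNullObjectValues([1, null, 2]);                  // => [1, null, 2] (arrays are returned as-is)
    removeNullObjectValues(null);                          // => null (non-objects are returned as-is)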