Commit ca9359e

Merge branch 'master' into MODINVSTOR-1105

obozhko-folio authored Sep 15, 2023
2 parents 17c9125 + e4bddfb
Showing 23 changed files with 1,240 additions and 81 deletions.
6 changes: 6 additions & 0 deletions descriptors/ModuleDescriptor-template.json
@@ -1304,6 +1304,12 @@
]
}
],
"optional": [
{
"id": "user-tenants",
"version": "1.0"
}
],
"permissionSets": [
{
"permissionName": "inventory-storage.instance-relationships.collection.get",
17 changes: 16 additions & 1 deletion pom.xml
@@ -70,7 +70,6 @@
</dependencyManagement>

<dependencies>
<!-- https://mvnrepository.com/artifact/org.marc4j/marc4j -->
<dependency>
<groupId>org.marc4j</groupId>
<artifactId>marc4j</artifactId>
@@ -115,6 +114,11 @@
<artifactId>folio-kafka-wrapper</artifactId>
<version>2.7.0</version>
</dependency>
<dependency>
<groupId>com.github.ben-manes.caffeine</groupId>
<artifactId>caffeine</artifactId>
<version>3.1.3</version>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
@@ -181,6 +185,11 @@
<artifactId>vertx-unit</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.vertx</groupId>
<artifactId>vertx-junit5</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
@@ -208,6 +217,12 @@
<version>5.1.1</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.github.tomakehurst</groupId>
<artifactId>wiremock</artifactId>
<version>2.27.2</version>
<scope>test</scope>
</dependency>
<dependency> <!-- for testcontainers -->
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-slf4j-impl</artifactId>
2 changes: 1 addition & 1 deletion ramls/holdingsrecord.json
@@ -14,7 +14,7 @@
},
"hrid": {
"type": "string",
"description": "the human readable ID, also called eye readable ID. A system-assigned sequential ID which maps to the Instance ID"
"description": "the human readable ID, also called eye readable ID. Unique. On creation it can be provided, otherwise the system assigns the next sequential ID using the settings of the /hrid-settings-storage/hrid-settings API. An update with a different hrid is rejected. An update without hrid is populated with the holdings record's current hrid."
},
"holdingsTypeId": {
"type": "string",
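The hrid description above implies three update-time rules. A minimal sketch of those rules, assuming a hypothetical helper and the generated HoldingsRecord accessors; the module's actual enforcement surfaces as the "239HR" SQL state handled later in this diff:

  // Hypothetical helper illustrating the documented rules; not the module's actual implementation.
  static void applyHridRules(HoldingsRecord existing, HoldingsRecord update) {
    if (update.getHrid() == null) {
      // an update without hrid keeps the holdings record's current hrid
      update.setHrid(existing.getHrid());
    } else if (!update.getHrid().equals(existing.getHrid())) {
      // an update with a different hrid is rejected (reported to the client as HTTP 400)
      throw new IllegalArgumentException("Cannot change hrid of holdings record");
    }
    // on creation without an hrid, the next sequential ID is assigned from the
    // /hrid-settings-storage/hrid-settings configuration (not shown here)
  }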
2 changes: 1 addition & 1 deletion ramls/item.json
@@ -401,7 +401,7 @@
},
"holdingsRecord2": {
"type": "object",
"description": "Item associated holdings record object.",
"description": "Fake property for mod-graphql to determine record relationships.",
"folio:$ref": "holdingsrecord.json",
"readonly": true,
"folio:isVirtual": true,
26 changes: 26 additions & 0 deletions src/main/java/org/folio/persist/HoldingsRepository.java
@@ -5,10 +5,14 @@

import io.vertx.core.Context;
import io.vertx.core.Future;
import io.vertx.core.json.JsonObject;
import io.vertx.sqlclient.Row;
import io.vertx.sqlclient.RowSet;
import io.vertx.sqlclient.Tuple;
import java.util.List;
import java.util.Map;
import org.folio.cql2pgjson.CQL2PgJSON;
import org.folio.dbschema.ObjectMapperTool;
import org.folio.rest.jaxrs.model.HoldingsRecord;
import org.folio.rest.persist.cql.CQLWrapper;

@@ -17,6 +21,28 @@ public HoldingsRepository(Context context, Map<String, String> okapiHeaders) {
super(postgresClient(context, okapiHeaders), HOLDINGS_RECORD_TABLE, HoldingsRecord.class);
}

/**
* Upsert holdings records.
*
* <p>Returns
* { "holdingsRecords": [{"old": {...}, "new": {...}}, ...],
* "items": [{"old": {...}, "new": {...}}, ...]
* }
* providing old and new jsonb content of all updated holdings and items. For a newly
* inserted holding only "new" is provided.
*
* @param holdingsRecords records to insert or update (upsert)
*/
public Future<JsonObject> upsert(List<HoldingsRecord> holdingsRecords) {
try {
var array = ObjectMapperTool.getMapper().writeValueAsString(holdingsRecords);
return postgresClient.selectSingle("SELECT upsert_holdings($1::text::jsonb)", Tuple.of(array))
.map(row -> row.getJsonObject(0));
} catch (Exception e) {
return Future.failedFuture(e);
}
}

/**
* Delete by CQL. For each deleted record return a {@link Row} with the instance id String
* and with the holdings' jsonb String.
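A sketch of how a caller might consume the upsert result documented in the Javadoc above; the variable names are illustrative, and the keys follow the described shape:

  // Illustrative only: read the old/new pairs returned by upsert_holdings.
  holdingsRepository.upsert(holdingsRecords)
    .onSuccess(result -> {
      for (Object o : result.getJsonArray("holdingsRecords")) {
        JsonObject pair = (JsonObject) o;
        JsonObject oldJson = pair.getJsonObject("old"); // null for a newly inserted holding
        JsonObject newJson = pair.getJsonObject("new");
        // oldJson == null means the record was inserted, otherwise it was updated
      }
      // result.getJsonArray("items") carries the same old/new structure for the affected items
    });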
@@ -13,6 +13,7 @@
import org.folio.rest.exceptions.NotFoundException;
import org.folio.rest.exceptions.ValidationException;
import org.folio.rest.jaxrs.model.Errors;
import org.folio.rest.persist.PgExceptionFacade;
import org.folio.rest.persist.PgExceptionUtil;
import org.folio.rest.persist.cql.CQLQueryValidationException;
import org.folio.rest.tools.client.exceptions.ResponseException;
@@ -68,6 +69,10 @@ public static Response failureResponse(Throwable error) {
} else if (error instanceof ResponseException) {
return ((ResponseException) error).getResponse();
}
var sqlState = new PgExceptionFacade(error).getSqlState();
if ("239HR".equals(sqlState)) { // Cannot change hrid of holdings record
return textPlainResponse(400, error);
}
String message = PgExceptionUtil.badRequestMessage(error);
if (message != null) {
return textPlainResponse(400, message);
20 changes: 20 additions & 0 deletions src/main/java/org/folio/services/caches/ConsortiumData.java
@@ -0,0 +1,20 @@
package org.folio.services.caches;

public class ConsortiumData {

private String centralTenantId;
private String consortiumId;

public ConsortiumData(String centralTenantId, String consortiumId) {
this.centralTenantId = centralTenantId;
this.consortiumId = consortiumId;
}

public String getCentralTenantId() {
return centralTenantId;
}

public String getConsortiumId() {
return consortiumId;
}
}
89 changes: 89 additions & 0 deletions src/main/java/org/folio/services/caches/ConsortiumDataCache.java
@@ -0,0 +1,89 @@
package org.folio.services.caches;

import static io.vertx.core.http.HttpMethod.GET;
import static java.net.HttpURLConnection.HTTP_OK;
import static org.folio.okapi.common.XOkapiHeaders.URL;

import com.github.benmanes.caffeine.cache.AsyncCache;
import com.github.benmanes.caffeine.cache.Caffeine;
import io.vertx.core.Future;
import io.vertx.core.Vertx;
import io.vertx.core.buffer.Buffer;
import io.vertx.core.http.HttpClient;
import io.vertx.core.json.JsonArray;
import io.vertx.core.json.JsonObject;
import io.vertx.ext.web.client.HttpRequest;
import io.vertx.ext.web.client.WebClient;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class ConsortiumDataCache {

private static final Logger LOG = LogManager.getLogger(ConsortiumDataCache.class);
private static final String EXPIRATION_TIME_PARAM = "cache.consortium-data.expiration.time.seconds";
private static final String DEFAULT_EXPIRATION_TIME_SECONDS = "300";
private static final String USER_TENANTS_PATH = "/user-tenants?limit=1"; //NOSONAR
private static final String USER_TENANTS_FIELD = "userTenants";
private static final String CENTRAL_TENANT_ID_FIELD = "centralTenantId";
private static final String CONSORTIUM_ID_FIELD = "consortiumId";

private final HttpClient httpClient;
private final AsyncCache<String, Optional<ConsortiumData>> cache;

public ConsortiumDataCache(Vertx vertx, HttpClient httpClient) {
int expirationTime = Integer.parseInt(System.getProperty(EXPIRATION_TIME_PARAM, DEFAULT_EXPIRATION_TIME_SECONDS));
this.httpClient = httpClient;
this.cache = Caffeine.newBuilder()
.expireAfterWrite(expirationTime, TimeUnit.SECONDS)
.executor(task -> vertx.runOnContext(v -> task.run()))
.buildAsync();
}

/**
* Returns consortium data for the specified {@code tenantId}.
*
* @param tenantId - tenant id
* @param headers - okapi headers
* @return future of Optional with consortium data for the specified {@code tenantId};
* if the tenant is not part of any consortium, a future with an empty Optional is returned
*/
public Future<Optional<ConsortiumData>> getConsortiumData(String tenantId, Map<String, String> headers) {
try {
return Future.fromCompletionStage(cache.get(tenantId, (key, executor) -> loadConsortiumData(key, headers)));
} catch (Exception e) {
LOG.warn("getConsortiumData:: Error loading consortium data, tenantId: '{}'", tenantId, e);
return Future.failedFuture(e);
}
}

private CompletableFuture<Optional<ConsortiumData>> loadConsortiumData(String tenantId, Map<String, String> headers) {
String okapiUrl = headers.get(URL);
WebClient client = WebClient.wrap(httpClient);
HttpRequest<Buffer> request = client.requestAbs(GET, okapiUrl + USER_TENANTS_PATH);
headers.forEach(request::putHeader);

return request.send().compose(response -> {
if (response.statusCode() != HTTP_OK) {
String msg = String.format("Error loading consortium data, tenantId: '%s' response status: '%s', body: '%s'",
tenantId, response.statusCode(), response.bodyAsString());
LOG.warn("loadConsortiumData:: {}", msg);
return Future.failedFuture(msg);
}
JsonArray userTenants = response.bodyAsJsonObject().getJsonArray(USER_TENANTS_FIELD);
if (userTenants.isEmpty()) {
return Future.succeededFuture(Optional.<ConsortiumData>empty());
}

LOG.info("loadConsortiumData:: Consortium data was loaded, tenantId: '{}'", tenantId);
JsonObject userTenant = userTenants.getJsonObject(0);
return Future.succeededFuture(Optional.of(
new ConsortiumData(userTenant.getString(CENTRAL_TENANT_ID_FIELD), userTenant.getString(CONSORTIUM_ID_FIELD))));
}).toCompletionStage()
.toCompletableFuture();
}

}
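A usage sketch, assuming the cache is created once per Vert.x instance and the current request's Okapi headers (including X-Okapi-Url) are available; the expiration time can be tuned via the cache.consortium-data.expiration.time.seconds system property read in the constructor:

  // Illustrative usage; wiring details are assumptions based on the constructor above.
  ConsortiumDataCache consortiumDataCache = new ConsortiumDataCache(vertx, vertx.createHttpClient());

  consortiumDataCache.getConsortiumData(tenantId, okapiHeaders)
    .onSuccess(optionalData -> optionalData.ifPresent(data ->
      LOG.info("Tenant '{}' is in consortium '{}', central tenant is '{}'",
        tenantId, data.getConsortiumId(), data.getCentralTenantId())))
    .onFailure(e -> LOG.warn("Failed to resolve consortium data for tenant '{}'", tenantId, e));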
@@ -11,6 +11,7 @@

import io.vertx.core.Future;
import io.vertx.core.Handler;
import io.vertx.core.json.JsonObject;
import java.util.Collection;
import java.util.List;
import javax.ws.rs.core.Response;
@@ -92,6 +93,13 @@ public Future<Void> publishAllRemoved() {
return domainEventService.publishAllRecordsRemoved();
}

public Future<Void> publishUpserted(String instanceId, JsonObject oldRecord, JsonObject newRecord) {
if (oldRecord == null) {
return domainEventService.publishRecordCreated(instanceId, newRecord.encode());
}
return domainEventService.publishRecordUpdated(instanceId, oldRecord.encode(), newRecord.encode());
}

public Handler<Response> publishUpdated(D oldRecord) {
return response -> {
if (!isUpdateSuccessResponse(response)) {
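Combined with the old/new pairs returned by HoldingsRepository.upsert earlier in this diff, a caller could forward each pair as-is; publishUpserted then emits a create event when "old" is absent and an update event otherwise. The publisher instance and instanceId resolution below are assumed:

  // Hypothetical wiring of one upsert result pair into a domain event.
  JsonObject oldJson = pair.getJsonObject("old"); // null for a newly inserted record
  JsonObject newJson = pair.getJsonObject("new");
  publisher.publishUpserted(instanceId, oldJson, newJson);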
@@ -116,6 +116,11 @@ Future<Void> publishRecordUpdated(String instanceId, T oldRecord, T newRecord) {
return publish(instanceId, domainEvent);
}

Future<Void> publishRecordUpdated(String instanceId, String oldRecord, String newRecord) {
var domainEvent = DomainEventRaw.updateEvent(oldRecord, newRecord, tenantId(okapiHeaders));
return publish(instanceId, domainEvent);
}

Future<Void> publishRecordsUpdated(Collection<Triple<String, T, T>> updatedRecords) {
if (updatedRecords.isEmpty()) {
return succeededFuture();
@@ -133,6 +138,11 @@ Future<Void> publishRecordCreated(String instanceId, T newRecord) {
return publish(instanceId, domainEvent);
}

Future<Void> publishRecordCreated(String id, String newRecord) {
var domainEvent = DomainEventRaw.createEvent(newRecord, tenantId(okapiHeaders));
return publish(id, domainEvent);
}

Future<Void> publishRecordsCreated(List<Pair<String, T>> records) {
if (records.isEmpty()) {
return succeededFuture();