Commit

API: add IWorkspace.write(Map<IFile, byte[]> ...)
to create multiple IFile instances in a batch.

For example, during a clean build JDT first deletes all output folders and
then writes one .class file after the other. Typically many files are
written sequentially, even though they could be written in parallel if
there were an API for it.

This change keeps all changes to the workspace single-threaded but
forwards the I/O of creating multiple files to multiple threads.

The single most important use case is JDT's
AbstractImageBuilder.writeClassFileContents().

On Windows the speedup is roughly equal to the number of cores when
hyper-threading is available.

OutOfMemoryError is not a concern, as the caller has full control over
how many bytes are passed.
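
For illustration only (not part of this commit), a builder could use the new API roughly as
follows; the class BatchWriteSketch, the output folder, and the map of class-file bytes are
assumptions made for the sake of the example:

import java.util.HashMap;
import java.util.Map;
import org.eclipse.core.resources.IFile;
import org.eclipse.core.resources.IFolder;
import org.eclipse.core.resources.ResourcesPlugin;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.IProgressMonitor;

class BatchWriteSketch {
	/** Hypothetical helper: writes all generated .class files with one batch call. */
	void writeClassFiles(IFolder outputFolder, Map<String, byte[]> classFileBytes, IProgressMonitor monitor)
			throws CoreException {
		Map<IFile, byte[]> contents = new HashMap<>();
		for (Map.Entry<String, byte[]> entry : classFileBytes.entrySet()) {
			// map each simple file name to its IFile handle in the output folder
			contents.put(outputFolder.getFile(entry.getKey()), entry.getValue());
		}
		// one batch call instead of many sequential IFile.write() calls;
		// force=false, derived=true, keepHistory=false as typical for generated output
		ResourcesPlugin.getWorkspace().write(contents, false, true, false, monitor);
	}
}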
EcljpseB0T committed Sep 11, 2024
1 parent 82287f4 commit 588235e
Showing 4 changed files with 292 additions and 3 deletions.
@@ -21,6 +21,12 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.Reader;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ForkJoinWorkerThread;
import java.util.concurrent.atomic.AtomicReference;
import org.eclipse.core.filesystem.EFS;
import org.eclipse.core.filesystem.IFileInfo;
import org.eclipse.core.filesystem.IFileStore;
@@ -38,6 +44,7 @@
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.IPath;
import org.eclipse.core.runtime.IProgressMonitor;
import org.eclipse.core.runtime.NullProgressMonitor;
import org.eclipse.core.runtime.OperationCanceledException;
import org.eclipse.core.runtime.Platform;
import org.eclipse.core.runtime.QualifiedName;
@@ -203,15 +210,15 @@ public void create(byte[] content, int updateFlags, IProgressMonitor monitor) th
}
}

private void checkCreatable() throws CoreException {
void checkCreatable() throws CoreException {
checkDoesNotExist();
Container parent = (Container) getParent();
ResourceInfo info = parent.getResourceInfo(false, false);
parent.checkAccessible(getFlags(info));
checkValidGroupContainer(parent, false, false);
}

private IFileInfo create(int updateFlags, SubMonitor subMonitor, IFileStore store)
IFileInfo create(int updateFlags, IProgressMonitor subMonitor, IFileStore store)
throws CoreException, ResourceException {
String message;
IFileInfo localInfo;
@@ -391,6 +398,61 @@ protected void internalSetContents(byte[] content, IFileInfo fileInfo, int updat
updateMetadataFiles();
workspace.getAliasManager().updateAliases(this, getStore(), IResource.DEPTH_ZERO, monitor);
}

private static final ExecutorService FILE_WORKER = new ForkJoinPool(
Math.max(1, Runtime.getRuntime().availableProcessors() - 2), pool -> {
ForkJoinWorkerThread t = new ForkJoinWorkerThread(pool) {
// anonymous subclass to access protected constructor
};
t.setName("eclipse.resources.FileWorker"); //$NON-NLS-1$
return t;
}, null, false);

static void internalSetMultipleContents(ConcurrentMap<File, byte[]> filesToCreate, int updateFlags, boolean append,
IProgressMonitor monitor) throws CoreException {
SubMonitor subMonitor = SubMonitor.convert(monitor, filesToCreate.size());
AtomicReference<CoreException> exceptions = new AtomicReference<>();
try {
FILE_WORKER.submit(() -> {
filesToCreate.entrySet().parallelStream().forEach(e -> {
try {
File file = e.getKey();
byte[] content = e.getValue();
writeSingle(updateFlags, append, subMonitor.slice(1), file, content);
} catch (CoreException ce) {
exceptions.getAndUpdate(ex -> {
if (ex == null) {
return ce;
}
ex.addSuppressed(ce);
return ex;
});
}
});
}).get();
} catch (InterruptedException | ExecutionException e) {
throw new RuntimeException(e);
}
CoreException coreException = exceptions.get();
if (coreException != null) {
throw coreException;
}
NullProgressMonitor npm = new NullProgressMonitor();
for (File file : filesToCreate.keySet()) {
file.updateMetadataFiles();
file.workspace.getAliasManager().updateAliases(file, file.getStore(), IResource.DEPTH_ZERO, npm);
file.setLocal(true);
}
}

private static void writeSingle(int updateFlags, boolean append, IProgressMonitor monitor, File file,
byte[] content) throws CoreException, ResourceException {
IFileStore store = file.getStore();
NullProgressMonitor npm = new NullProgressMonitor();
IFileInfo localInfo = file.create(updateFlags, npm, store);
file.getLocalManager().write(file, content, localInfo, updateFlags, append, monitor);
}

/**
* Optimized refreshLocal for files. This implementation does not block the workspace
* for the common case where the file exists both locally and on the file system, and
@@ -38,9 +38,13 @@
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Predicate;
@@ -117,6 +121,7 @@
import org.eclipse.core.runtime.jobs.ISchedulingRule;
import org.eclipse.core.runtime.jobs.Job;
import org.eclipse.core.runtime.jobs.JobGroup;
import org.eclipse.core.runtime.jobs.MultiRule;
import org.eclipse.core.runtime.preferences.IEclipsePreferences;
import org.eclipse.core.runtime.preferences.InstanceScope;
import org.eclipse.osgi.util.NLS;
@@ -2790,4 +2795,91 @@ public IStatus validateFiltered(IResource resource) {
}
return Status.OK_STATUS;
}

@Override
public void write(Map<IFile, byte[]> contentMap, boolean force, boolean derived, boolean keepHistory,
IProgressMonitor monitor) throws CoreException {
Objects.requireNonNull(contentMap);
ConcurrentMap<File, byte[]> filesToCreate = new ConcurrentHashMap<>(contentMap.size());
ConcurrentMap<File, byte[]> filesToReplace = new ConcurrentHashMap<>(contentMap.size());
int updateFlags = (derived ? IResource.DERIVED : IResource.NONE) | (force ? IResource.FORCE : IResource.NONE)
| (keepHistory ? IResource.KEEP_HISTORY : IResource.NONE);
int createFlags = (force ? IResource.FORCE : IResource.NONE) | (derived ? IResource.DERIVED : IResource.NONE);
SubMonitor subMon = SubMonitor.convert(monitor, contentMap.size());
for (Entry<IFile, byte[]> e : contentMap.entrySet()) {
IFile file = Objects.requireNonNull(e.getKey());
byte[] content = Objects.requireNonNull(e.getValue());
if (file.exists()) {
if (file instanceof File f) {
filesToReplace.put(f, content);
} else {
file.setContents(content, updateFlags, subMon.split(1));
}
} else {
if (file instanceof File f) {
filesToCreate.put(f, content);
} else {
file.create(content, createFlags, subMon.split(1));
}
}
}
for (Entry<File, byte[]> e : filesToReplace.entrySet()) {
File file = e.getKey();
byte[] content = e.getValue();
file.setContents(content, updateFlags, subMon.split(1));
}
createMultiple(filesToCreate, createFlags, subMon.split(filesToCreate.size()));
}

/** @see File#create(byte[], int, IProgressMonitor) **/
private void createMultiple(ConcurrentMap<File, byte[]> filesToCreate, int updateFlags, IProgressMonitor monitor)
throws CoreException {
if (filesToCreate.isEmpty()) {
return;
}
Set<File> files = filesToCreate.keySet();
for (File file : files) {
file.checkValidPath(file.path, IResource.FILE, true);
}

IPath name = files.iterator().next().getFullPath(); // XXX any name
SubMonitor subMonitor = SubMonitor.convert(monitor, NLS.bind(Messages.resources_creating, name), 1);
try {
ISchedulingRule rule = MultiRule
.combine(files.stream().map(getRuleFactory()::createRule).toArray(ISchedulingRule[]::new));
NullProgressMonitor npm = new NullProgressMonitor();
try {
prepareOperation(rule, npm);
for (File file : files) {
file.checkCreatable();
}
beginOperation(true);
try {
File.internalSetMultipleContents(filesToCreate, updateFlags, false, subMonitor.newChild(1));
} catch (CoreException | OperationCanceledException e) {
// CoreException when a problem happened creating a file on disk
// OperationCanceledException when the operation of setting contents has been
// canceled
// In either case delete from the workspace and disk
for (File file : files) {
try {
deleteResource(file);
IFileStore store = file.getStore();
store.delete(EFS.NONE, null);
} catch (Exception e2) {
e.addSuppressed(e2);
}
}
throw e;
}
} catch (OperationCanceledException e) {
getWorkManager().operationCanceled();
throw e;
} finally {
endOperation(rule, true);
}
} finally {
subMonitor.done();
}
}
}
@@ -18,8 +18,19 @@
import java.io.InputStream;
import java.net.URI;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import org.eclipse.core.resources.team.FileModificationValidationContext;
import org.eclipse.core.runtime.*;
import org.eclipse.core.runtime.CoreException;
import org.eclipse.core.runtime.IAdaptable;
import org.eclipse.core.runtime.ICoreRunnable;
import org.eclipse.core.runtime.IPath;
import org.eclipse.core.runtime.IProgressMonitor;
import org.eclipse.core.runtime.IStatus;
import org.eclipse.core.runtime.MultiStatus;
import org.eclipse.core.runtime.OperationCanceledException;
import org.eclipse.core.runtime.Plugin;
import org.eclipse.core.runtime.SubMonitor;
import org.eclipse.core.runtime.jobs.ISchedulingRule;

/**
@@ -1810,4 +1821,41 @@ public ProjectOrder(IProject[] projects, boolean hasCycles, IProject[][] knots)
* @since 2.1
*/
IPathVariableManager getPathVariableManager();

/**
* Creates the files and sets/replaces the files' content. This is a batch
* version of {@code IFile.write(...)}. The files are touched in no particular
* order and the operation is not guaranteed to be atomic: exceptions may relate
* to one or multiple files, and some files may have been created while others were not.
* IResourceChangeListener may receive one or multiple events.
*
* @param contentMap the new content bytes for each IFile. The map must not be
* null and must not contain null keys or null values.
* @param force a flag controlling how to deal with resources that are not
* in sync with the local file system
* @param derived If {@code derived == true}, specifying this flag is
* equivalent to atomically calling
* {@link IResource#setDerived(boolean)} immediately after
* creating the resource, or to atomically setting the derived
* flag before setting the content of an already existing
* file. A value of {@code false} will not update
* the derived flag of an existing file.
* @param keepHistory a flag indicating whether or not to store the current
* contents in the local history if the file already
* existed
* @param monitor a progress monitor, or <code>null</code> if progress
* reporting is not desired
* @throws CoreException if this method fails or is canceled.
* @since 3.22
* @see IFile#write(byte[], boolean, boolean, boolean, IProgressMonitor)
*/
public default void write(Map<IFile, byte[]> contentMap, boolean force, boolean derived, boolean keepHistory,
IProgressMonitor monitor) throws CoreException {
// this code is just meant as an explanation and
// meant to be overridden with a parallel implementation for local files:
Objects.requireNonNull(contentMap);
SubMonitor subMon = SubMonitor.convert(monitor, contentMap.size());
for (Entry<IFile, byte[]> e : contentMap.entrySet()) {
e.getKey().write(e.getValue(), force, derived, keepHistory, subMon.split(1));
}
}
}
@@ -44,7 +44,10 @@
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.atomic.AtomicInteger;
import org.eclipse.core.internal.resources.ResourceException;
import org.eclipse.core.resources.IContainer;
@@ -576,6 +579,90 @@ public void testWrite() throws CoreException {
}
}

@Test
public void _testWritePerformanceBatch_() throws CoreException {
createInWorkspace(projects[0]);
Map<IFile, byte[]> fileMap2 = new HashMap<>();
Map<IFile, byte[]> fileMap1 = new HashMap<>();
for (int i = 0; i < 1000; i++) {
IFile file = projects[0].getFile("My" + i + ".class");
removeFromWorkspace(file);
((i % 2 == 0) ? fileMap1 : fileMap2).put(file, ("smallFileContent" + i).getBytes());
}
{
long n0 = System.nanoTime();
ResourcesPlugin.getWorkspace().write(fileMap1, false, true, false, null);
long n1 = System.nanoTime();
System.out.println("parallel write took:" + (n1 - n0) / 1_000_000 + "ms"); // ~ 250ms with 6 cores
}
{
long n0 = System.nanoTime();
for (Entry<IFile, byte[]> e : fileMap2.entrySet()) {
e.getKey().write(e.getValue(), false, true, false, null);
}
long n1 = System.nanoTime();
System.out.println("sequential write took:" + (n1 - n0) / 1_000_000 + "ms"); // ~ 1500ms
}
}

@Test
public void testWrites() throws CoreException {
IWorkspaceDescription description = getWorkspace().getDescription();
description.setMaxFileStates(4);
getWorkspace().setDescription(description);

IFile derived = projects[0].getFile("derived.txt");
IFile anyOther = projects[0].getFile("anyOther.txt");
createInWorkspace(projects[0]);
removeFromWorkspace(derived);
removeFromWorkspace(anyOther);
for (int i = 0; i < 16; i++) {
boolean setDerived = i % 2 == 0;
boolean deleteBefore = (i >> 1) % 2 == 0;
boolean keepHistory = (i >> 2) % 2 == 0;
boolean oldDerived1 = false;
if (deleteBefore) {
derived.delete(false, null);
anyOther.delete(false, null);
} else {
oldDerived1 = derived.isDerived();
}
assertEquals(!deleteBefore, derived.exists());
FussyProgressMonitor monitor = new FussyProgressMonitor();
AtomicInteger changeCount = new AtomicInteger();
ResourcesPlugin.getWorkspace().addResourceChangeListener(event -> changeCount.incrementAndGet());
String derivedContent = "updateOrCreate" + i;
String otherContent = "other" + i;
ResourcesPlugin.getWorkspace().write(
Map.of(derived, derivedContent.getBytes(), anyOther, otherContent.getBytes()), false, setDerived,
keepHistory, monitor);
assertEquals(derivedContent, new String(derived.readAllBytes()));
assertEquals(otherContent, new String(anyOther.readAllBytes()));
monitor.assertUsedUp();
if (deleteBefore) {
assertEquals(setDerived, derived.isDerived());
} else {
assertEquals(oldDerived1 || setDerived, derived.isDerived());
}
assertFalse(derived.isTeamPrivateMember());
assertTrue(derived.exists());

IFileState[] history1 = derived.getHistory(null);
changeCount.set(0);
derivedContent = "update" + i;
otherContent = "dude" + i;
ResourcesPlugin.getWorkspace().write(
Map.of(derived, derivedContent.getBytes(), anyOther, otherContent.getBytes()), false, false,
keepHistory,
null);
assertEquals(derivedContent, new String(derived.readAllBytes()));
assertEquals(otherContent, new String(anyOther.readAllBytes()));
boolean oldDerived2 = derived.isDerived();
assertEquals(oldDerived2, derived.isDerived());
IFileState[] history2 = derived.getHistory(null);
assertEquals((keepHistory && !oldDerived2) ? 1 : 0, history2.length - history1.length);
}
}

@Test
public void testWriteRule() throws CoreException {
IFile resource = projects[0].getFile("derived.txt");
