Mirror of https://github.com/PaperMC/Paper.git (synced 2024-12-22 22:45:31 +01:00)
Rebase chunk patches

parent 819facd7c4
commit 74ad522fc5
665 changed files with 3009 additions and 4490 deletions
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,397 +0,0 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Spottedleaf <Spottedleaf@users.noreply.github.com>
Date: Thu, 16 Feb 2023 16:50:05 -0800
Subject: [PATCH] Make ChunkStatus.EMPTY not rely on the main thread for
 completion

In order to do this, we need to push the POI consistency checks
to a later status. Since FULL is the only other status that
uses the main thread, it can go there.

The consistency checks are only really for when a desync occurs,
and so delaying the check only matters when the chunk data
has desync'd. As long as the desync is sorted out before the
chunk is fully loaded (i.e. before setBlock can occur on
a chunk), it should not matter.

This change is primarily due to behavioural changes
in the chunk task queue brought by region threading -
which is to split the queue into separate regions. As such,
it is required that in order for the sync load to complete
that the region owning the chunk drain and execute the task
while ticking. However, that is not always possible in
region threading. Thus, removing the main thread reliance allows
the chunk to progress without requiring a tick thread.
Specifically, this allows far sync loads (outside of a specific
region's bounds) to occur without issue - namely with structure
searching.

diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java
index fb42d776f15f735fb59e972e00e2b512c23a8387..300700477ee34bc22b31315825c0e40f61070cd5 100644
--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkFullTask.java
@@ -2,6 +2,8 @@ package io.papermc.paper.chunk.system.scheduling;
 
 import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
 import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
+import com.mojang.logging.LogUtils;
+import io.papermc.paper.chunk.system.poi.PoiChunk;
 import net.minecraft.server.level.ChunkMap;
 import net.minecraft.server.level.ServerLevel;
 import net.minecraft.world.level.chunk.ChunkAccess;
@@ -9,10 +11,13 @@ import net.minecraft.world.level.chunk.ChunkStatus;
 import net.minecraft.world.level.chunk.ImposterProtoChunk;
 import net.minecraft.world.level.chunk.LevelChunk;
 import net.minecraft.world.level.chunk.ProtoChunk;
+import org.slf4j.Logger;
 import java.lang.invoke.VarHandle;
 
 public final class ChunkFullTask extends ChunkProgressionTask implements Runnable {
 
+    private static final Logger LOGGER = LogUtils.getClassLogger();
+
     protected final NewChunkHolder chunkHolder;
     protected final ChunkAccess fromChunk;
     protected final PrioritisedExecutor.PrioritisedTask convertToFullTask;
@@ -35,6 +40,15 @@ public final class ChunkFullTask extends ChunkProgressionTask implements Runnabl
         // See Vanilla protoChunkToFullChunk for what this function should be doing
         final LevelChunk chunk;
         try {
+            // moved from the load from nbt stage into here
+            final PoiChunk poiChunk = this.chunkHolder.getPoiChunk();
+            if (poiChunk == null) {
+                LOGGER.error("Expected poi chunk to be loaded with chunk for task " + this.toString());
+            } else {
+                poiChunk.load();
+                this.world.getPoiManager().checkConsistency(this.fromChunk);
+            }
+
             if (this.fromChunk instanceof ImposterProtoChunk wrappedFull) {
                 chunk = wrappedFull.getWrapped();
             } else {
diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java
index be6f3f6a57668a9bd50d0ea5f2dd2335355b69d6..1f7c146ff0b2a835c818f49da6c1f1411f26aa39 100644
--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java
@@ -25,6 +25,7 @@ import org.slf4j.Logger;
 import java.lang.invoke.VarHandle;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.Consumer;
 
 public final class ChunkLoadTask extends ChunkProgressionTask {
@@ -34,9 +35,11 @@ public final class ChunkLoadTask extends ChunkProgressionTask {
     private final NewChunkHolder chunkHolder;
     private final ChunkDataLoadTask loadTask;
 
-    private boolean cancelled;
+    private volatile boolean cancelled;
     private NewChunkHolder.GenericDataLoadTaskCallback entityLoadTask;
     private NewChunkHolder.GenericDataLoadTaskCallback poiLoadTask;
+    private GenericDataLoadTask.TaskResult<ChunkAccess, Throwable> loadResult;
+    private final AtomicInteger taskCountToComplete = new AtomicInteger(3); // one for poi, one for entity, and one for chunk data
 
     protected ChunkLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX, final int chunkZ,
                             final NewChunkHolder chunkHolder, final PrioritisedExecutor.Priority priority) {
@@ -44,10 +47,18 @@ public final class ChunkLoadTask extends ChunkProgressionTask {
         this.chunkHolder = chunkHolder;
         this.loadTask = new ChunkDataLoadTask(scheduler, world, chunkX, chunkZ, priority);
         this.loadTask.addCallback((final GenericDataLoadTask.TaskResult<ChunkAccess, Throwable> result) -> {
-            ChunkLoadTask.this.complete(result == null ? null : result.left(), result == null ? null : result.right());
+            ChunkLoadTask.this.loadResult = result; // must be before getAndDecrement
+            ChunkLoadTask.this.tryCompleteLoad();
         });
     }
 
+    private void tryCompleteLoad() {
+        if (this.taskCountToComplete.decrementAndGet() == 0) {
+            final GenericDataLoadTask.TaskResult<ChunkAccess, Throwable> result = this.cancelled ? null : this.loadResult; // only after the getAndDecrement
+            ChunkLoadTask.this.complete(result == null ? null : result.left(), result == null ? null : result.right());
+        }
+    }
+
     @Override
     public ChunkStatus getTargetStatus() {
         return ChunkStatus.EMPTY;
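
The hunk above replaces the old ad-hoc scheduling counter with a fixed countdown of three sub-loads (POI data, entity NBT, chunk data): each sub-load ends by calling tryCompleteLoad(), and the overall chunk load completes only when the last decrement reaches zero, publishing a null result if the load was cancelled. The stand-alone sketch below only illustrates that pattern; the names (SubTaskCountdown, completeWithResult, completeEmpty) are invented for the example and are not part of the patch. The key ordering constraint is the one called out in the comments above: the result and the cancelled flag must be written before the decrement, so that whichever thread reaches zero also observes them.

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

// Illustrative only: a fixed number of sub-tasks complete (or are cancelled)
// exactly once each; the last one to decrement publishes the final result.
final class SubTaskCountdown<R> {
    private final AtomicInteger remaining;
    private final Consumer<R> onComplete;
    private volatile boolean cancelled;
    private volatile R result; // written before the final decrement

    SubTaskCountdown(final int subTasks, final Consumer<R> onComplete) {
        this.remaining = new AtomicInteger(subTasks);
        this.onComplete = onComplete;
    }

    // Called by the sub-task that actually produces the payload.
    void completeWithResult(final R result) {
        this.result = result; // must happen-before the decrement below
        this.countDown();
    }

    // Called by sub-tasks that only gate completion (e.g. POI/entity data loads).
    void completeEmpty() {
        this.countDown();
    }

    // Called when a sub-task is cancelled instead of completing normally.
    void cancelSubTask() {
        this.cancelled = true; // set before the decrement, as in the patch
        this.countDown();
    }

    private void countDown() {
        if (this.remaining.decrementAndGet() == 0) {
            // only the single thread that reaches zero runs the completion
            this.onComplete.accept(this.cancelled ? null : this.result);
        }
    }
}

In the patch itself, taskCountToComplete plays the role of remaining, the chunk-data callback corresponds to completeWithResult, and the POI/entity callbacks and the cancel paths correspond to the other two methods.
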
@@ -65,11 +76,8 @@ public final class ChunkLoadTask extends ChunkProgressionTask {
|
||||
final NewChunkHolder.GenericDataLoadTaskCallback entityLoadTask;
|
||||
final NewChunkHolder.GenericDataLoadTaskCallback poiLoadTask;
|
||||
|
||||
- final AtomicInteger count = new AtomicInteger();
|
||||
final Consumer<GenericDataLoadTask.TaskResult<?, ?>> scheduleLoadTask = (final GenericDataLoadTask.TaskResult<?, ?> result) -> {
|
||||
- if (count.decrementAndGet() == 0) {
|
||||
- ChunkLoadTask.this.loadTask.schedule(false);
|
||||
- }
|
||||
+ ChunkLoadTask.this.tryCompleteLoad();
|
||||
};
|
||||
|
||||
// NOTE: it is IMPOSSIBLE for getOrLoadEntityData/getOrLoadPoiData to complete synchronously, because
|
||||
@@ -85,16 +93,16 @@ public final class ChunkLoadTask extends ChunkProgressionTask {
|
||||
}
|
||||
if (!this.chunkHolder.isEntityChunkNBTLoaded()) {
|
||||
entityLoadTask = this.chunkHolder.getOrLoadEntityData((Consumer)scheduleLoadTask);
|
||||
- count.setPlain(count.getPlain() + 1);
|
||||
} else {
|
||||
entityLoadTask = null;
|
||||
+ this.taskCountToComplete.getAndDecrement(); // we know the chunk load is not done here, as it is not scheduled
|
||||
}
|
||||
|
||||
if (!this.chunkHolder.isPoiChunkLoaded()) {
|
||||
poiLoadTask = this.chunkHolder.getOrLoadPoiData((Consumer)scheduleLoadTask);
|
||||
- count.setPlain(count.getPlain() + 1);
|
||||
} else {
|
||||
poiLoadTask = null;
|
||||
+ this.taskCountToComplete.getAndDecrement(); // we know the chunk load is not done here, as it is not scheduled
|
||||
}
|
||||
|
||||
this.entityLoadTask = entityLoadTask;
|
||||
@@ -107,14 +115,11 @@ public final class ChunkLoadTask extends ChunkProgressionTask {
|
||||
entityLoadTask.schedule();
|
||||
}
|
||||
|
||||
- if (poiLoadTask != null) {
|
||||
+ if (poiLoadTask != null) {
|
||||
poiLoadTask.schedule();
|
||||
}
|
||||
|
||||
- if (entityLoadTask == null && poiLoadTask == null) {
|
||||
- // no need to wait on those, we can schedule now
|
||||
- this.loadTask.schedule(false);
|
||||
- }
|
||||
+ this.loadTask.schedule(false);
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -129,15 +134,20 @@ public final class ChunkLoadTask extends ChunkProgressionTask {
|
||||
|
||||
/*
|
||||
Note: The entityLoadTask/poiLoadTask do not complete when cancelled,
|
||||
- but this is fine because if they are successfully cancelled then
|
||||
- we will successfully cancel the load task, which will complete when cancelled
|
||||
+ so we need to manually try to complete in those cases
|
||||
+ It is also important to note that we set the cancelled field first, just in case
|
||||
+ the chunk load task attempts to complete with a non-null value
|
||||
*/
|
||||
|
||||
if (this.entityLoadTask != null) {
|
||||
- this.entityLoadTask.cancel();
|
||||
+ if (this.entityLoadTask.cancel()) {
|
||||
+ this.tryCompleteLoad();
|
||||
+ }
|
||||
}
|
||||
if (this.poiLoadTask != null) {
|
||||
- this.poiLoadTask.cancel();
|
||||
+ if (this.poiLoadTask.cancel()) {
|
||||
+ this.tryCompleteLoad();
|
||||
+ }
|
||||
}
|
||||
this.loadTask.cancel();
|
||||
}
|
||||
@@ -249,7 +259,7 @@ public final class ChunkLoadTask extends ChunkProgressionTask {
|
||||
}
|
||||
}
|
||||
|
||||
- public final class ChunkDataLoadTask extends CallbackDataLoadTask<ChunkSerializer.InProgressChunkHolder, ChunkAccess> {
|
||||
+ public static final class ChunkDataLoadTask extends CallbackDataLoadTask<ChunkAccess, ChunkAccess> {
|
||||
protected ChunkDataLoadTask(final ChunkTaskScheduler scheduler, final ServerLevel world, final int chunkX,
|
||||
final int chunkZ, final PrioritisedExecutor.Priority priority) {
|
||||
super(scheduler, world, chunkX, chunkZ, RegionFileIOThread.RegionFileType.CHUNK_DATA, priority);
|
||||
@@ -262,7 +272,7 @@ public final class ChunkLoadTask extends ChunkProgressionTask {
|
||||
|
||||
@Override
|
||||
protected boolean hasOnMain() {
|
||||
- return true;
|
||||
+ return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -272,35 +282,30 @@ public final class ChunkLoadTask extends ChunkProgressionTask {
|
||||
|
||||
@Override
|
||||
protected PrioritisedExecutor.PrioritisedTask createOnMain(final Runnable run, final PrioritisedExecutor.Priority priority) {
|
||||
- return this.scheduler.createChunkTask(this.chunkX, this.chunkZ, run, priority);
|
||||
+ throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
- protected TaskResult<ChunkAccess, Throwable> completeOnMainOffMain(final ChunkSerializer.InProgressChunkHolder data, final Throwable throwable) {
|
||||
- if (data != null) {
|
||||
- return null;
|
||||
- }
|
||||
-
|
||||
- final PoiChunk poiChunk = ChunkLoadTask.this.chunkHolder.getPoiChunk();
|
||||
- if (poiChunk == null) {
|
||||
- LOGGER.error("Expected poi chunk to be loaded with chunk for task " + this.toString());
|
||||
- } else if (!poiChunk.isLoaded()) {
|
||||
- // need to call poiChunk.load() on main
|
||||
- return null;
|
||||
- }
|
||||
+ protected TaskResult<ChunkAccess, Throwable> completeOnMainOffMain(final ChunkAccess data, final Throwable throwable) {
|
||||
+ throw new UnsupportedOperationException();
|
||||
+ }
|
||||
|
||||
- return new TaskResult<>(this.getEmptyChunk(), null);
|
||||
+ private ProtoChunk getEmptyChunk() {
|
||||
+ return new ProtoChunk(
|
||||
+ new ChunkPos(this.chunkX, this.chunkZ), UpgradeData.EMPTY, this.world,
|
||||
+ this.world.registryAccess().registryOrThrow(Registries.BIOME), (BlendingData)null
|
||||
+ );
|
||||
}
|
||||
|
||||
@Override
|
||||
- protected TaskResult<ChunkSerializer.InProgressChunkHolder, Throwable> runOffMain(final CompoundTag data, final Throwable throwable) {
|
||||
+ protected TaskResult<ChunkAccess, Throwable> runOffMain(final CompoundTag data, final Throwable throwable) {
|
||||
if (throwable != null) {
|
||||
LOGGER.error("Failed to load chunk data for task: " + this.toString() + ", chunk data will be lost", throwable);
|
||||
- return new TaskResult<>(null, null);
|
||||
+ return new TaskResult<>(this.getEmptyChunk(), null);
|
||||
}
|
||||
|
||||
if (data == null) {
|
||||
- return new TaskResult<>(null, null);
|
||||
+ return new TaskResult<>(this.getEmptyChunk(), null);
|
||||
}
|
||||
|
||||
// need to convert data, and then deserialize it
|
||||
@@ -319,53 +324,18 @@ public final class ChunkLoadTask extends ChunkProgressionTask {
|
||||
this.world, chunkMap.getPoiManager(), chunkPos, converted, true
|
||||
);
|
||||
|
||||
- return new TaskResult<>(chunkHolder, null);
|
||||
+ return new TaskResult<>(chunkHolder.protoChunk, null);
|
||||
} catch (final ThreadDeath death) {
|
||||
throw death;
|
||||
} catch (final Throwable thr2) {
|
||||
LOGGER.error("Failed to parse chunk data for task: " + this.toString() + ", chunk data will be lost", thr2);
|
||||
- return new TaskResult<>(null, thr2);
|
||||
+ return new TaskResult<>(this.getEmptyChunk(), null);
|
||||
}
|
||||
}
|
||||
|
||||
- private ProtoChunk getEmptyChunk() {
|
||||
- return new ProtoChunk(
|
||||
- new ChunkPos(this.chunkX, this.chunkZ), UpgradeData.EMPTY, this.world,
|
||||
- this.world.registryAccess().registryOrThrow(Registries.BIOME), (BlendingData)null
|
||||
- );
|
||||
- }
|
||||
-
|
||||
@Override
|
||||
- protected TaskResult<ChunkAccess, Throwable> runOnMain(final ChunkSerializer.InProgressChunkHolder data, final Throwable throwable) {
|
||||
- final PoiChunk poiChunk = ChunkLoadTask.this.chunkHolder.getPoiChunk();
|
||||
- if (poiChunk == null) {
|
||||
- LOGGER.error("Expected poi chunk to be loaded with chunk for task " + this.toString());
|
||||
- } else {
|
||||
- poiChunk.load();
|
||||
- }
|
||||
-
|
||||
- if (data == null || data.protoChunk == null) {
|
||||
- // throwable could be non-null, but the off-main task will print its exceptions - so we don't need to care,
|
||||
- // it's handled already
|
||||
-
|
||||
- return new TaskResult<>(this.getEmptyChunk(), null);
|
||||
- }
|
||||
-
|
||||
- // have tasks to run (at this point, it's just the POI consistency checking)
|
||||
- try {
|
||||
- if (data.tasks != null) {
|
||||
- for (int i = 0, len = data.tasks.size(); i < len; ++i) {
|
||||
- data.tasks.poll().run();
|
||||
- }
|
||||
- }
|
||||
-
|
||||
- return new TaskResult<>(data.protoChunk, null);
|
||||
- } catch (final ThreadDeath death) {
|
||||
- throw death;
|
||||
- } catch (final Throwable thr2) {
|
||||
- LOGGER.error("Failed to parse main tasks for task " + this.toString() + ", chunk data will be lost", thr2);
|
||||
- return new TaskResult<>(this.getEmptyChunk(), null);
|
||||
- }
|
||||
+ protected TaskResult<ChunkAccess, Throwable> runOnMain(final ChunkAccess data, final Throwable throwable) {
|
||||
+ throw new UnsupportedOperationException();
|
||||
}
|
||||
}
|
||||
|
||||
diff --git a/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiManager.java b/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiManager.java
index f10ba4211cbdcc4f4ce3585c7cb3f80185e13b73..6f2c7baea0d1ac7813c7b85e1f5558573745762c 100644
--- a/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiManager.java
+++ b/src/main/java/net/minecraft/world/entity/ai/village/poi/PoiManager.java
@@ -310,6 +310,17 @@ public class PoiManager extends SectionStorage<PoiSection> {
             }
         }
     }
+
+    public void checkConsistency(net.minecraft.world.level.chunk.ChunkAccess chunk) {
+        int chunkX = chunk.getPos().x;
+        int chunkZ = chunk.getPos().z;
+        int minY = io.papermc.paper.util.WorldUtil.getMinSection(chunk);
+        int maxY = io.papermc.paper.util.WorldUtil.getMaxSection(chunk);
+        LevelChunkSection[] sections = chunk.getSections();
+        for (int section = minY; section <= maxY; ++section) {
+            this.checkConsistencyWithBlocks(SectionPos.of(chunkX, section, chunkZ), sections[section - minY]);
+        }
+    }
     // Paper end - rewrite chunk system
 
     public void checkConsistencyWithBlocks(SectionPos sectionPos, LevelChunkSection chunkSection) {
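
The new checkConsistency method above visits every section of the chunk, translating section Y coordinates into indices of the getSections() array via section - minY. The tiny snippet below only illustrates that index mapping; the bounds used (-4 to 19) are example values, not taken from the patch.

public final class SectionIndexExample {
    public static void main(final String[] args) {
        // Example only: a world whose lowest section is y = -4 and highest is y = 19
        // stores its 24 sections in an array, so index = sectionY - minSection.
        final int minSection = -4;
        final int maxSection = 19;
        final String[] sections = new String[maxSection - minSection + 1];

        for (int sectionY = minSection; sectionY <= maxSection; ++sectionY) {
            sections[sectionY - minSection] = "section at y=" + sectionY;
        }

        System.out.println(sections.length + " sections, bottom = " + sections[0]);
    }
}
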
diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java
|
||||
index 55da32077d1db81ba197da0be5896da694f4bfa9..a7ee469bb2880a78540b79ae691ea449dfe22ce4 100644
|
||||
--- a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java
|
||||
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java
|
||||
@@ -98,13 +98,11 @@ public class ChunkSerializer {
|
||||
public static final class InProgressChunkHolder {
|
||||
|
||||
public final ProtoChunk protoChunk;
|
||||
- public final java.util.ArrayDeque<Runnable> tasks;
|
||||
|
||||
public CompoundTag poiData;
|
||||
|
||||
- public InProgressChunkHolder(final ProtoChunk protoChunk, final java.util.ArrayDeque<Runnable> tasks) {
|
||||
+ public InProgressChunkHolder(final ProtoChunk protoChunk) {
|
||||
this.protoChunk = protoChunk;
|
||||
- this.tasks = tasks;
|
||||
}
|
||||
}
|
||||
// Paper end
|
||||
@@ -112,12 +110,10 @@ public class ChunkSerializer {
|
||||
public static ProtoChunk read(ServerLevel world, PoiManager poiStorage, ChunkPos chunkPos, CompoundTag nbt) {
|
||||
// Paper start - add variant for async calls
|
||||
InProgressChunkHolder holder = loadChunk(world, poiStorage, chunkPos, nbt, true);
|
||||
- holder.tasks.forEach(Runnable::run);
|
||||
return holder.protoChunk;
|
||||
}
|
||||
|
||||
public static InProgressChunkHolder loadChunk(ServerLevel world, PoiManager poiStorage, ChunkPos chunkPos, CompoundTag nbt, boolean distinguish) {
|
||||
- java.util.ArrayDeque<Runnable> tasksToExecuteOnMain = new java.util.ArrayDeque<>();
|
||||
// Paper end
|
||||
ChunkPos chunkcoordintpair1 = new ChunkPos(nbt.getInt("xPos"), nbt.getInt("zPos"));
|
||||
|
||||
@@ -184,9 +180,7 @@ public class ChunkSerializer {
|
||||
achunksection[k] = chunksection;
|
||||
SectionPos sectionposition = SectionPos.of(chunkPos, b0);
|
||||
|
||||
- tasksToExecuteOnMain.add(() -> { // Paper - delay this task since we're executing off-main
|
||||
- poiStorage.checkConsistencyWithBlocks(sectionposition, chunksection);
|
||||
- }); // Paper - delay this task since we're executing off-main
|
||||
+ // Paper - rewrite chunk system - moved to final load stage
|
||||
}
|
||||
|
||||
boolean flag3 = nbttagcompound1.contains("BlockLight", 7);
|
||||
@@ -332,7 +326,7 @@ public class ChunkSerializer {
|
||||
}
|
||||
|
||||
if (chunkstatus_type == ChunkStatus.ChunkType.LEVELCHUNK) {
|
||||
- return new InProgressChunkHolder(new ImposterProtoChunk((LevelChunk) object1, false), tasksToExecuteOnMain); // Paper - Async chunk loading
|
||||
+ return new InProgressChunkHolder(new ImposterProtoChunk((LevelChunk) object1, false)); // Paper - Async chunk loading
|
||||
} else {
|
||||
ProtoChunk protochunk1 = (ProtoChunk) object1;
|
||||
|
||||
@@ -360,7 +354,7 @@ public class ChunkSerializer {
|
||||
protochunk1.setCarvingMask(worldgenstage_features, new CarvingMask(nbttagcompound5.getLongArray(s1), ((ChunkAccess) object1).getMinBuildHeight()));
|
||||
}
|
||||
|
||||
- return new InProgressChunkHolder(protochunk1, tasksToExecuteOnMain); // Paper - Async chunk loading
|
||||
+ return new InProgressChunkHolder(protochunk1); // Paper - Async chunk loading
|
||||
}
|
||||
}
|
||||
|
|
@@ -1,999 +0,0 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Spottedleaf <Spottedleaf@users.noreply.github.com>
Date: Sun, 26 Feb 2023 23:42:29 -0800
Subject: [PATCH] Increase parallelism for neighbour writing chunk statuses

Namely, everything after FEATURES. By creating a dependency
chain indicating what chunks are in use, we can safely
schedule completely independent tasks in parallel. This
will allow the chunk system to scale beyond 10 threads
per world.

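The patches below realise this dependency chain by giving each non-parallel-capable task a square "write radius" of chunks it may touch; two tasks may run concurrently only when their squares do not overlap. The sketch below shows just that independence test. It is illustrative only: ClaimedSquare is a made-up type, and the real scheduler added later in this commit (RadiusAwarePrioritisedExecutor) tracks claims per chunk key rather than comparing squares pairwise.

// Illustrative only: each task claims a square of chunks (centre +/- radius).
// Two tasks are independent, and may run in parallel, iff their squares do not overlap.
record ClaimedSquare(int centerX, int centerZ, int radius) {

    boolean overlaps(final ClaimedSquare other) {
        // squares overlap when they intersect on both axes
        return Math.abs(this.centerX - other.centerX) <= this.radius + other.radius
            && Math.abs(this.centerZ - other.centerZ) <= this.radius + other.radius;
    }

    public static void main(final String[] args) {
        final ClaimedSquare lightA = new ClaimedSquare(0, 0, 1);
        final ClaimedSquare lightB = new ClaimedSquare(5, 0, 1); // far enough away
        final ClaimedSquare lightC = new ClaimedSquare(2, 0, 1); // shares chunk (1, 0) with lightA

        System.out.println(lightA.overlaps(lightB)); // false -> may be scheduled in parallel
        System.out.println(lightA.overlaps(lightC)); // true  -> lightC must wait for lightA
    }
}
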
diff --git a/src/main/java/io/papermc/paper/chunk/system/RegionizedPlayerChunkLoader.java b/src/main/java/io/papermc/paper/chunk/system/RegionizedPlayerChunkLoader.java
|
||||
index cf7610b3396d03bf79a899d5d9cfc6debb5b90be..48bfee5b9db501fcdba4ddb1e4bff2718956a680 100644
|
||||
--- a/src/main/java/io/papermc/paper/chunk/system/RegionizedPlayerChunkLoader.java
|
||||
+++ b/src/main/java/io/papermc/paper/chunk/system/RegionizedPlayerChunkLoader.java
|
||||
@@ -286,7 +286,92 @@ public class RegionizedPlayerChunkLoader {
|
||||
}
|
||||
}
|
||||
|
||||
- return chunks.toLongArray();
|
||||
+ // to increase generation parallelism, we want to space the chunks out so that they are not nearby when generating
|
||||
+ // this also means we are minimising locality
|
||||
+ // but, we need to maintain sorted order by manhatten distance
|
||||
+
|
||||
+ // first, build a map of manhatten distance -> chunks
|
||||
+ final java.util.List<LongArrayList> byDistance = new java.util.ArrayList<>();
|
||||
+ for (final it.unimi.dsi.fastutil.longs.LongIterator iterator = chunks.iterator(); iterator.hasNext();) {
|
||||
+ final long chunkKey = iterator.nextLong();
|
||||
+
|
||||
+ final int chunkX = CoordinateUtils.getChunkX(chunkKey);
|
||||
+ final int chunkZ = CoordinateUtils.getChunkZ(chunkKey);
|
||||
+
|
||||
+ final int dist = Math.abs(chunkX) + Math.abs(chunkZ);
|
||||
+ if (dist == byDistance.size()) {
|
||||
+ final LongArrayList list = new LongArrayList();
|
||||
+ list.add(chunkKey);
|
||||
+ byDistance.add(list);
|
||||
+ continue;
|
||||
+ }
|
||||
+
|
||||
+ byDistance.get(dist).add(chunkKey);
|
||||
+ }
|
||||
+
|
||||
+ // per distance we transform the chunk list so that each element is maximally spaced out from each other
|
||||
+ for (int i = 0, len = byDistance.size(); i < len; ++i) {
|
||||
+ final LongArrayList notAdded = byDistance.get(i).clone();
|
||||
+ final LongArrayList added = new LongArrayList();
|
||||
+
|
||||
+ while (!notAdded.isEmpty()) {
|
||||
+ if (added.isEmpty()) {
|
||||
+ added.add(notAdded.removeLong(notAdded.size() - 1));
|
||||
+ continue;
|
||||
+ }
|
||||
+
|
||||
+ long maxChunk = -1L;
|
||||
+ int maxDist = 0;
|
||||
+
|
||||
+ // select the chunk from the not yet added set that has the largest minimum distance from
|
||||
+ // the current set of added chunks
|
||||
+
|
||||
+ for (final it.unimi.dsi.fastutil.longs.LongIterator iterator = notAdded.iterator(); iterator.hasNext();) {
|
||||
+ final long chunkKey = iterator.nextLong();
|
||||
+ final int chunkX = CoordinateUtils.getChunkX(chunkKey);
|
||||
+ final int chunkZ = CoordinateUtils.getChunkZ(chunkKey);
|
||||
+
|
||||
+ int minDist = Integer.MAX_VALUE;
|
||||
+
|
||||
+ for (final it.unimi.dsi.fastutil.longs.LongIterator iterator2 = added.iterator(); iterator2.hasNext();) {
|
||||
+ final long addedKey = iterator2.nextLong();
|
||||
+ final int addedX = CoordinateUtils.getChunkX(addedKey);
|
||||
+ final int addedZ = CoordinateUtils.getChunkZ(addedKey);
|
||||
+
|
||||
+ // here we use square distance because chunk generation uses neighbours in a square radius
|
||||
+ final int dist = Math.max(Math.abs(addedX - chunkX), Math.abs(addedZ - chunkZ));
|
||||
+
|
||||
+ if (dist < minDist) {
|
||||
+ minDist = dist;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ if (minDist > maxDist) {
|
||||
+ maxDist = minDist;
|
||||
+ maxChunk = chunkKey;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ // move the selected chunk from the not added set to the added set
|
||||
+
|
||||
+ if (!notAdded.rem(maxChunk)) {
|
||||
+ throw new IllegalStateException();
|
||||
+ }
|
||||
+
|
||||
+ added.add(maxChunk);
|
||||
+ }
|
||||
+
|
||||
+ byDistance.set(i, added);
|
||||
+ }
|
||||
+
|
||||
+ // now, rebuild the list so that it still maintains manhatten distance order
|
||||
+ final LongArrayList ret = new LongArrayList(chunks.size());
|
||||
+
|
||||
+ for (final LongArrayList dist : byDistance) {
|
||||
+ ret.addAll(dist);
|
||||
+ }
|
||||
+
|
||||
+ return ret.toLongArray();
|
||||
}
|
||||
|
||||
public static final class PlayerChunkLoaderData {
|
||||
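
The reordering added above is a greedy farthest-point selection: within each Manhattan-distance ring it repeatedly picks the not-yet-added chunk whose minimum distance to the already-added chunks is largest (square/Chebyshev distance, matching the neighbourhood used by generation), so consecutive chunks in the resulting order are spread apart and their generation tasks rarely contend. A compact stand-alone sketch of that selection follows, using a made-up Point type and plain java.util lists instead of the fastutil long lists in the patch.

import java.util.ArrayList;
import java.util.List;

public final class FarthestPointOrdering {

    record Point(int x, int z) {}

    // Chebyshev distance, matching the square neighbourhood used by chunk generation.
    private static int dist(final Point a, final Point b) {
        return Math.max(Math.abs(a.x() - b.x()), Math.abs(a.z() - b.z()));
    }

    // Greedily reorder points so each added point maximises its minimum distance
    // to everything added before it (the first point is taken from the end of the input).
    static List<Point> spaceOut(final List<Point> input) {
        final List<Point> notAdded = new ArrayList<>(input);
        final List<Point> added = new ArrayList<>();

        while (!notAdded.isEmpty()) {
            if (added.isEmpty()) {
                added.add(notAdded.remove(notAdded.size() - 1));
                continue;
            }

            Point best = null;
            int bestMinDist = -1;
            for (final Point candidate : notAdded) {
                int minDist = Integer.MAX_VALUE;
                for (final Point existing : added) {
                    minDist = Math.min(minDist, dist(candidate, existing));
                }
                if (minDist > bestMinDist) {
                    bestMinDist = minDist;
                    best = candidate;
                }
            }

            notAdded.remove(best);
            added.add(best);
        }

        return added;
    }

    public static void main(final String[] args) {
        final List<Point> ring = List.of(
            new Point(3, 0), new Point(0, 3), new Point(-3, 0), new Point(0, -3),
            new Point(2, 1), new Point(1, 2), new Point(-2, -1), new Point(-1, -2)
        );
        System.out.println(spaceOut(ring));
    }
}
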
diff --git a/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java b/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java
|
||||
index 0b7a2b0ead4f3bc07bfd9a38c2b7cf024bd140c6..36e93fefdfbebddce4c153974c7cd81af3cb92e9 100644
|
||||
--- a/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java
|
||||
+++ b/src/main/java/io/papermc/paper/chunk/system/light/LightQueue.java
|
||||
@@ -4,7 +4,6 @@ import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
|
||||
import ca.spottedleaf.starlight.common.light.BlockStarLightEngine;
|
||||
import ca.spottedleaf.starlight.common.light.SkyStarLightEngine;
|
||||
import ca.spottedleaf.starlight.common.light.StarLightInterface;
|
||||
-import io.papermc.paper.chunk.system.scheduling.ChunkTaskScheduler;
|
||||
import io.papermc.paper.util.CoordinateUtils;
|
||||
import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap;
|
||||
import it.unimi.dsi.fastutil.shorts.ShortCollection;
|
||||
@@ -13,6 +12,7 @@ import net.minecraft.core.BlockPos;
|
||||
import net.minecraft.core.SectionPos;
|
||||
import net.minecraft.server.level.ServerLevel;
|
||||
import net.minecraft.world.level.ChunkPos;
|
||||
+import net.minecraft.world.level.chunk.ChunkStatus;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
@@ -201,7 +201,10 @@ public final class LightQueue {
|
||||
this.chunkCoordinate = chunkCoordinate;
|
||||
this.lightEngine = lightEngine;
|
||||
this.queue = queue;
|
||||
- this.task = queue.world.chunkTaskScheduler.lightExecutor.createTask(this, priority);
|
||||
+ this.task = queue.world.chunkTaskScheduler.radiusAwareScheduler.createTask(
|
||||
+ CoordinateUtils.getChunkX(chunkCoordinate), CoordinateUtils.getChunkZ(chunkCoordinate),
|
||||
+ ChunkStatus.LIGHT.writeRadius, this, priority
|
||||
+ );
|
||||
}
|
||||
|
||||
public void schedule() {
|
||||
@@ -230,23 +233,23 @@ public final class LightQueue {
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
- final SkyStarLightEngine skyEngine = this.lightEngine.getSkyLightEngine();
|
||||
- final BlockStarLightEngine blockEngine = this.lightEngine.getBlockLightEngine();
|
||||
- try {
|
||||
- synchronized (this.queue) {
|
||||
- this.queue.chunkTasks.remove(this.chunkCoordinate);
|
||||
- }
|
||||
+ synchronized (this.queue) {
|
||||
+ this.queue.chunkTasks.remove(this.chunkCoordinate);
|
||||
+ }
|
||||
|
||||
- boolean litChunk = false;
|
||||
- if (this.lightTasks != null) {
|
||||
- for (final BooleanSupplier run : this.lightTasks) {
|
||||
- if (run.getAsBoolean()) {
|
||||
- litChunk = true;
|
||||
- break;
|
||||
- }
|
||||
+ boolean litChunk = false;
|
||||
+ if (this.lightTasks != null) {
|
||||
+ for (final BooleanSupplier run : this.lightTasks) {
|
||||
+ if (run.getAsBoolean()) {
|
||||
+ litChunk = true;
|
||||
+ break;
|
||||
}
|
||||
}
|
||||
+ }
|
||||
|
||||
+ final SkyStarLightEngine skyEngine = this.lightEngine.getSkyLightEngine();
|
||||
+ final BlockStarLightEngine blockEngine = this.lightEngine.getBlockLightEngine();
|
||||
+ try {
|
||||
final long coordinate = this.chunkCoordinate;
|
||||
final int chunkX = CoordinateUtils.getChunkX(coordinate);
|
||||
final int chunkZ = CoordinateUtils.getChunkZ(coordinate);
|
||||
diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
|
||||
index ce26fdfa1afc74ba93d19157042f6c55778011e1..718c1dd7b52fb9a501d552fdbcb3f9ff79d127d8 100644
|
||||
--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
|
||||
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkHolderManager.java
|
||||
@@ -1022,17 +1022,23 @@ public final class ChunkHolderManager {
|
||||
}
|
||||
|
||||
public Boolean tryDrainTicketUpdates() {
|
||||
- final boolean acquired = this.ticketLock.tryLock();
|
||||
- try {
|
||||
- if (!acquired) {
|
||||
- return null;
|
||||
- }
|
||||
+ boolean ret = false;
|
||||
+ for (;;) {
|
||||
+ final boolean acquired = this.ticketLock.tryLock();
|
||||
+ try {
|
||||
+ if (!acquired) {
|
||||
+ return ret ? Boolean.TRUE : null;
|
||||
+ }
|
||||
|
||||
- return Boolean.valueOf(this.drainTicketUpdates());
|
||||
- } finally {
|
||||
- if (acquired) {
|
||||
- this.ticketLock.unlock();
|
||||
+ ret |= this.drainTicketUpdates();
|
||||
+ } finally {
|
||||
+ if (acquired) {
|
||||
+ this.ticketLock.unlock();
|
||||
+ }
|
||||
}
|
||||
+ if (this.delayedTicketUpdates.isEmpty()) {
|
||||
+ return Boolean.valueOf(ret);
|
||||
+ } // else: try to re-acquire
|
||||
}
|
||||
}
|
||||
|
||||
diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java
|
||||
index 84d6af5c28cd0e81d50701bebe122f462720fbf8..d2bb266a5ed344507058778a94a8a4dcac61ba17 100644
|
||||
--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java
|
||||
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkTaskScheduler.java
|
||||
@@ -2,9 +2,9 @@ package io.papermc.paper.chunk.system.scheduling;
|
||||
|
||||
import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
|
||||
import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadPool;
|
||||
-import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedThreadedTaskQueue;
|
||||
import ca.spottedleaf.concurrentutil.util.ConcurrentUtil;
|
||||
import com.mojang.logging.LogUtils;
|
||||
+import io.papermc.paper.chunk.system.scheduling.queue.RadiusAwarePrioritisedExecutor;
|
||||
import io.papermc.paper.configuration.GlobalConfiguration;
|
||||
import io.papermc.paper.util.CoordinateUtils;
|
||||
import io.papermc.paper.util.TickThread;
|
||||
@@ -21,7 +21,6 @@ import net.minecraft.world.level.ChunkPos;
|
||||
import net.minecraft.world.level.chunk.ChunkAccess;
|
||||
import net.minecraft.world.level.chunk.ChunkStatus;
|
||||
import net.minecraft.world.level.chunk.LevelChunk;
|
||||
-import org.bukkit.Bukkit;
|
||||
import org.slf4j.Logger;
|
||||
import java.io.File;
|
||||
import java.util.ArrayDeque;
|
||||
@@ -34,7 +33,6 @@ import java.util.Objects;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import java.util.concurrent.locks.ReentrantLock;
|
||||
-import java.util.function.BooleanSupplier;
|
||||
import java.util.function.Consumer;
|
||||
|
||||
public final class ChunkTaskScheduler {
|
||||
@@ -108,9 +106,9 @@ public final class ChunkTaskScheduler {
|
||||
|
||||
public final ServerLevel world;
|
||||
public final PrioritisedThreadPool workers;
|
||||
- public final PrioritisedThreadPool.PrioritisedPoolExecutor lightExecutor;
|
||||
- public final PrioritisedThreadPool.PrioritisedPoolExecutor genExecutor;
|
||||
+ public final RadiusAwarePrioritisedExecutor radiusAwareScheduler;
|
||||
public final PrioritisedThreadPool.PrioritisedPoolExecutor parallelGenExecutor;
|
||||
+ private final PrioritisedThreadPool.PrioritisedPoolExecutor radiusAwareGenExecutor;
|
||||
public final PrioritisedThreadPool.PrioritisedPoolExecutor loadExecutor;
|
||||
|
||||
private final PrioritisedThreadedTaskQueue mainThreadExecutor = new PrioritisedThreadedTaskQueue();
|
||||
@@ -191,12 +189,11 @@ public final class ChunkTaskScheduler {
|
||||
this.workers = workers;
|
||||
|
||||
final String worldName = world.getWorld().getName();
|
||||
- this.genExecutor = workers.createExecutor("Chunk single-threaded generation executor for world '" + worldName + "'", 1);
|
||||
- // same as genExecutor, as there are race conditions between updating blocks in FEATURE status while lighting chunks
|
||||
- this.lightExecutor = this.genExecutor;
|
||||
- this.parallelGenExecutor = newChunkSystemGenParallelism <= 1 ? this.genExecutor
|
||||
- : workers.createExecutor("Chunk parallel generation executor for world '" + worldName + "'", newChunkSystemGenParallelism);
|
||||
+ this.parallelGenExecutor = workers.createExecutor("Chunk parallel generation executor for world '" + worldName + "'", Math.max(1, newChunkSystemGenParallelism));
|
||||
+ this.radiusAwareGenExecutor =
|
||||
+ newChunkSystemGenParallelism <= 1 ? this.parallelGenExecutor : workers.createExecutor("Chunk radius aware generator for world '" + worldName + "'", newChunkSystemGenParallelism);
|
||||
this.loadExecutor = workers.createExecutor("Chunk load executor for world '" + worldName + "'", newChunkSystemLoadParallelism);
|
||||
+ this.radiusAwareScheduler = new RadiusAwarePrioritisedExecutor(this.radiusAwareGenExecutor, Math.max(1, newChunkSystemGenParallelism));
|
||||
this.chunkHolderManager = new ChunkHolderManager(world, this);
|
||||
}
|
||||
|
||||
@@ -688,16 +685,14 @@ public final class ChunkTaskScheduler {
|
||||
}
|
||||
|
||||
public boolean halt(final boolean sync, final long maxWaitNS) {
|
||||
- this.lightExecutor.halt();
|
||||
- this.genExecutor.halt();
|
||||
+ this.radiusAwareGenExecutor.halt();
|
||||
this.parallelGenExecutor.halt();
|
||||
this.loadExecutor.halt();
|
||||
final long time = System.nanoTime();
|
||||
if (sync) {
|
||||
for (long failures = 9L;; failures = ConcurrentUtil.linearLongBackoff(failures, 500_000L, 50_000_000L)) {
|
||||
if (
|
||||
- !this.lightExecutor.isActive() &&
|
||||
- !this.genExecutor.isActive() &&
|
||||
+ !this.radiusAwareGenExecutor.isActive() &&
|
||||
!this.parallelGenExecutor.isActive() &&
|
||||
!this.loadExecutor.isActive()
|
||||
) {
|
||||
diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java
index 73ce0909bd89244835a0d0f2030a25871461f1e0..ecc366a4176b2efadc46aa91aa21621f0fc6abe9 100644
--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkUpgradeGenericStatusTask.java
@@ -39,8 +39,11 @@ public final class ChunkUpgradeGenericStatusTask extends ChunkProgressionTask im
         this.fromStatus = chunk.getStatus();
         this.toStatus = toStatus;
         this.neighbours = neighbours;
-        this.generateTask = (this.toStatus.isParallelCapable ? this.scheduler.parallelGenExecutor : this.scheduler.genExecutor)
-                .createTask(this, priority);
+        if (this.toStatus.isParallelCapable) {
+            this.generateTask = this.scheduler.parallelGenExecutor.createTask(this, priority);
+        } else {
+            this.generateTask = this.scheduler.radiusAwareScheduler.createTask(chunkX, chunkZ, this.toStatus.writeRadius, this, priority);
+        }
     }
 
     @Override
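
The RadiusAwarePrioritisedExecutor added below keeps, per priority level, a map from chunk key to the task currently claiming that chunk. When a new task claims its square of keys, any existing claimants become its parents, and the task only becomes eligible to run once all of its parents have completed. The sketch below shows that registration step in isolation; ChunkClaimTree and Node are made-up names, a plain HashMap stands in for the fastutil map, and all of the priority, purge and scheduling-limit handling of the real class is omitted.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

// Illustrative only: register a task's claimed square of chunk keys and link it
// to any in-flight tasks already claiming one of those chunks.
final class ChunkClaimTree {

    static final class Node {
        final String name;
        final Set<Node> parents = new HashSet<>();    // tasks this node must wait for
        final List<Node> children = new ArrayList<>(); // tasks waiting for this node

        Node(final String name) { this.name = name; }

        boolean runnable() { return this.parents.isEmpty(); }
    }

    private final Map<Long, Node> claimedBy = new HashMap<>();

    private static long key(final int x, final int z) {
        return ((long) z << 32) | (x & 0xFFFFFFFFL);
    }

    Node claim(final String name, final int centerX, final int centerZ, final int radius) {
        final Node node = new Node(name);
        for (int z = centerZ - radius; z <= centerZ + radius; ++z) {
            for (int x = centerX - radius; x <= centerX + radius; ++x) {
                // the new node takes over the claim; the previous claimant becomes a parent
                final Node previous = this.claimedBy.put(key(x, z), node);
                if (previous != null && node.parents.add(previous)) {
                    previous.children.add(node);
                }
            }
        }
        return node;
    }

    public static void main(final String[] args) {
        final ChunkClaimTree tree = new ChunkClaimTree();
        final Node a = tree.claim("light (0,0) r=1", 0, 0, 1);
        final Node b = tree.claim("light (5,5) r=1", 5, 5, 1); // disjoint square: runnable now
        final Node c = tree.claim("light (1,1) r=1", 1, 1, 1); // overlaps a: must wait

        System.out.println(a.name + " runnable: " + a.runnable()); // true
        System.out.println(b.name + " runnable: " + b.runnable()); // true
        System.out.println(c.name + " runnable: " + c.runnable()); // false
    }
}
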
diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/queue/RadiusAwarePrioritisedExecutor.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/queue/RadiusAwarePrioritisedExecutor.java
|
||||
new file mode 100644
|
||||
index 0000000000000000000000000000000000000000..3272f73013ea7d4efdd0ae2903925cc543be7075
|
||||
--- /dev/null
|
||||
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/queue/RadiusAwarePrioritisedExecutor.java
|
||||
@@ -0,0 +1,668 @@
|
||||
+package io.papermc.paper.chunk.system.scheduling.queue;
|
||||
+
|
||||
+import ca.spottedleaf.concurrentutil.executor.standard.PrioritisedExecutor;
|
||||
+import io.papermc.paper.util.CoordinateUtils;
|
||||
+import it.unimi.dsi.fastutil.longs.Long2ReferenceOpenHashMap;
|
||||
+import it.unimi.dsi.fastutil.objects.ReferenceOpenHashSet;
|
||||
+import java.util.ArrayList;
|
||||
+import java.util.Comparator;
|
||||
+import java.util.List;
|
||||
+import java.util.PriorityQueue;
|
||||
+
|
||||
+public class RadiusAwarePrioritisedExecutor {
|
||||
+
|
||||
+ private static final Comparator<DependencyNode> DEPENDENCY_NODE_COMPARATOR = (final DependencyNode t1, final DependencyNode t2) -> {
|
||||
+ return Long.compare(t1.id, t2.id);
|
||||
+ };
|
||||
+
|
||||
+ private final DependencyTree[] queues = new DependencyTree[PrioritisedExecutor.Priority.TOTAL_SCHEDULABLE_PRIORITIES];
|
||||
+ private static final int NO_TASKS_QUEUED = -1;
|
||||
+ private int selectedQueue = NO_TASKS_QUEUED;
|
||||
+ private boolean canQueueTasks = true;
|
||||
+
|
||||
+ public RadiusAwarePrioritisedExecutor(final PrioritisedExecutor executor, final int maxToSchedule) {
|
||||
+ for (int i = 0; i < this.queues.length; ++i) {
|
||||
+ this.queues[i] = new DependencyTree(this, executor, maxToSchedule, i);
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ private boolean canQueueTasks() {
|
||||
+ return this.canQueueTasks;
|
||||
+ }
|
||||
+
|
||||
+ private List<PrioritisedExecutor.PrioritisedTask> treeFinished() {
|
||||
+ this.canQueueTasks = true;
|
||||
+ for (int priority = 0; priority < this.queues.length; ++priority) {
|
||||
+ final DependencyTree queue = this.queues[priority];
|
||||
+ if (queue.hasWaitingTasks()) {
|
||||
+ final List<PrioritisedExecutor.PrioritisedTask> ret = queue.tryPushTasks();
|
||||
+
|
||||
+ if (ret == null || ret.isEmpty()) {
|
||||
+ // this happens when the tasks in the wait queue were purged
|
||||
+ // in this case, the queue was actually empty, we just had to purge it
|
||||
+ // if we set the selected queue without scheduling any tasks, the queue will never be unselected
|
||||
+ // as that requires a scheduled task completing...
|
||||
+ continue;
|
||||
+ }
|
||||
+
|
||||
+ this.selectedQueue = priority;
|
||||
+ return ret;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ this.selectedQueue = NO_TASKS_QUEUED;
|
||||
+
|
||||
+ return null;
|
||||
+ }
|
||||
+
|
||||
+ private List<PrioritisedExecutor.PrioritisedTask> queue(final Task task, final PrioritisedExecutor.Priority priority) {
|
||||
+ final int priorityId = priority.priority;
|
||||
+ final DependencyTree queue = this.queues[priorityId];
|
||||
+
|
||||
+ final DependencyNode node = new DependencyNode(task, queue);
|
||||
+
|
||||
+ if (task.dependencyNode != null) {
|
||||
+ throw new IllegalStateException();
|
||||
+ }
|
||||
+ task.dependencyNode = node;
|
||||
+
|
||||
+ queue.pushNode(node);
|
||||
+
|
||||
+ if (this.selectedQueue == NO_TASKS_QUEUED) {
|
||||
+ this.canQueueTasks = true;
|
||||
+ this.selectedQueue = priorityId;
|
||||
+ return queue.tryPushTasks();
|
||||
+ }
|
||||
+
|
||||
+ if (!this.canQueueTasks) {
|
||||
+ return null;
|
||||
+ }
|
||||
+
|
||||
+ if (PrioritisedExecutor.Priority.isHigherPriority(priorityId, this.selectedQueue)) {
|
||||
+ // prevent the lower priority tree from queueing more tasks
|
||||
+ this.canQueueTasks = false;
|
||||
+ return null;
|
||||
+ }
|
||||
+
|
||||
+ // priorityId != selectedQueue: lower priority, don't care - treeFinished will pick it up
|
||||
+ return priorityId == this.selectedQueue ? queue.tryPushTasks() : null;
|
||||
+ }
|
||||
+
|
||||
+ public PrioritisedExecutor.PrioritisedTask createTask(final int chunkX, final int chunkZ, final int radius,
|
||||
+ final Runnable run, final PrioritisedExecutor.Priority priority) {
|
||||
+ if (radius < 0) {
|
||||
+ throw new IllegalArgumentException("Radius must be > 0: " + radius);
|
||||
+ }
|
||||
+ return new Task(this, chunkX, chunkZ, radius, run, priority);
|
||||
+ }
|
||||
+
|
||||
+ public PrioritisedExecutor.PrioritisedTask createTask(final int chunkX, final int chunkZ, final int radius,
|
||||
+ final Runnable run) {
|
||||
+ return this.createTask(chunkX, chunkZ, radius, run, PrioritisedExecutor.Priority.NORMAL);
|
||||
+ }
|
||||
+
|
||||
+ public PrioritisedExecutor.PrioritisedTask queueTask(final int chunkX, final int chunkZ, final int radius,
|
||||
+ final Runnable run, final PrioritisedExecutor.Priority priority) {
|
||||
+ final PrioritisedExecutor.PrioritisedTask ret = this.createTask(chunkX, chunkZ, radius, run, priority);
|
||||
+
|
||||
+ ret.queue();
|
||||
+
|
||||
+ return ret;
|
||||
+ }
|
||||
+
|
||||
+ public PrioritisedExecutor.PrioritisedTask queueTask(final int chunkX, final int chunkZ, final int radius,
|
||||
+ final Runnable run) {
|
||||
+ final PrioritisedExecutor.PrioritisedTask ret = this.createTask(chunkX, chunkZ, radius, run);
|
||||
+
|
||||
+ ret.queue();
|
||||
+
|
||||
+ return ret;
|
||||
+ }
|
||||
+
|
||||
+ public PrioritisedExecutor.PrioritisedTask createInfiniteRadiusTask(final Runnable run, final PrioritisedExecutor.Priority priority) {
|
||||
+ return new Task(this, 0, 0, -1, run, priority);
|
||||
+ }
|
||||
+
|
||||
+ public PrioritisedExecutor.PrioritisedTask createInfiniteRadiusTask(final Runnable run) {
|
||||
+ return this.createInfiniteRadiusTask(run, PrioritisedExecutor.Priority.NORMAL);
|
||||
+ }
|
||||
+
|
||||
+ public PrioritisedExecutor.PrioritisedTask queueInfiniteRadiusTask(final Runnable run, final PrioritisedExecutor.Priority priority) {
|
||||
+ final PrioritisedExecutor.PrioritisedTask ret = this.createInfiniteRadiusTask(run, priority);
|
||||
+
|
||||
+ ret.queue();
|
||||
+
|
||||
+ return ret;
|
||||
+ }
|
||||
+
|
||||
+ public PrioritisedExecutor.PrioritisedTask queueInfiniteRadiusTask(final Runnable run) {
|
||||
+ final PrioritisedExecutor.PrioritisedTask ret = this.createInfiniteRadiusTask(run, PrioritisedExecutor.Priority.NORMAL);
|
||||
+
|
||||
+ ret.queue();
|
||||
+
|
||||
+ return ret;
|
||||
+ }
|
||||
+
|
||||
+ // all accesses must be synchronised by the radius aware object
|
||||
+ private static final class DependencyTree {
|
||||
+
|
||||
+ private final RadiusAwarePrioritisedExecutor scheduler;
|
||||
+ private final PrioritisedExecutor executor;
|
||||
+ private final int maxToSchedule;
|
||||
+ private final int treeIndex;
|
||||
+
|
||||
+ private int currentlyExecuting;
|
||||
+ private long idGenerator;
|
||||
+
|
||||
+ private final PriorityQueue<DependencyNode> awaiting = new PriorityQueue<>(DEPENDENCY_NODE_COMPARATOR);
|
||||
+
|
||||
+ private final PriorityQueue<DependencyNode> infiniteRadius = new PriorityQueue<>(DEPENDENCY_NODE_COMPARATOR);
|
||||
+ private boolean isInfiniteRadiusScheduled;
|
||||
+
|
||||
+ private final Long2ReferenceOpenHashMap<DependencyNode> nodeByPosition = new Long2ReferenceOpenHashMap<>();
|
||||
+
|
||||
+ public DependencyTree(final RadiusAwarePrioritisedExecutor scheduler, final PrioritisedExecutor executor,
|
||||
+ final int maxToSchedule, final int treeIndex) {
|
||||
+ this.scheduler = scheduler;
|
||||
+ this.executor = executor;
|
||||
+ this.maxToSchedule = maxToSchedule;
|
||||
+ this.treeIndex = treeIndex;
|
||||
+ }
|
||||
+
|
||||
+ public boolean hasWaitingTasks() {
|
||||
+ return !this.awaiting.isEmpty() || !this.infiniteRadius.isEmpty();
|
||||
+ }
|
||||
+
|
||||
+ private long nextId() {
|
||||
+ return this.idGenerator++;
|
||||
+ }
|
||||
+
|
||||
+ private boolean isExecutingAnyTasks() {
|
||||
+ return this.currentlyExecuting != 0;
|
||||
+ }
|
||||
+
|
||||
+ private void pushNode(final DependencyNode node) {
|
||||
+ if (!node.task.isFiniteRadius()) {
|
||||
+ this.infiniteRadius.add(node);
|
||||
+ return;
|
||||
+ }
|
||||
+
|
||||
+ // set up dependency for node
|
||||
+ final Task task = node.task;
|
||||
+
|
||||
+ final int centerX = task.chunkX;
|
||||
+ final int centerZ = task.chunkZ;
|
||||
+ final int radius = task.radius;
|
||||
+
|
||||
+ final int minX = centerX - radius;
|
||||
+ final int maxX = centerX + radius;
|
||||
+
|
||||
+ final int minZ = centerZ - radius;
|
||||
+ final int maxZ = centerZ + radius;
|
||||
+
|
||||
+ ReferenceOpenHashSet<DependencyNode> parents = null;
|
||||
+ for (int currZ = minZ; currZ <= maxZ; ++currZ) {
|
||||
+ for (int currX = minX; currX <= maxX; ++currX) {
|
||||
+ final DependencyNode dependency = this.nodeByPosition.put(CoordinateUtils.getChunkKey(currX, currZ), node);
|
||||
+ if (dependency != null) {
|
||||
+ if (parents == null) {
|
||||
+ parents = new ReferenceOpenHashSet<>();
|
||||
+ }
|
||||
+ if (parents.add(dependency)) {
|
||||
+ // added a dependency, so we need to add as a child to the dependency
|
||||
+ if (dependency.children == null) {
|
||||
+ dependency.children = new ArrayList<>();
|
||||
+ }
|
||||
+ dependency.children.add(node);
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ if (parents == null) {
|
||||
+ // no dependencies, add straight to awaiting
|
||||
+ this.awaiting.add(node);
|
||||
+ } else {
|
||||
+ node.parents = parents;
|
||||
+ // we will be added to awaiting once we have no parents
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ // called only when a node is returned after being executed
|
||||
+ private List<PrioritisedExecutor.PrioritisedTask> returnNode(final DependencyNode node) {
|
||||
+ final Task task = node.task;
|
||||
+
|
||||
+ // now that the task is completed, we can push its children to the awaiting queue
|
||||
+ this.pushChildren(node);
|
||||
+
|
||||
+ if (task.isFiniteRadius()) {
|
||||
+ // remove from dependency map
|
||||
+ this.removeNodeFromMap(node);
|
||||
+ } else {
|
||||
+ // mark as no longer executing infinite radius
|
||||
+ if (!this.isInfiniteRadiusScheduled) {
|
||||
+ throw new IllegalStateException();
|
||||
+ }
|
||||
+ this.isInfiniteRadiusScheduled = false;
|
||||
+ }
|
||||
+
|
||||
+ // decrement executing count, we are done executing this task
|
||||
+ --this.currentlyExecuting;
|
||||
+
|
||||
+ if (this.currentlyExecuting == 0) {
|
||||
+ return this.scheduler.treeFinished();
|
||||
+ }
|
||||
+
|
||||
+ return this.scheduler.canQueueTasks() ? this.tryPushTasks() : null;
|
||||
+ }
|
||||
+
|
||||
+ private List<PrioritisedExecutor.PrioritisedTask> tryPushTasks() {
|
||||
+ // tasks are not queued, but only created here - we do hold the lock for the map
|
||||
+ List<PrioritisedExecutor.PrioritisedTask> ret = null;
|
||||
+ PrioritisedExecutor.PrioritisedTask pushedTask;
|
||||
+ while ((pushedTask = this.tryPushTask()) != null) {
|
||||
+ if (ret == null) {
|
||||
+ ret = new ArrayList<>();
|
||||
+ }
|
||||
+ ret.add(pushedTask);
|
||||
+ }
|
||||
+
|
||||
+ return ret;
|
||||
+ }
|
||||
+
|
||||
+ private void removeNodeFromMap(final DependencyNode node) {
|
||||
+ final Task task = node.task;
|
||||
+
|
||||
+ final int centerX = task.chunkX;
|
||||
+ final int centerZ = task.chunkZ;
|
||||
+ final int radius = task.radius;
|
||||
+
|
||||
+ final int minX = centerX - radius;
|
||||
+ final int maxX = centerX + radius;
|
||||
+
|
||||
+ final int minZ = centerZ - radius;
|
||||
+ final int maxZ = centerZ + radius;
|
||||
+
|
||||
+ for (int currZ = minZ; currZ <= maxZ; ++currZ) {
|
||||
+ for (int currX = minX; currX <= maxX; ++currX) {
|
||||
+ this.nodeByPosition.remove(CoordinateUtils.getChunkKey(currX, currZ), node);
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ private void pushChildren(final DependencyNode node) {
|
||||
+ // add all the children that we can into awaiting
|
||||
+ final List<DependencyNode> children = node.children;
|
||||
+ if (children != null) {
|
||||
+ for (int i = 0, len = children.size(); i < len; ++i) {
|
||||
+ final DependencyNode child = children.get(i);
|
||||
+ if (!child.parents.remove(node)) {
|
||||
+ throw new IllegalStateException();
|
||||
+ }
|
||||
+ if (child.parents.isEmpty()) {
|
||||
+ // no more dependents, we can push to awaiting
|
||||
+ child.parents = null;
|
||||
+ // even if the child is purged, we need to push it so that its children will be pushed
|
||||
+ this.awaiting.add(child);
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ private DependencyNode pollAwaiting() {
|
||||
+ final DependencyNode ret = this.awaiting.poll();
|
||||
+ if (ret == null) {
|
||||
+ return ret;
|
||||
+ }
|
||||
+
|
||||
+ if (ret.parents != null) {
|
||||
+ throw new IllegalStateException();
|
||||
+ }
|
||||
+
|
||||
+ if (ret.purged) {
|
||||
+ // need to manually remove from state here
|
||||
+ this.pushChildren(ret);
|
||||
+ this.removeNodeFromMap(ret);
|
||||
+ } // else: delay children push until the task has finished
|
||||
+
|
||||
+ return ret;
|
||||
+ }
|
||||
+
|
||||
+ private DependencyNode pollInfinite() {
|
||||
+ return this.infiniteRadius.poll();
|
||||
+ }
|
||||
+
|
||||
+ public PrioritisedExecutor.PrioritisedTask tryPushTask() {
|
||||
+ if (this.currentlyExecuting >= this.maxToSchedule || this.isInfiniteRadiusScheduled) {
|
||||
+ return null;
|
||||
+ }
|
||||
+
|
||||
+ DependencyNode firstInfinite;
|
||||
+ while ((firstInfinite = this.infiniteRadius.peek()) != null && firstInfinite.purged) {
|
||||
+ this.pollInfinite();
|
||||
+ }
|
||||
+
|
||||
+ DependencyNode firstAwaiting;
|
||||
+ while ((firstAwaiting = this.awaiting.peek()) != null && firstAwaiting.purged) {
|
||||
+ this.pollAwaiting();
|
||||
+ }
|
||||
+
|
||||
+ if (firstInfinite == null && firstAwaiting == null) {
|
||||
+ return null;
|
||||
+ }
|
||||
+
|
||||
+ // firstAwaiting compared to firstInfinite
|
||||
+ final int compare;
|
||||
+
|
||||
+ if (firstAwaiting == null) {
|
||||
+ // we choose first infinite, or infinite < awaiting
|
||||
+ compare = 1;
|
||||
+ } else if (firstInfinite == null) {
|
||||
+ // we choose first awaiting, or awaiting < infinite
|
||||
+ compare = -1;
|
||||
+ } else {
|
||||
+ compare = DEPENDENCY_NODE_COMPARATOR.compare(firstAwaiting, firstInfinite);
|
||||
+ }
|
||||
+
|
||||
+ if (compare >= 0) {
|
||||
+ if (this.currentlyExecuting != 0) {
|
||||
+ // don't queue infinite task while other tasks are executing in parallel
|
||||
+ return null;
|
||||
+ }
|
||||
+ ++this.currentlyExecuting;
|
||||
+ this.pollInfinite();
|
||||
+ this.isInfiniteRadiusScheduled = true;
|
||||
+ return firstInfinite.task.pushTask(this.executor);
|
||||
+ } else {
|
||||
+ ++this.currentlyExecuting;
|
||||
+ this.pollAwaiting();
|
||||
+ return firstAwaiting.task.pushTask(this.executor);
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ private static final class DependencyNode {
|
||||
+
|
||||
+ private final Task task;
|
||||
+ private final DependencyTree tree;
|
||||
+
|
||||
+ // dependency tree fields
|
||||
+ // (must hold lock on the scheduler to use)
|
||||
+ // null is the same as empty, we just use it so that we don't allocate the set unless we need to
|
||||
+ private List<DependencyNode> children;
|
||||
+ // null is the same as empty, indicating that this task is considered "awaiting"
|
||||
+ private ReferenceOpenHashSet<DependencyNode> parents;
|
||||
+ // false -> scheduled and not cancelled
|
||||
+ // true -> scheduled but cancelled
|
||||
+ private boolean purged;
|
||||
+ private final long id;
|
||||
+
|
||||
+ public DependencyNode(final Task task, final DependencyTree tree) {
|
||||
+ this.task = task;
|
||||
+ this.id = tree.nextId();
|
||||
+ this.tree = tree;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ private static final class Task implements PrioritisedExecutor.PrioritisedTask, Runnable {
|
||||
+
|
||||
+ // task specific fields
|
||||
+ private final RadiusAwarePrioritisedExecutor scheduler;
|
||||
+ private final int chunkX;
|
||||
+ private final int chunkZ;
|
||||
+ private final int radius;
|
||||
+ private Runnable run;
|
||||
+ private PrioritisedExecutor.Priority priority;
|
||||
+
|
||||
+ private DependencyNode dependencyNode;
|
||||
+ private PrioritisedExecutor.PrioritisedTask queuedTask;
|
||||
+
|
||||
+ private Task(final RadiusAwarePrioritisedExecutor scheduler, final int chunkX, final int chunkZ, final int radius,
|
||||
+ final Runnable run, final PrioritisedExecutor.Priority priority) {
|
||||
+ this.scheduler = scheduler;
|
||||
+ this.chunkX = chunkX;
|
||||
+ this.chunkZ = chunkZ;
|
||||
+ this.radius = radius;
|
||||
+ this.run = run;
|
||||
+ this.priority = priority;
|
||||
+ }
|
||||
+
|
||||
+ private boolean isFiniteRadius() {
|
||||
+ return this.radius >= 0;
|
||||
+ }
|
||||
+
|
||||
+ private PrioritisedExecutor.PrioritisedTask pushTask(final PrioritisedExecutor executor) {
|
||||
+ return this.queuedTask = executor.createTask(this, this.priority);
|
||||
+ }
|
||||
+
|
||||
+ private void executeTask() {
|
||||
+ final Runnable run = this.run;
|
||||
+ this.run = null;
|
||||
+ run.run();
|
||||
+ }
|
||||
+
|
||||
+ private static void scheduleTasks(final List<PrioritisedExecutor.PrioritisedTask> toSchedule) {
|
||||
+ if (toSchedule != null) {
|
||||
+ for (int i = 0, len = toSchedule.size(); i < len; ++i) {
+ toSchedule.get(i).queue();
+ }
+ }
+ }
+
+ private void returnNode() {
+ final List<PrioritisedExecutor.PrioritisedTask> toSchedule;
+ synchronized (this.scheduler) {
+ final DependencyNode node = this.dependencyNode;
+ this.dependencyNode = null;
+ toSchedule = node.tree.returnNode(node);
+ }
+
+ scheduleTasks(toSchedule);
+ }
+
+ @Override
+ public void run() {
+ final Runnable run = this.run;
+ this.run = null;
+ try {
+ run.run();
+ } finally {
+ this.returnNode();
+ }
+ }
+
+ @Override
+ public boolean queue() {
+ final List<PrioritisedExecutor.PrioritisedTask> toSchedule;
+ synchronized (this.scheduler) {
+ if (this.queuedTask != null || this.dependencyNode != null || this.priority == PrioritisedExecutor.Priority.COMPLETING) {
+ return false;
+ }
+
+ toSchedule = this.scheduler.queue(this, this.priority);
+ }
+
+ scheduleTasks(toSchedule);
+ return true;
+ }
+
+ @Override
+ public boolean cancel() {
+ final PrioritisedExecutor.PrioritisedTask task;
+ synchronized (this.scheduler) {
+ if ((task = this.queuedTask) == null) {
+ if (this.priority == PrioritisedExecutor.Priority.COMPLETING) {
+ return false;
+ }
+
+ this.priority = PrioritisedExecutor.Priority.COMPLETING;
+ if (this.dependencyNode != null) {
+ this.dependencyNode.purged = true;
+ this.dependencyNode = null;
+ }
+
+ return true;
+ }
+ }
+
+ if (task.cancel()) {
+ // must manually return the node
+ this.run = null;
+ this.returnNode();
+ return true;
+ }
+ return false;
+ }
+
+ @Override
+ public boolean execute() {
+ final PrioritisedExecutor.PrioritisedTask task;
+ synchronized (this.scheduler) {
+ if ((task = this.queuedTask) == null) {
+ if (this.priority == PrioritisedExecutor.Priority.COMPLETING) {
+ return false;
+ }
+
+ this.priority = PrioritisedExecutor.Priority.COMPLETING;
+ if (this.dependencyNode != null) {
+ this.dependencyNode.purged = true;
+ this.dependencyNode = null;
+ }
+ // fall through to execution logic
+ }
+ }
+
+ if (task != null) {
+ // will run the return node logic automatically
+ return task.execute();
+ } else {
+ // don't run node removal/insertion logic, we aren't actually removed from the dependency tree
+ this.executeTask();
+ return true;
+ }
+ }
+
+ @Override
+ public PrioritisedExecutor.Priority getPriority() {
+ final PrioritisedExecutor.PrioritisedTask task;
+ synchronized (this.scheduler) {
+ if ((task = this.queuedTask) == null) {
+ return this.priority;
+ }
+ }
+
+ return task.getPriority();
+ }
+
+ @Override
+ public boolean setPriority(final PrioritisedExecutor.Priority priority) {
+ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
+ throw new IllegalArgumentException("Invalid priority " + priority);
+ }
+
+ final PrioritisedExecutor.PrioritisedTask task;
+ List<PrioritisedExecutor.PrioritisedTask> toSchedule = null;
+ synchronized (this.scheduler) {
+ if ((task = this.queuedTask) == null) {
+ if (this.priority == PrioritisedExecutor.Priority.COMPLETING) {
+ return false;
+ }
+
+ if (this.priority == priority) {
+ return true;
+ }
+
+ this.priority = priority;
+ if (this.dependencyNode != null) {
+ // need to re-insert node
+ this.dependencyNode.purged = true;
+ this.dependencyNode = null;
+ toSchedule = this.scheduler.queue(this, priority);
+ }
+ }
+ }
+
+ if (task != null) {
+ return task.setPriority(priority);
+ }
+
+ scheduleTasks(toSchedule);
+
+ return true;
+ }
+
+ @Override
+ public boolean raisePriority(final PrioritisedExecutor.Priority priority) {
+ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
+ throw new IllegalArgumentException("Invalid priority " + priority);
+ }
+
+ final PrioritisedExecutor.PrioritisedTask task;
+ List<PrioritisedExecutor.PrioritisedTask> toSchedule = null;
+ synchronized (this.scheduler) {
+ if ((task = this.queuedTask) == null) {
+ if (this.priority == PrioritisedExecutor.Priority.COMPLETING) {
+ return false;
+ }
+
+ if (this.priority.isHigherOrEqualPriority(priority)) {
+ return true;
+ }
+
+ this.priority = priority;
+ if (this.dependencyNode != null) {
+ // need to re-insert node
+ this.dependencyNode.purged = true;
+ this.dependencyNode = null;
+ toSchedule = this.scheduler.queue(this, priority);
+ }
+ }
+ }
+
+ if (task != null) {
+ return task.raisePriority(priority);
+ }
+
+ scheduleTasks(toSchedule);
+
+ return true;
+ }
+
+ @Override
+ public boolean lowerPriority(final PrioritisedExecutor.Priority priority) {
+ if (!PrioritisedExecutor.Priority.isValidPriority(priority)) {
+ throw new IllegalArgumentException("Invalid priority " + priority);
+ }
+
+ final PrioritisedExecutor.PrioritisedTask task;
+ List<PrioritisedExecutor.PrioritisedTask> toSchedule = null;
+ synchronized (this.scheduler) {
+ if ((task = this.queuedTask) == null) {
+ if (this.priority == PrioritisedExecutor.Priority.COMPLETING) {
+ return false;
+ }
+
+ if (this.priority.isLowerOrEqualPriority(priority)) {
+ return true;
+ }
+
+ this.priority = priority;
+ if (this.dependencyNode != null) {
+ // need to re-insert node
+ this.dependencyNode.purged = true;
+ this.dependencyNode = null;
+ toSchedule = this.scheduler.queue(this, priority);
+ }
+ }
+ }
+
+ if (task != null) {
+ return task.lowerPriority(priority);
+ }
+
+ scheduleTasks(toSchedule);
+
+ return true;
+ }
+ }
+}
diff --git a/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java b/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java
index c709e27a00d8617f9a3346f85bd88ce47baa9c76..b4be02ec4bb77059f79d3e4d6a6f1ee4843a01f9 100644
--- a/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java
+++ b/src/main/java/net/minecraft/server/level/ThreadedLevelLightEngine.java
@@ -94,7 +94,7 @@ public class ThreadedLevelLightEngine extends LevelLightEngine implements AutoCl
++totalChunks;
}

- this.chunkMap.level.chunkTaskScheduler.lightExecutor.queueRunnable(() -> { // Paper - rewrite chunk system
+ this.chunkMap.level.chunkTaskScheduler.radiusAwareScheduler.queueInfiniteRadiusTask(() -> { // Paper - rewrite chunk system
this.theLightEngine.relightChunks(chunks, (ChunkPos chunkPos) -> {
chunkLightCallback.accept(chunkPos);
((java.util.concurrent.Executor)((ServerLevel)this.theLightEngine.getWorld()).getChunkSource().mainThreadProcessor).execute(() -> {
@@ -1,71 +0,0 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Spottedleaf <Spottedleaf@users.noreply.github.com>
Date: Mon, 15 May 2023 12:24:17 -0700
Subject: [PATCH] Properly cancel chunk load tasks that were not scheduled

Since the chunk load task was not scheduled, the entity/poi load
task fields will not be set, and the task complete counter
will not be adjusted. Thus, the chunk load task will not complete.

To resolve this, detect when the entity/poi tasks were not scheduled
and decrement the task complete counter in such cases.
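
To make the bookkeeping concrete, here is a hypothetical, simplified sketch of the
completion-counter idea (ChunkLoadSketch, remainingTasks and the field layout are
illustrative assumptions, not the actual ChunkLoadTask code): each load owes one
completion per subtask, and a cancel() that never scheduled the subtasks has to
perform those decrements itself.

import java.util.concurrent.atomic.AtomicInteger;

final class ChunkLoadSketch {
    // one pending completion for the entity data load and one for the poi data load
    private final AtomicInteger remainingTasks = new AtomicInteger(2);
    private volatile boolean scheduled;

    void tryCompleteLoad() {
        if (this.remainingTasks.decrementAndGet() == 0) {
            // complete the chunk load here (with null data when cancelled)
        }
    }

    void cancel() {
        if (!this.scheduled) {
            // the subtasks were never created, so nothing else will ever decrement:
            // decrement once per subtask ourselves, as the patch does
            this.tryCompleteLoad(); // for the entity load task
            this.tryCompleteLoad(); // for the poi load task
        }
        // if the subtasks were scheduled, cancelling each one performs its own decrement
    }
}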

diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java
index 1f7c146ff0b2a835c818f49da6c1f1411f26aa39..34dc2153e90a29bc9102d9497c3c53b5de15508e 100644
--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/ChunkLoadTask.java
@@ -25,7 +25,6 @@ import org.slf4j.Logger;
import java.lang.invoke.VarHandle;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;

public final class ChunkLoadTask extends ChunkProgressionTask {
@@ -125,8 +124,12 @@ public final class ChunkLoadTask extends ChunkProgressionTask {
@Override
public void cancel() {
// must be before load task access, so we can synchronise with the writes to the fields
+ final boolean scheduled;
this.scheduler.schedulingLock.lock();
try {
+ // fix cancellation of chunk load task - must read field here, as it may be written later concurrently -
+ // we need to know if we scheduled _before_ cancellation
+ scheduled = this.scheduled;
this.cancelled = true;
} finally {
this.scheduler.schedulingLock.unlock();
@@ -139,15 +142,26 @@ public final class ChunkLoadTask extends ChunkProgressionTask {
the chunk load task attempts to complete with a non-null value
*/

- if (this.entityLoadTask != null) {
- if (this.entityLoadTask.cancel()) {
- this.tryCompleteLoad();
+ if (scheduled) {
+ // since we scheduled, we need to cancel the tasks
+ if (this.entityLoadTask != null) {
+ if (this.entityLoadTask.cancel()) {
+ this.tryCompleteLoad();
+ }
}
- }
- if (this.poiLoadTask != null) {
- if (this.poiLoadTask.cancel()) {
- this.tryCompleteLoad();
+ if (this.poiLoadTask != null) {
+ if (this.poiLoadTask.cancel()) {
+ this.tryCompleteLoad();
+ }
}
+ } else {
+ // since nothing was scheduled, we need to decrement the task count here ourselves
+
+ // for entity load task
+ this.tryCompleteLoad();
+
+ // for poi load task
+ this.tryCompleteLoad();
}
this.loadTask.cancel();
}
@@ -1,110 +0,0 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Spottedleaf <Spottedleaf@users.noreply.github.com>
Date: Mon, 15 May 2023 11:34:28 -0700
Subject: [PATCH] Mark POI/Entity load tasks as completed before releasing
scheduling lock

It must be marked as completed during that lock hold since the
waiters field is set to null. Thus, any other thread attempting
a cancellation will fail to remove the callback from waiters. Also, any
other thread attempting to cancel may set the completed field
to true, which would cause accept() to fail as well.

Completion was always designed to happen while holding the
scheduling lock to prevent these race conditions. The code
was originally set up to complete while not holding the
scheduling lock to avoid invoking callbacks while holding the
lock; however, the access to the completion field was not
considered.

Resolve this by marking the callback as completed during the
lock, but invoking the accept() function after releasing
the lock. This prevents any cancellation attempts from being
blocked, and allows the current thread to complete the callback
without any issues.
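
As a rough illustration of the pattern described above (a hypothetical sketch, not the
NewChunkHolder API; Waiter and complete() are invented names): every waiter is flagged
as completed while the scheduling lock is still held, and the consumer callbacks are
invoked only after the lock has been released.

import java.util.List;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Consumer;

final class CompletionSketch {
    static final class Waiter {
        volatile boolean completed;
        Consumer<Object> consumer;
    }

    private final ReentrantLock schedulingLock = new ReentrantLock();

    void complete(final List<Waiter> waiters, final Object result) {
        this.schedulingLock.lock();
        try {
            for (final Waiter waiter : waiters) {
                waiter.completed = true; // a concurrent cancel() now fails instead of racing
            }
        } finally {
            this.schedulingLock.unlock();
        }
        // run the callbacks without holding the lock so they cannot block other threads
        // that need the scheduling lock
        for (final Waiter waiter : waiters) {
            waiter.consumer.accept(result);
        }
    }
}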

diff --git a/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java b/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java
index 8013dd333e27aa5fd0beb431fa32491eec9f5246..efc9b7a304f10b6a23a36cffb0a4aaea6ab71129 100644
--- a/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java
+++ b/src/main/java/io/papermc/paper/chunk/system/scheduling/NewChunkHolder.java
@@ -156,6 +156,12 @@ public final class NewChunkHolder {
LOGGER.error("Unhandled entity data load exception, data data will be lost: ", result.right());
}

+ // Folia start - mark these tasks as completed before releasing the scheduling lock
+ for (final GenericDataLoadTaskCallback callback : waiters) {
+ callback.markCompleted();
+ }
+ // Folia end - mark these tasks as completed before releasing the scheduling lock
+
completeWaiters = waiters;
} else {
// cancelled
@@ -187,7 +193,7 @@ public final class NewChunkHolder {
// avoid holding the scheduling lock while completing
if (completeWaiters != null) {
for (final GenericDataLoadTaskCallback callback : completeWaiters) {
- callback.accept(result);
+ callback.acceptCompleted(result); // Folia - mark these tasks as completed before releasing the scheduling lock
}
}

@@ -273,6 +279,12 @@ public final class NewChunkHolder {
LOGGER.error("Unhandled poi load exception, poi data will be lost: ", result.right());
}

+ // Folia start - mark these tasks as completed before releasing the scheduling lock
+ for (final GenericDataLoadTaskCallback callback : waiters) {
+ callback.markCompleted();
+ }
+ // Folia end - mark these tasks as completed before releasing the scheduling lock
+
completeWaiters = waiters;
} else {
// cancelled
@@ -304,7 +316,7 @@ public final class NewChunkHolder {
// avoid holding the scheduling lock while completing
if (completeWaiters != null) {
for (final GenericDataLoadTaskCallback callback : completeWaiters) {
- callback.accept(result);
+ callback.acceptCompleted(result); // Folia - mark these tasks as completed before releasing the scheduling lock
}
}
this.scheduler.schedulingLock.lock();
@@ -357,7 +369,7 @@ public final class NewChunkHolder {
}
}

- public static abstract class GenericDataLoadTaskCallback implements Cancellable, Consumer<GenericDataLoadTask.TaskResult<?, Throwable>> {
+ public static abstract class GenericDataLoadTaskCallback implements Cancellable { // Folia - mark callbacks as completed before unlocking scheduling lock

protected final Consumer<GenericDataLoadTask.TaskResult<?, Throwable>> consumer;
protected final NewChunkHolder chunkHolder;
@@ -393,13 +405,23 @@ public final class NewChunkHolder {
return this.completed = true;
}

- @Override
- public void accept(final GenericDataLoadTask.TaskResult<?, Throwable> result) {
+ // Folia start - mark callbacks as completed before unlocking scheduling lock
+ // must hold scheduling lock
+ void markCompleted() {
+ if (this.completed) {
+ throw new IllegalStateException("May not be completed here");
+ }
+ this.completed = true;
+ }
+ // Folia end - mark callbacks as completed before unlocking scheduling lock
+
+ // Folia - mark callbacks as completed before unlocking scheduling lock
+ void acceptCompleted(final GenericDataLoadTask.TaskResult<?, Throwable> result) {
if (result != null) {
- if (this.setCompleted()) {
+ if (this.completed) { // Folia - mark callbacks as completed before unlocking scheduling lock
this.consumer.accept(result);
} else {
- throw new IllegalStateException("Cannot be cancelled at this point");
+ throw new IllegalStateException("Cannot be uncompleted at this point"); // Folia - mark callbacks as completed before unlocking scheduling lock
}
} else {
throw new NullPointerException("Result cannot be null (cancelled)");
@@ -1,119 +0,0 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Spottedleaf <Spottedleaf@users.noreply.github.com>
Date: Thu, 2 Mar 2023 23:19:04 -0800
Subject: [PATCH] Cache whether region files do not exist

The repeated I/O of creating the directory for the regionfile
or for checking if the file exists can be heavy when pushing
chunk generation extremely hard - as each chunk gen request
may effectively go through to the I/O thread.
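
The diff below keeps a bounded, most-recently-used set of region positions that are known
not to exist. A hypothetical sketch of the same idea, using a plain LinkedHashMap in
access order instead of the fastutil LongLinkedOpenHashSet the patch uses (class and
method names are illustrative):

import java.util.LinkedHashMap;
import java.util.Map;

final class MissingRegionFileCache {
    private static final int MAX_NON_EXISTING_CACHE = 1024 * 64;

    private final LinkedHashMap<Long, Boolean> nonExisting =
        new LinkedHashMap<>(16, 0.75f, true) {
            @Override
            protected boolean removeEldestEntry(final Map.Entry<Long, Boolean> eldest) {
                return this.size() > MAX_NON_EXISTING_CACHE; // evict least-recently-used keys
            }
        };

    synchronized boolean knownMissing(final long regionKey) {
        return this.nonExisting.get(regionKey) != null; // get() also refreshes the LRU position
    }

    synchronized void markMissing(final long regionKey) {
        this.nonExisting.put(regionKey, Boolean.TRUE);
    }

    synchronized void markCreated(final long regionKey) {
        this.nonExisting.remove(regionKey); // the file exists now, drop the negative entry
    }
}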

diff --git a/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java b/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java
index a08cde4eefe879adcee7c4118bc38f98c5097ed0..8a11e10b01fa012b2f98b1c193c53251e848f909 100644
--- a/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java
+++ b/src/main/java/io/papermc/paper/chunk/system/io/RegionFileIOThread.java
@@ -819,8 +819,14 @@ public final class RegionFileIOThread extends PrioritisedQueueExecutorThread {
return file.hasChunk(chunkPos) ? Boolean.TRUE : Boolean.FALSE;
});
} else {
+ // first check if the region file for sure does not exist
+ if (taskController.doesRegionFileNotExist(chunkX, chunkZ)) {
+ return Boolean.FALSE;
+ } // else: it either exists or is not known, fall back to checking the loaded region file
+
return taskController.computeForRegionFileIfLoaded(chunkX, chunkZ, (final RegionFile file) -> {
if (file == null) { // null if not loaded
+ // not sure at this point, let the I/O thread figure it out
return Boolean.TRUE;
}

@@ -1116,6 +1122,10 @@ public final class RegionFileIOThread extends PrioritisedQueueExecutorThread {
return !this.tasks.isEmpty();
}

+ public boolean doesRegionFileNotExist(final int chunkX, final int chunkZ) {
+ return this.getCache().doesRegionFileNotExistNoIO(new ChunkPos(chunkX, chunkZ));
+ }
+
public <T> T computeForRegionFile(final int chunkX, final int chunkZ, final boolean existingOnly, final Function<RegionFile, T> function) {
final RegionFileStorage cache = this.getCache();
final RegionFile regionFile;
diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java
index 18ef7025f7f4dc2a4aff85ca65ff5a2d35a1ef06..fe8bb0037bb7f317fc32ac34461f4eb3a1f397f2 100644
--- a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java
@@ -24,6 +24,35 @@ public class RegionFileStorage implements AutoCloseable {
private final Path folder;
private final boolean sync;

+ // Paper start - cache regionfile does not exist state
+ static final int MAX_NON_EXISTING_CACHE = 1024 * 64;
+ private final it.unimi.dsi.fastutil.longs.LongLinkedOpenHashSet nonExistingRegionFiles = new it.unimi.dsi.fastutil.longs.LongLinkedOpenHashSet();
+ private synchronized boolean doesRegionFilePossiblyExist(long position) {
+ if (this.nonExistingRegionFiles.contains(position)) {
+ this.nonExistingRegionFiles.addAndMoveToFirst(position);
+ return false;
+ }
+ return true;
+ }
+
+ private synchronized void createRegionFile(long position) {
+ this.nonExistingRegionFiles.remove(position);
+ }
+
+ private synchronized void markNonExisting(long position) {
+ if (this.nonExistingRegionFiles.addAndMoveToFirst(position)) {
+ while (this.nonExistingRegionFiles.size() >= MAX_NON_EXISTING_CACHE) {
+ this.nonExistingRegionFiles.removeLastLong();
+ }
+ }
+ }
+
+ public synchronized boolean doesRegionFileNotExistNoIO(ChunkPos pos) {
+ long key = ChunkPos.asLong(pos.getRegionX(), pos.getRegionZ());
+ return !this.doesRegionFilePossiblyExist(key);
+ }
+ // Paper end - cache regionfile does not exist state
+
protected RegionFileStorage(Path directory, boolean dsync) { // Paper - protected constructor
this.folder = directory;
this.sync = dsync;
@@ -45,7 +74,7 @@ public class RegionFileStorage implements AutoCloseable {
}
public synchronized RegionFile getRegionFile(ChunkPos chunkcoordintpair, boolean existingOnly, boolean lock) throws IOException {
// Paper end
- long i = ChunkPos.asLong(chunkcoordintpair.getRegionX(), chunkcoordintpair.getRegionZ());
+ long i = ChunkPos.asLong(chunkcoordintpair.getRegionX(), chunkcoordintpair.getRegionZ()); final long regionPos = i; // Paper - OBFHELPER
RegionFile regionfile = (RegionFile) this.regionCache.getAndMoveToFirst(i);

if (regionfile != null) {
@@ -57,15 +86,27 @@ public class RegionFileStorage implements AutoCloseable {
// Paper end
return regionfile;
} else {
+ // Paper start - cache regionfile does not exist state
+ if (existingOnly && !this.doesRegionFilePossiblyExist(regionPos)) {
+ return null;
+ }
+ // Paper end - cache regionfile does not exist state
if (this.regionCache.size() >= 256) {
((RegionFile) this.regionCache.removeLast()).close();
}

- FileUtil.createDirectoriesSafe(this.folder);
+ // Paper - only create directory if not existing only - moved down
Path path = this.folder;
int j = chunkcoordintpair.getRegionX();
Path path1 = path.resolve("r." + j + "." + chunkcoordintpair.getRegionZ() + ".mca");
- if (existingOnly && !java.nio.file.Files.exists(path1)) return null; // CraftBukkit
+ if (existingOnly && !java.nio.file.Files.exists(path1)) { // Paper start - cache regionfile does not exist state
+ this.markNonExisting(regionPos);
+ return null; // CraftBukkit
+ } else {
+ this.createRegionFile(regionPos);
+ }
+ // Paper end - cache regionfile does not exist state
+ FileUtil.createDirectoriesSafe(this.folder); // Paper - only create directory if not existing only - moved from above
RegionFile regionfile1 = new RegionFile(path1, this.folder, this.sync);

this.regionCache.putAndMoveToFirst(i, regionfile1);
Some files were not shown because too many files have changed in this diff