mirror of https://github.com/PaperMC/Paper.git
even even even even even even even even even even even even even even even even even even even even even even even even even even even even even even even even even even even even even even even even even even even even even even even more patches
parent 28459aeafe
commit 437205bba8
14 changed files with 148 additions and 389 deletions
@@ -91,14 +91,14 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
}
private void doSendPacket(Packet<?> packet, @Nullable GenericFutureListener<? extends Future<? super Void>> callback, ConnectionProtocol enumprotocol, ConnectionProtocol enumprotocol1) {
private void doSendPacket(Packet<?> packet, @Nullable GenericFutureListener<? extends Future<? super Void>> callback, ConnectionProtocol packetState, ConnectionProtocol currentState) {
+ // Paper start - add flush parameter
+ this.doSendPacket(packet, callback, enumprotocol, enumprotocol1, true);
+ this.doSendPacket(packet, callback, packetState, currentState, true);
+ }
+ private void doSendPacket(Packet<?> packet, @Nullable GenericFutureListener<? extends Future<? super Void>> callback, ConnectionProtocol enumprotocol, ConnectionProtocol enumprotocol1, boolean flush) {
+ private void doSendPacket(Packet<?> packet, @Nullable GenericFutureListener<? extends Future<? super Void>> callback, ConnectionProtocol packetState, ConnectionProtocol currentState, boolean flush) {
+ // Paper end - add flush parameter
if (enumprotocol != enumprotocol1) {
this.setProtocol(enumprotocol);
if (packetState != currentState) {
this.setProtocol(packetState);
}
@@ -0,0 +0,0 @@ public class Connection extends SimpleChannelInboundHandler<Packet<?>> {
@@ -13,38 +13,28 @@ diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializ
index 0000000000000000000000000000000000000000..0000000000000000000000000000000000000000 100644
--- a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkSerializer.java
@@ -0,0 +0,0 @@ public class ChunkSerializer {
private static final String SKYLIGHT_STATE_TAG = "starlight.skylight_state";
private static final String STARLIGHT_VERSION_TAG = "starlight.light_version";
// Paper end - replace light engine impl
@@ -0,0 +0,0 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class ChunkSerializer {
+ // Paper start
+ // TODO: Check on update
+ public static long getLastWorldSaveTime(CompoundTag chunkData) {
+ CompoundTag levelData = chunkData.getCompound("Level");
+ return levelData.getLong("LastUpdate");
+ return chunkData.getLong("LastUpdate");
+ }
+ // Paper end

public static final Codec<PalettedContainer<BlockState>> BLOCK_STATE_CODEC = PalettedContainer.codec(Block.BLOCK_STATE_REGISTRY, BlockState.CODEC, PalettedContainer.Strategy.SECTION_STATES, Blocks.AIR.defaultBlockState());
private static final Logger LOGGER = LogManager.getLogger();
public static final String TAG_UPGRADE_DATA = "UpgradeData";
@@ -0,0 +0,0 @@ public class ChunkSerializer {
}
// Paper end
BiomeSource worldchunkmanager = chunkgenerator.getBiomeSource();
- CompoundTag nbttagcompound1 = nbt.getCompound("Level"); // Paper - diff on change, see ChunkSerializer#getChunkCoordinate
+ CompoundTag nbttagcompound1 = nbt.getCompound("Level"); // Paper - diff on change, see ChunkSerializer#getChunkCoordinate // Paper - diff on change
ChunkPos chunkcoordintpair1 = new ChunkPos(nbttagcompound1.getInt("xPos"), nbttagcompound1.getInt("zPos")); // Paper - diff on change, see ChunkSerializer#getChunkCoordinate

if (!Objects.equals(pos, chunkcoordintpair1)) {
@@ -0,0 +0,0 @@ public class ChunkSerializer {
nbttagcompound.put("Level", nbttagcompound1);
nbttagcompound1.putInt("xPos", chunkcoordintpair.x);
nbttagcompound1.putInt("zPos", chunkcoordintpair.z);
- nbttagcompound1.putLong("LastUpdate", asyncsavedata != null ? asyncsavedata.worldTime : world.getGameTime()); // Paper - async chunk unloading
+ nbttagcompound1.putLong("LastUpdate", asyncsavedata != null ? asyncsavedata.worldTime : world.getGameTime()); // Paper - async chunk unloading // Paper - diff on change
nbttagcompound1.putLong("InhabitedTime", chunk.getInhabitedTime());
nbttagcompound1.putString("Status", chunk.getStatus().getName());
UpgradeData chunkconverter = chunk.getUpgradeData();
nbttagcompound.putInt("xPos", chunkcoordintpair.x);
nbttagcompound.putInt("yPos", chunk.getMinSection());
nbttagcompound.putInt("zPos", chunkcoordintpair.z);
- nbttagcompound.putLong("LastUpdate", asyncsavedata != null ? asyncsavedata.worldTime : world.getGameTime()); // Paper - async chunk unloading
+ nbttagcompound.putLong("LastUpdate", asyncsavedata != null ? asyncsavedata.worldTime : world.getGameTime()); // Paper - async chunk unloading // Paper - diff on change
nbttagcompound.putLong("InhabitedTime", chunk.getInhabitedTime());
nbttagcompound.putString("Status", chunk.getStatus().getName());
BlendingData blendingdata = chunk.getBlendingData();
diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkStorage.java b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkStorage.java
index 0000000000000000000000000000000000000000..0000000000000000000000000000000000000000 100644
--- a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkStorage.java
@@ -96,7 +86,7 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFile.java
|
||||
@@ -0,0 +0,0 @@ public class RegionFile implements AutoCloseable {
|
||||
public final java.util.concurrent.locks.ReentrantLock fileLock = new java.util.concurrent.locks.ReentrantLock(true); // Paper
|
||||
public final File regionFile; // Paper
|
||||
public final Path regionFile; // Paper
|
||||
|
||||
+ // Paper start - try to recover from RegionFile header corruption
|
||||
+ private static long roundToSectors(long bytes) {
|
||||
|
@@ -140,7 +130,7 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+
|
||||
+ InputStream input = compression.wrap(new ByteArrayInputStream(chunkData.array(), chunkData.position(), chunkDataLength - chunkData.position()));
|
||||
+
|
||||
+ return NbtIo.read((java.io.DataInput)new DataInputStream(new BufferedInputStream(input)));
|
||||
+ return NbtIo.read(new DataInputStream(input));
|
||||
+ } catch (Exception ex) {
|
||||
+ return null;
|
||||
+ }
|
||||
|
@@ -156,18 +146,18 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+ }
|
||||
+
|
||||
+ private void backupRegionFile() {
|
||||
+ File backup = new File(this.regionFile.getParent(), this.regionFile.getName() + "." + new java.util.Random().nextLong() + ".backup");
|
||||
+ Path backup = this.regionFile.getParent().resolve(this.regionFile.getFileName() + "." + new java.util.Random().nextLong() + ".backup");
|
||||
+ this.backupRegionFile(backup);
|
||||
+ }
|
||||
+
|
||||
+ private void backupRegionFile(File to) {
|
||||
+ private void backupRegionFile(Path to) {
|
||||
+ try {
|
||||
+ this.file.force(true);
|
||||
+ LOGGER.warn("Backing up regionfile \"" + this.regionFile.getAbsolutePath() + "\" to " + to.getAbsolutePath());
|
||||
+ java.nio.file.Files.copy(this.regionFile.toPath(), to.toPath());
|
||||
+ LOGGER.warn("Backed up the regionfile to " + to.getAbsolutePath());
|
||||
+ LOGGER.warn("Backing up regionfile \"" + this.regionFile.toAbsolutePath() + "\" to " + to.toAbsolutePath());
|
||||
+ java.nio.file.Files.copy(this.regionFile, to);
|
||||
+ LOGGER.warn("Backed up the regionfile to " + to.toAbsolutePath());
|
||||
+ } catch (IOException ex) {
|
||||
+ LOGGER.error("Failed to backup to " + to.getAbsolutePath(), ex);
|
||||
+ LOGGER.error("Failed to backup to " + to.toAbsolutePath(), ex);
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
|
@@ -182,11 +172,11 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+ }
|
||||
+ ChunkPos ourLowerLeftPosition = RegionFileStorage.getRegionFileCoordinates(this.regionFile);
|
||||
+ if (ourLowerLeftPosition == null) {
|
||||
+ LOGGER.fatal("Unable to get chunk location of regionfile " + this.regionFile.getAbsolutePath() + ", cannot recover header");
|
||||
+ LOGGER.fatal("Unable to get chunk location of regionfile " + this.regionFile.toAbsolutePath() + ", cannot recover header");
|
||||
+ return false;
|
||||
+ }
|
||||
+ synchronized (this) {
|
||||
+ LOGGER.warn("Corrupt regionfile header detected! Attempting to re-calculate header offsets for regionfile " + this.regionFile.getAbsolutePath(), new Throwable());
|
||||
+ LOGGER.warn("Corrupt regionfile header detected! Attempting to re-calculate header offsets for regionfile " + this.regionFile.toAbsolutePath(), new Throwable());
|
||||
+
|
||||
+ // try to backup file so maybe it could be sent to us for further investigation
|
||||
+
|
||||
|
@@ -210,7 +200,7 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+
|
||||
+ ChunkPos chunkPos = ChunkSerializer.getChunkCoordinate(compound);
|
||||
+ if (!inSameRegionfile(ourLowerLeftPosition, chunkPos)) {
|
||||
+ LOGGER.error("Ignoring absolute chunk " + chunkPos + " in regionfile as it is not contained in the bounds of the regionfile '" + this.regionFile.getAbsolutePath() + "'. It should be in regionfile (" + (chunkPos.x >> 5) + "," + (chunkPos.z >> 5) + ")");
|
||||
+ LOGGER.error("Ignoring absolute chunk " + chunkPos + " in regionfile as it is not contained in the bounds of the regionfile '" + this.regionFile.toAbsolutePath() + "'. It should be in regionfile (" + (chunkPos.x >> 5) + "," + (chunkPos.z >> 5) + ")");
|
||||
+ continue;
|
||||
+ }
|
||||
+ int location = (chunkPos.x & 31) | ((chunkPos.z & 31) << 5);
|
||||
|
@@ -222,9 +212,9 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+ }
|
||||
+
|
||||
+ // aikar oversized?
|
||||
+ File aikarOversizedFile = this.getOversizedFile(chunkPos.x, chunkPos.z);
|
||||
+ Path aikarOversizedFile = this.getOversizedFile(chunkPos.x, chunkPos.z);
|
||||
+ boolean isAikarOversized = false;
|
||||
+ if (aikarOversizedFile.exists()) {
|
||||
+ if (Files.exists(aikarOversizedFile)) {
|
||||
+ try {
|
||||
+ CompoundTag aikarOversizedCompound = this.getOversizedData(chunkPos.x, chunkPos.z);
|
||||
+ if (ChunkSerializer.getLastWorldSaveTime(compound) == ChunkSerializer.getLastWorldSaveTime(aikarOversizedCompound)) {
|
||||
|
@@ -232,7 +222,7 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+ isAikarOversized = true;
|
||||
+ }
|
||||
+ } catch (Exception ex) {
|
||||
+ LOGGER.error("Failed to read aikar oversized data for absolute chunk (" + chunkPos.x + "," + chunkPos.z + ") in regionfile " + this.regionFile.getAbsolutePath() + ", oversized data for this chunk will be lost", ex);
|
||||
+ LOGGER.error("Failed to read aikar oversized data for absolute chunk (" + chunkPos.x + "," + chunkPos.z + ") in regionfile " + this.regionFile.toAbsolutePath() + ", oversized data for this chunk will be lost", ex);
|
||||
+ // fall through, if we can't read aikar oversized we can't risk corrupting chunk data
|
||||
+ }
|
||||
+ }
|
||||
|
@@ -252,7 +242,7 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+ // local data compound
|
||||
+
|
||||
+ java.nio.file.Path containingFolder = this.externalFileDir;
|
||||
+ File[] regionFiles = containingFolder.toFile().listFiles();
|
||||
+ Path[] regionFiles = Files.list(containingFolder).toArray(Path[]::new);
|
||||
+ boolean[] oversized = new boolean[32 * 32];
|
||||
+ RegionFileVersion[] oversizedCompressionTypes = new RegionFileVersion[32 * 32];
|
||||
+
|
||||
|
@@ -263,7 +253,7 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+ int upperZBound = lowerZBound + 32 - 1; // inclusive
|
||||
+
|
||||
+ // read mojang oversized data
|
||||
+ for (File regionFile : regionFiles) {
|
||||
+ for (Path regionFile : regionFiles) {
|
||||
+ ChunkPos oversizedCoords = getOversizedChunkPair(regionFile);
|
||||
+ if (oversizedCoords == null) {
|
||||
+ continue;
|
||||
|
@@ -279,9 +269,9 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+
|
||||
+ byte[] chunkData;
|
||||
+ try {
|
||||
+ chunkData = Files.readAllBytes(regionFile.toPath());
|
||||
+ chunkData = Files.readAllBytes(regionFile);
|
||||
+ } catch (Exception ex) {
|
||||
+ LOGGER.error("Failed to read oversized chunk data in file " + regionFile.getAbsolutePath() + ", data will be lost", ex);
|
||||
+ LOGGER.error("Failed to read oversized chunk data in file " + regionFile.toAbsolutePath() + ", data will be lost", ex);
|
||||
+ continue;
|
||||
+ }
|
||||
+
|
||||
|
@@ -291,7 +281,7 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+ RegionFileVersion compression = null;
|
||||
+ for (RegionFileVersion compressionType : RegionFileVersion.VERSIONS.values()) {
|
||||
+ try {
|
||||
+ DataInputStream in = new DataInputStream(new BufferedInputStream(compressionType.wrap(new ByteArrayInputStream(chunkData)))); // typical java
|
||||
+ DataInputStream in = new DataInputStream(compressionType.wrap(new ByteArrayInputStream(chunkData))); // typical java
|
||||
+ compound = NbtIo.read((java.io.DataInput)in);
|
||||
+ compression = compressionType;
|
||||
+ break; // reaches here iff readNBT does not throw
|
||||
|
@@ -301,12 +291,12 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+ }
|
||||
+
|
||||
+ if (compound == null) {
|
||||
+ LOGGER.error("Failed to read oversized chunk data in file " + regionFile.getAbsolutePath() + ", it's corrupt. Its data will be lost");
|
||||
+ LOGGER.error("Failed to read oversized chunk data in file " + regionFile.toAbsolutePath() + ", it's corrupt. Its data will be lost");
|
||||
+ continue;
|
||||
+ }
|
||||
+
|
||||
+ if (!ChunkSerializer.getChunkCoordinate(compound).equals(oversizedCoords)) {
|
||||
+ LOGGER.error("Can't use oversized chunk stored in " + regionFile.getAbsolutePath() + ", got absolute chunkpos: " + ChunkSerializer.getChunkCoordinate(compound) + ", expected " + oversizedCoords);
|
||||
+ LOGGER.error("Can't use oversized chunk stored in " + regionFile.toAbsolutePath() + ", got absolute chunkpos: " + ChunkSerializer.getChunkCoordinate(compound) + ", expected " + oversizedCoords);
|
||||
+ continue;
|
||||
+ }
|
||||
+
|
||||
|
@@ -340,7 +330,7 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+ if (newSectorAllocations.tryAllocate(sectorOffset, sectorLength)) {
|
||||
+ calculatedOffsets[location] = sectorOffset << 8 | (sectorLength > 255 ? 255 : sectorLength); // support forge style oversized
|
||||
+ } else {
|
||||
+ LOGGER.error("Failed to allocate space for local chunk (overlapping data??) at (" + chunkX + "," + chunkZ + ") in regionfile " + this.regionFile.getAbsolutePath() + ", chunk will be regenerated");
|
||||
+ LOGGER.error("Failed to allocate space for local chunk (overlapping data??) at (" + chunkX + "," + chunkZ + ") in regionfile " + this.regionFile.toAbsolutePath() + ", chunk will be regenerated");
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
|
@@ -364,7 +354,7 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+ calculatedOffsets[location] = sectorOffset << 8 | (sectorLength > 255 ? 255 : sectorLength); // support forge style oversized
|
||||
+ } catch (IOException ex) {
|
||||
+ newSectorAllocations.free(sectorOffset, sectorLength);
|
||||
+ LOGGER.error("Failed to write new oversized chunk data holder, local chunk at (" + chunkX + "," + chunkZ + ") in regionfile " + this.regionFile.getAbsolutePath() + " will be regenerated");
|
||||
+ LOGGER.error("Failed to write new oversized chunk data holder, local chunk at (" + chunkX + "," + chunkZ + ") in regionfile " + this.regionFile.toAbsolutePath() + " will be regenerated");
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
|
@@ -386,18 +376,18 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+ try {
|
||||
+ this.writeOversizedMeta();
|
||||
+ } catch (Exception ex) {
|
||||
+ LOGGER.error("Failed to write aikar oversized chunk meta, all aikar style oversized chunk data will be lost for regionfile " + this.regionFile.getAbsolutePath(), ex);
|
||||
+ this.getOversizedMetaFile().delete();
|
||||
+ LOGGER.error("Failed to write aikar oversized chunk meta, all aikar style oversized chunk data will be lost for regionfile " + this.regionFile.toAbsolutePath(), ex);
|
||||
+ Files.deleteIfExists(this.getOversizedMetaFile());
|
||||
+ }
|
||||
+ } else {
|
||||
+ this.getOversizedMetaFile().delete();
|
||||
+ Files.deleteIfExists(this.getOversizedMetaFile());
|
||||
+ }
|
||||
+
|
||||
+ this.usedSectors.copyFrom(newSectorAllocations);
|
||||
+
|
||||
+ // before we overwrite the old sectors, print a summary of the chunks that got changed.
|
||||
+
|
||||
+ LOGGER.info("Starting summary of changes for regionfile " + this.regionFile.getAbsolutePath());
|
||||
+ LOGGER.info("Starting summary of changes for regionfile " + this.regionFile.toAbsolutePath());
|
||||
+
|
||||
+ for (int chunkX = 0; chunkX < 32; ++chunkX) {
|
||||
+ for (int chunkZ = 0; chunkZ < 32; ++chunkZ) {
|
||||
|
@@ -414,16 +404,16 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+
|
||||
+ if (oldOffset == 0) {
|
||||
+ // found lost data
|
||||
+ LOGGER.info("Found missing data for local chunk (" + chunkX + "," + chunkZ + ") in regionfile " + this.regionFile.getAbsolutePath());
|
||||
+ LOGGER.info("Found missing data for local chunk (" + chunkX + "," + chunkZ + ") in regionfile " + this.regionFile.toAbsolutePath());
|
||||
+ } else if (newOffset == 0) {
|
||||
+ LOGGER.warn("Data for local chunk (" + chunkX + "," + chunkZ + ") could not be recovered in regionfile " + this.regionFile.getAbsolutePath() + ", it will be regenerated");
|
||||
+ LOGGER.warn("Data for local chunk (" + chunkX + "," + chunkZ + ") could not be recovered in regionfile " + this.regionFile.toAbsolutePath() + ", it will be regenerated");
|
||||
+ } else {
|
||||
+ LOGGER.info("Local chunk (" + chunkX + "," + chunkZ + ") changed to point to newer data or correct chunk in regionfile " + this.regionFile.getAbsolutePath());
|
||||
+ LOGGER.info("Local chunk (" + chunkX + "," + chunkZ + ") changed to point to newer data or correct chunk in regionfile " + this.regionFile.toAbsolutePath());
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ LOGGER.info("End of change summary for regionfile " + this.regionFile.getAbsolutePath());
|
||||
+ LOGGER.info("End of change summary for regionfile " + this.regionFile.toAbsolutePath());
|
||||
+
|
||||
+ // simply destroy the timestamp header, it's not used
|
||||
+
|
||||
|
@@ -435,9 +425,9 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+ try {
|
||||
+ this.flush();
|
||||
+ this.file.force(true); // try to ensure it goes through...
|
||||
+ LOGGER.info("Successfully wrote new header to disk for regionfile " + this.regionFile.getAbsolutePath());
|
||||
+ LOGGER.info("Successfully wrote new header to disk for regionfile " + this.regionFile.toAbsolutePath());
|
||||
+ } catch (IOException ex) {
|
||||
+ LOGGER.fatal("Failed to write new header to disk for regionfile " + this.regionFile.getAbsolutePath(), ex);
|
||||
+ LOGGER.fatal("Failed to write new header to disk for regionfile " + this.regionFile.toAbsolutePath(), ex);
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
|
@@ -451,12 +441,12 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
private final ChunkStatus[] statuses = new ChunkStatus[32 * 32];
|
||||
|
||||
@@ -0,0 +0,0 @@ public class RegionFile implements AutoCloseable {
|
||||
public RegionFile(File file, File directory, boolean dsync) throws IOException {
|
||||
this(file.toPath(), directory.toPath(), RegionFileVersion.VERSION_DEFLATE, dsync);
|
||||
public RegionFile(Path file, Path directory, boolean dsync) throws IOException {
|
||||
this(file, directory, RegionFileVersion.VERSION_DEFLATE, dsync);
|
||||
}
|
||||
+ // Paper start - add can recalc flag
|
||||
+ public RegionFile(File file, File directory, boolean dsync, boolean canRecalcHeader) throws IOException {
|
||||
+ this(file.toPath(), directory.toPath(), RegionFileVersion.VERSION_DEFLATE, dsync, canRecalcHeader);
|
||||
+ public RegionFile(Path file, Path directory, boolean dsync, boolean canRecalcHeader) throws IOException {
|
||||
+ this(file, directory, RegionFileVersion.VERSION_DEFLATE, dsync, canRecalcHeader);
|
||||
+ }
|
||||
+ // Paper end - add can recalc flag
|
||||
|
||||
|
@@ -468,7 +458,7 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+ this.canRecalcHeader = canRecalcHeader;
|
||||
+ // Paper end - add can recalc flag
|
||||
this.header = ByteBuffer.allocateDirect(8192);
|
||||
this.regionFile = file.toFile(); // Paper
|
||||
this.regionFile = file; // Paper
|
||||
initOversizedState(); // Paper
|
||||
@@ -0,0 +0,0 @@ public class RegionFile implements AutoCloseable {
|
||||
RegionFile.LOGGER.warn("Region file {} has truncated header: {}", file, i);
|
||||
|
@@ -516,12 +506,12 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+ // Paper start - recalculate header on header corruption
|
||||
+ if (offset < 2 || sectorLength <= 0 || ((long)offset * 4096L) > regionFileSize) {
|
||||
+ if (canRecalcHeader) {
|
||||
+ LOGGER.error("Detected invalid header for regionfile " + this.regionFile.getAbsolutePath() + "! Recalculating header...");
|
||||
+ LOGGER.error("Detected invalid header for regionfile " + this.regionFile.toAbsolutePath() + "! Recalculating header...");
|
||||
+ needsHeaderRecalc = true;
|
||||
+ break;
|
||||
+ } else {
|
||||
+ // location = chunkX | (chunkZ << 5);
|
||||
+ LOGGER.fatal("Detected invalid header for regionfile " + this.regionFile.getAbsolutePath() +
|
||||
+ LOGGER.fatal("Detected invalid header for regionfile " + this.regionFile.toAbsolutePath() +
|
||||
+ "! Cannot recalculate, removing local chunk (" + (headerLocation & 31) + "," + (headerLocation >>> 5) + ") from header");
|
||||
+ if (!hasBackedUp) {
|
||||
+ hasBackedUp = true;
|
||||
|
@@ -534,11 +524,11 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+ }
|
||||
+ boolean failedToAllocate = !this.usedSectors.tryAllocate(offset, sectorLength);
|
||||
+ if (failedToAllocate) {
|
||||
+ LOGGER.error("Overlapping allocation by local chunk (" + (headerLocation & 31) + "," + (headerLocation >>> 5) + ") in regionfile " + this.regionFile.getAbsolutePath());
|
||||
+ LOGGER.error("Overlapping allocation by local chunk (" + (headerLocation & 31) + "," + (headerLocation >>> 5) + ") in regionfile " + this.regionFile.toAbsolutePath());
|
||||
}
|
||||
+ if (failedToAllocate & !canRecalcHeader) {
|
||||
+ // location = chunkX | (chunkZ << 5);
|
||||
+ LOGGER.fatal("Detected invalid header for regionfile " + this.regionFile.getAbsolutePath() +
|
||||
+ LOGGER.fatal("Detected invalid header for regionfile " + this.regionFile.toAbsolutePath() +
|
||||
+ "! Cannot recalculate, removing local chunk (" + (headerLocation & 31) + "," + (headerLocation >>> 5) + ") from header");
|
||||
+ if (!hasBackedUp) {
|
||||
+ hasBackedUp = true;
|
||||
|
@@ -555,7 +545,7 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+ // Paper start - recalculate header on header corruption
|
||||
+ // we move the recalc here so comparison to old header is correct when logging to console
|
||||
+ if (needsHeaderRecalc) { // true if header gave us overlapping allocations or had other issues
|
||||
+ LOGGER.error("Recalculating regionfile " + this.regionFile.getAbsolutePath() + ", header gave erroneous offsets & locations");
|
||||
+ LOGGER.error("Recalculating regionfile " + this.regionFile.toAbsolutePath() + ", header gave erroneous offsets & locations");
|
||||
+ this.recalculateHeader();
|
||||
+ }
|
||||
+ // Paper end
|
||||
|
@@ -572,8 +562,8 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
}
|
||||
|
||||
+ // Paper start
|
||||
+ private static ChunkPos getOversizedChunkPair(File file) {
|
||||
+ String fileName = file.getName();
|
||||
+ private static ChunkPos getOversizedChunkPair(Path file) {
|
||||
+ String fileName = file.getFileName().toString();
|
||||
+
|
||||
+ if (!fileName.startsWith("c.") || !fileName.endsWith(".mcc")) {
|
||||
+ return null;
|
||||
|
@@ -692,30 +682,30 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
--- a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java
|
||||
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileStorage.java
|
||||
@@ -0,0 +0,0 @@ public class RegionFileStorage implements AutoCloseable {
|
||||
private final File folder;
|
||||
private final Path folder;
|
||||
private final boolean sync;
|
||||
|
||||
+ private final boolean isChunkData; // Paper
|
||||
+
|
||||
RegionFileStorage(File directory, boolean dsync) {
|
||||
RegionFileStorage(Path directory, boolean dsync) {
|
||||
+ // Paper start - add isChunkData param
|
||||
+ this(directory, dsync, false);
|
||||
+ }
|
||||
+ RegionFileStorage(File directory, boolean dsync, boolean isChunkData) {
|
||||
+ RegionFileStorage(Path directory, boolean dsync, boolean isChunkData) {
|
||||
+ this.isChunkData = isChunkData;
|
||||
+ // Paper end - add isChunkData param
|
||||
this.folder = directory;
|
||||
this.sync = dsync;
|
||||
}
|
||||
@@ -0,0 +0,0 @@ public class RegionFileStorage implements AutoCloseable {
|
||||
|
||||
File file = this.folder;
|
||||
Files.createDirectories(this.folder);
|
||||
Path path = this.folder;
|
||||
int j = chunkcoordintpair.getRegionX();
|
||||
- File file1 = new File(file, "r." + j + "." + chunkcoordintpair.getRegionZ() + ".mca");
|
||||
+ File file1 = new File(file, "r." + j + "." + chunkcoordintpair.getRegionZ() + ".mca"); // Paper - diff on change
|
||||
if (existingOnly && !file1.exists()) return null; // CraftBukkit
|
||||
- RegionFile regionfile1 = new RegionFile(file1, this.folder, this.sync);
|
||||
+ RegionFile regionfile1 = new RegionFile(file1, this.folder, this.sync, this.isChunkData); // Paper - allow for chunk regionfiles to regen header
|
||||
- Path path1 = path.resolve("r." + j + "." + chunkcoordintpair.getRegionZ() + ".mca");
|
||||
+ Path path1 = path.resolve("r." + j + "." + chunkcoordintpair.getRegionZ() + ".mca"); // Paper - diff on change
|
||||
if (existingOnly && !Files.exists(path1)) return null; // CraftBukkit
|
||||
- RegionFile regionfile1 = new RegionFile(path1, this.folder, this.sync);
|
||||
+ RegionFile regionfile1 = new RegionFile(path1, this.folder, this.sync, this.isChunkData); // Paper - allow for chunk regionfiles to regen header
|
||||
|
||||
this.regionCache.putAndMoveToFirst(i, regionfile1);
|
||||
// Paper start
|
||||
|
@@ -741,12 +731,12 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+ if (this.isChunkData) {
|
||||
+ ChunkPos chunkPos = ChunkSerializer.getChunkCoordinate(nbttagcompound);
|
||||
+ if (!chunkPos.equals(pos)) {
|
||||
+ MinecraftServer.LOGGER.error("Attempting to read chunk data at " + pos.toString() + " but got chunk data for " + chunkPos.toString() + " instead! Attempting regionfile recalculation for regionfile " + regionfile.regionFile.getAbsolutePath());
|
||||
+ net.minecraft.server.MinecraftServer.LOGGER.error("Attempting to read chunk data at " + pos + " but got chunk data for " + chunkPos + " instead! Attempting regionfile recalculation for regionfile " + regionfile.regionFile.toAbsolutePath());
|
||||
+ if (regionfile.recalculateHeader()) {
|
||||
+ regionfile.fileLock.lock(); // otherwise we will unlock twice and only lock once.
|
||||
+ return this.read(pos, regionfile);
|
||||
+ }
|
||||
+ MinecraftServer.LOGGER.fatal("Can't recalculate regionfile header, regenerating chunk " + pos.toString() + " for " + regionfile.regionFile.getAbsolutePath());
|
||||
+ net.minecraft.server.MinecraftServer.LOGGER.fatal("Can't recalculate regionfile header, regenerating chunk " + pos + " for " + regionfile.regionFile.toAbsolutePath());
|
||||
+ return null;
|
||||
+ }
|
||||
+ }
|
||||
|
@@ -758,12 +748,12 @@ diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileVer
index 0000000000000000000000000000000000000000..0000000000000000000000000000000000000000 100644
|
||||
--- a/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileVersion.java
|
||||
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/RegionFileVersion.java
|
||||
@@ -0,0 +0,0 @@ import java.util.zip.InflaterInputStream;
|
||||
import javax.annotation.Nullable;
|
||||
@@ -0,0 +0,0 @@ import javax.annotation.Nullable;
|
||||
import net.minecraft.util.FastBufferedInputStream;
|
||||
|
||||
public class RegionFileVersion {
|
||||
- private static final Int2ObjectMap<RegionFileVersion> VERSIONS = new Int2ObjectOpenHashMap<>();
|
||||
+ public static final Int2ObjectMap<RegionFileVersion> VERSIONS = new Int2ObjectOpenHashMap<>(); // Paper - public
|
||||
public static final RegionFileVersion VERSION_GZIP = register(new RegionFileVersion(1, GZIPInputStream::new, GZIPOutputStream::new));
|
||||
public static final RegionFileVersion VERSION_DEFLATE = register(new RegionFileVersion(2, InflaterInputStream::new, DeflaterOutputStream::new));
|
||||
public static final RegionFileVersion VERSION_NONE = register(new RegionFileVersion(3, (inputStream) -> {
|
||||
public static final RegionFileVersion VERSION_GZIP = register(new RegionFileVersion(1, (inputStream) -> {
|
||||
return new FastBufferedInputStream(new GZIPInputStream(inputStream));
|
||||
}, (outputStream) -> {
|
|
@@ -55,7 +55,7 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
@@ -0,0 +0,0 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
|
||||
boolean unloadingPlayerChunk = false; // Paper - do not allow ticket level changes while unloading chunks
|
||||
public ChunkMap(ServerLevel world, LevelStorageSource.LevelStorageAccess session, DataFixer dataFixer, StructureManager structureManager, Executor executor, BlockableEventLoop<Runnable> mainThreadExecutor, LightChunkGetter chunkProvider, ChunkGenerator chunkGenerator, ChunkProgressListener worldGenerationProgressListener, ChunkStatusUpdateListener chunkStatusChangeListener, Supplier<DimensionDataStorage> persistentStateManagerFactory, int viewDistance, boolean dsync) {
|
||||
super(new File(session.getDimensionPath(world.dimension()), "region"), dataFixer, dsync);
|
||||
super(session.getDimensionPath(world.dimension()).resolve("region"), dataFixer, dsync);
|
||||
- this.visibleChunkMap = this.updatingChunkMap.clone();
|
||||
+ // Paper - don't copy
|
||||
this.pendingUnloads = new Long2ObjectLinkedOpenHashMap();
|
||||
|
@@ -100,14 +100,14 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
|
||||
do {
|
||||
@@ -0,0 +0,0 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
|
||||
//this.flushWorker(); // Paper - nuke IOWorker
|
||||
this.level.asyncChunkTaskManager.flush(); // Paper - flush to preserve behavior compat with pre-async behaviour
|
||||
// this.i(); // Paper - nuke IOWorker
|
||||
} else {
|
||||
- this.visibleChunkMap.values().stream().filter(ChunkHolder::wasAccessibleSinceLastSave).forEach((playerchunk) -> {
|
||||
+ this.updatingChunks.getVisibleValuesCopy().stream().filter(ChunkHolder::wasAccessibleSinceLastSave).forEach((playerchunk) -> { // Paper
|
||||
ChunkAccess ichunkaccess = (ChunkAccess) playerchunk.getChunkToSave().getNow(null); // CraftBukkit - decompile error
|
||||
- this.visibleChunkMap.values().forEach(this::saveChunkIfNeeded);
|
||||
+ this.updatingChunks.getVisibleValuesCopy().forEach(this::saveChunkIfNeeded); // Paper
|
||||
}
|
||||
|
||||
if (ichunkaccess instanceof ImposterProtoChunk || ichunkaccess instanceof LevelChunk) {
|
||||
}
|
||||
@@ -0,0 +0,0 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
|
||||
while (longiterator.hasNext()) { // Spigot
|
||||
long j = longiterator.nextLong();
|
||||
|
@@ -117,6 +117,15 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
|
||||
if (playerchunk != null) {
|
||||
this.pendingUnloads.put(j, playerchunk);
|
||||
@@ -0,0 +0,0 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
|
||||
}
|
||||
|
||||
int l = 0;
|
||||
- ObjectIterator objectiterator = this.visibleChunkMap.values().iterator();
|
||||
+ Iterator objectiterator = this.updatingChunks.getVisibleValuesCopy().iterator(); // Paper
|
||||
|
||||
while (l < 20 && shouldKeepTicking.getAsBoolean() && objectiterator.hasNext()) {
|
||||
if (this.saveChunkIfNeeded((ChunkHolder) objectiterator.next())) {
|
||||
@@ -0,0 +0,0 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
|
||||
if (!this.modified) {
|
||||
return false;
|
||||
|
@@ -132,6 +141,15 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
return true;
|
||||
}
|
||||
@@ -0,0 +0,0 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
|
||||
|
||||
this.viewDistance = j;
|
||||
this.distanceManager.updatePlayerTickets(this.viewDistance);
|
||||
- ObjectIterator objectiterator = this.updatingChunkMap.values().iterator();
|
||||
+ Iterator objectiterator = this.updatingChunks.getVisibleValuesCopy().iterator(); // Paper
|
||||
|
||||
while (objectiterator.hasNext()) {
|
||||
ChunkHolder playerchunk = (ChunkHolder) objectiterator.next();
|
||||
@@ -0,0 +0,0 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
|
||||
}
|
||||
|
||||
public int size() {
|
||||
|
@@ -139,7 +157,7 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+ return this.updatingChunks.getVisibleMap().size(); // Paper - Don't copy
|
||||
}
|
||||
|
||||
protected DistanceManager getDistanceManager() {
|
||||
public DistanceManager getDistanceManager() {
|
||||
@@ -0,0 +0,0 @@ public class ChunkMap extends ChunkStorage implements ChunkHolder.PlayerProvider
|
||||
}
|
||||
|
||||
|
@@ -149,7 +167,8 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
}
|
||||
|
||||
void dumpChunks(Writer writer) throws IOException {
|
||||
CsvOutput csvwriter = CsvOutput.builder().addColumn("x").addColumn("z").addColumn("level").addColumn("in_memory").addColumn("status").addColumn("full_status").addColumn("accessible_ready").addColumn("ticking_ready").addColumn("entity_ticking_ready").addColumn("ticket").addColumn("spawning").addColumn("block_entity_count").build(writer);
|
||||
CsvOutput csvwriter = CsvOutput.builder().addColumn("x").addColumn("z").addColumn("level").addColumn("in_memory").addColumn("status").addColumn("full_status").addColumn("accessible_ready").addColumn("ticking_ready").addColumn("entity_ticking_ready").addColumn("ticket").addColumn("spawning").addColumn("block_entity_count").addColumn("ticking_ticket").addColumn("ticking_level").addColumn("block_ticks").addColumn("fluid_ticks").build(writer);
|
||||
TickingTracker tickingtracker = this.distanceManager.tickingTracker();
|
||||
- ObjectBidirectionalIterator objectbidirectionaliterator = this.visibleChunkMap.long2ObjectEntrySet().iterator();
|
||||
+ ObjectBidirectionalIterator objectbidirectionaliterator = this.updatingChunks.getVisibleMap().clone().long2ObjectEntrySet().fastIterator(); // Paper
|
||||
|
|
@@ -18,18 +18,6 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
private static final Map<Class<?>, String> taskNameCache = new MapMaker().weakKeys().makeMap();
|
||||
|
||||
private MinecraftTimings() {}
|
||||
diff --git a/src/main/java/com/destroystokyo/paper/server/ticklist/PaperTickList.java b/src/main/java/com/destroystokyo/paper/server/ticklist/PaperTickList.java
|
||||
index 0000000000000000000000000000000000000000..0000000000000000000000000000000000000000 100644
|
||||
--- a/src/main/java/com/destroystokyo/paper/server/ticklist/PaperTickList.java
|
||||
+++ b/src/main/java/com/destroystokyo/paper/server/ticklist/PaperTickList.java
|
||||
@@ -0,0 +0,0 @@ public final class PaperTickList<T> extends ServerTickList<T> { // extend to avo
|
||||
toTick.tickState = STATE_SCHEDULED;
|
||||
this.addToNotTickingReady(toTick);
|
||||
}
|
||||
+ MinecraftServer.getServer().executeMidTickTasks(); // Paper - exec chunk tasks during world tick
|
||||
} catch (final Throwable thr) {
|
||||
// start copy from TickListServer // TODO check on update
|
||||
CrashReport crashreport = CrashReport.forThrowable(thr, "Exception while ticking");
|
||||
diff --git a/src/main/java/net/minecraft/server/MinecraftServer.java b/src/main/java/net/minecraft/server/MinecraftServer.java
|
||||
index 0000000000000000000000000000000000000000..0000000000000000000000000000000000000000 100644
|
||||
--- a/src/main/java/net/minecraft/server/MinecraftServer.java
|
||||
|
@@ -119,33 +107,13 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
return true;
|
||||
} else {
|
||||
if (this.haveTime()) {
|
||||
diff --git a/src/main/java/net/minecraft/server/level/ServerChunkCache.java b/src/main/java/net/minecraft/server/level/ServerChunkCache.java
|
||||
index 0000000000000000000000000000000000000000..0000000000000000000000000000000000000000 100644
|
||||
--- a/src/main/java/net/minecraft/server/level/ServerChunkCache.java
|
||||
+++ b/src/main/java/net/minecraft/server/level/ServerChunkCache.java
|
||||
@@ -0,0 +0,0 @@ public class ServerChunkCache extends ChunkSource {
|
||||
Collections.shuffle(shuffled);
|
||||
iterator = shuffled.iterator();
|
||||
}
|
||||
+ int chunksTicked = 0; // Paper
|
||||
try { while (iterator.hasNext()) {
|
||||
LevelChunk chunk = iterator.next();
|
||||
ChunkHolder playerchunk = chunk.playerChunk;
|
||||
@@ -0,0 +0,0 @@ public class ServerChunkCache extends ChunkSource {
|
||||
this.level.tickChunk(chunk, k);
|
||||
// this.level.timings.doTickTiles.stopTiming(); // Spigot // Paper
|
||||
}
|
||||
+ if ((chunksTicked++ & 1) == 0) net.minecraft.server.MinecraftServer.getServer().executeMidTickTasks(); // Paper
|
||||
}
|
||||
} // Paper start - optimise chunk tick iteration
|
||||
} finally {
|
||||
diff --git a/src/main/java/net/minecraft/server/level/ServerLevel.java b/src/main/java/net/minecraft/server/level/ServerLevel.java
|
||||
index 0000000000000000000000000000000000000000..0000000000000000000000000000000000000000 100644
|
||||
--- a/src/main/java/net/minecraft/server/level/ServerLevel.java
|
||||
+++ b/src/main/java/net/minecraft/server/level/ServerLevel.java
|
||||
@@ -0,0 +0,0 @@ public class ServerLevel extends Level implements WorldGenLevel {
|
||||
final Int2ObjectMap<EnderDragonPart> dragonParts;
|
||||
private final StructureFeatureManager structureFeatureManager;
|
||||
private final StructureCheck structureCheck;
|
||||
private final boolean tickTime;
|
||||
-
|
||||
+ // Paper start - execute chunk tasks mid tick
|
|
@@ -831,8 +831,8 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
- private final net.minecraft.server.level.ServerLevel world; // Paper
|
||||
+ public final net.minecraft.server.level.ServerLevel world; // Paper // Paper public
|
||||
|
||||
public PoiManager(File directory, DataFixer dataFixer, boolean dsync, LevelHeightAccessor world) {
|
||||
super(directory, PoiSection::codec, PoiSection::new, dataFixer, DataFixTypes.POI_CHUNK, dsync, world);
|
||||
public PoiManager(Path path, DataFixer dataFixer, boolean dsync, LevelHeightAccessor world) {
|
||||
super(path, PoiSection::codec, PoiSection::new, dataFixer, DataFixTypes.POI_CHUNK, dsync, world);
|
||||
@@ -0,0 +0,0 @@ public class PoiManager extends SectionStorage<PoiSection> {
|
||||
}
|
||||
|
||||
|
@@ -854,12 +854,12 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+ // Paper end - re-route to faster logic
|
||||
}
|
||||
|
||||
public Optional<BlockPos> findClosest(Predicate<PoiType> predicate, Predicate<BlockPos> predicate2, BlockPos blockPos, int i, PoiManager.Occupancy occupancy) {
|
||||
- return this.getInRange(predicate, blockPos, i, occupancy).map(PoiRecord::getPos).filter(predicate2).min(Comparator.comparingDouble((blockPos2) -> {
|
||||
- return blockPos2.distSqr(blockPos);
|
||||
public Optional<BlockPos> findClosest(Predicate<PoiType> typePredicate, Predicate<BlockPos> posPredicate, BlockPos pos, int radius, PoiManager.Occupancy occupationStatus) {
|
||||
- return this.getInRange(typePredicate, pos, radius, occupationStatus).map(PoiRecord::getPos).filter(posPredicate).min(Comparator.comparingDouble((blockPos2) -> {
|
||||
- return blockPos2.distSqr(pos);
|
||||
- }));
|
||||
+ // Paper start - re-route to faster logic
|
||||
+ BlockPos ret = io.papermc.paper.util.PoiAccess.findClosestPoiDataPosition(this, predicate, predicate2, blockPos, i, i*i, occupancy, false);
|
||||
+ BlockPos ret = io.papermc.paper.util.PoiAccess.findClosestPoiDataPosition(this, typePredicate, posPredicate, pos, radius, radius * radius, occupationStatus, false);
|
||||
+ return Optional.ofNullable(ret);
|
||||
+ // Paper end - re-route to faster logic
|
||||
}
|
||||
|
@@ -886,8 +886,8 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
public Optional<BlockPos> getRandom(Predicate<PoiType> typePredicate, Predicate<BlockPos> positionPredicate, PoiManager.Occupancy occupationStatus, BlockPos pos, int radius, Random random) {
|
||||
- List<PoiRecord> list = this.getInRange(typePredicate, pos, radius, occupationStatus).collect(Collectors.toList());
|
||||
- Collections.shuffle(list, random);
|
||||
- return list.stream().filter((poiRecord) -> {
|
||||
- return positionPredicate.test(poiRecord.getPos());
|
||||
- return list.stream().filter((poi) -> {
|
||||
- return positionPredicate.test(poi.getPos());
|
||||
- }).findFirst().map(PoiRecord::getPos);
|
||||
+ // Paper start - re-route to faster logic
|
||||
+ List<PoiRecord> list = new java.util.ArrayList<>();
|
||||
|
@@ -949,7 +949,9 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
- villageplace.ensureLoadedAndValid(this.level, blockposition, i);
|
||||
- Optional<PoiRecord> optional = villageplace.getInSquare((villageplacetype) -> {
|
||||
- return villageplacetype == PoiType.NETHER_PORTAL;
|
||||
- }, blockposition, i, PoiManager.Occupancy.ANY).sorted(Comparator.comparingDouble((PoiRecord villageplacerecord) -> { // CraftBukkit - decompile error
|
||||
- }, blockposition, i, PoiManager.Occupancy.ANY).filter((villageplacerecord) -> {
|
||||
- return worldborder.isWithinBounds(villageplacerecord.getPos());
|
||||
- }).sorted(Comparator.comparingDouble((PoiRecord villageplacerecord) -> { // CraftBukkit - decompile error
|
||||
- return villageplacerecord.getPos().distSqr(blockposition);
|
||||
- }).thenComparingInt((villageplacerecord) -> {
|
||||
- return villageplacerecord.getPos().getY();
|
||||
|
@@ -970,6 +972,9 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+ // why would we generate the chunk?
|
||||
+ return false;
|
||||
+ }
|
||||
+ if (!worldborder.isWithinBounds(pos)) {
|
||||
+ return false;
|
||||
+ }
|
||||
+ return lowest.getBlockState(pos).hasProperty(BlockStateProperties.HORIZONTAL_AXIS);
|
||||
+ },
|
||||
+ blockposition, i, Double.MAX_VALUE, PoiManager.Occupancy.ANY, true, records
|
|
@@ -919,21 +919,22 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+++ b/src/main/java/net/minecraft/server/level/ServerLevel.java
|
||||
@@ -0,0 +0,0 @@ public class ServerLevel extends Level implements WorldGenLevel {
|
||||
DataFixer datafixer = minecraftserver.getFixerUpper();
|
||||
EntityPersistentStorage<Entity> entitypersistentstorage = new EntityStorage(this, new File(convertable_conversionsession.getDimensionPath(resourcekey), "entities"), datafixer, flag2, minecraftserver);
|
||||
EntityPersistentStorage<Entity> entitypersistentstorage = new EntityStorage(this, convertable_conversionsession.getDimensionPath(resourcekey).resolve("entities"), datafixer, flag2, minecraftserver);
|
||||
|
||||
- this.entityManager = new PersistentEntitySectionManager<>(Entity.class, new ServerLevel.EntityCallbacks(), entitypersistentstorage);
|
||||
+ this.entityManager = new PersistentEntitySectionManager<>(Entity.class, new ServerLevel.EntityCallbacks(), entitypersistentstorage, this.entitySliceManager); // Paper
|
||||
StructureManager definedstructuremanager = minecraftserver.getStructureManager();
|
||||
int j = this.spigotConfig.viewDistance; // Spigot
|
||||
PersistentEntitySectionManager persistententitysectionmanager = this.entityManager;
|
||||
int k = this.spigotConfig.simulationDistance; // Spigot
|
||||
diff --git a/src/main/java/net/minecraft/server/level/WorldGenRegion.java b/src/main/java/net/minecraft/server/level/WorldGenRegion.java
|
||||
index 0000000000000000000000000000000000000000..0000000000000000000000000000000000000000 100644
|
||||
--- a/src/main/java/net/minecraft/server/level/WorldGenRegion.java
|
||||
+++ b/src/main/java/net/minecraft/server/level/WorldGenRegion.java
|
||||
@@ -0,0 +0,0 @@ public class WorldGenRegion implements WorldGenLevel {
|
||||
@Nullable
|
||||
private Supplier<String> currentlyGenerating;
|
||||
|
||||
public long nextSubTickCount() {
|
||||
return this.subTickCount.getAndIncrement();
|
||||
}
|
||||
+
|
||||
+ // Paper start
|
||||
+ // No-op, this class doesn't provide entity access
|
||||
+ @Override
|
||||
|
@@ -950,15 +951,12 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+ @Override
|
||||
+ public <T> void getEntitiesByClass(Class<? extends T> clazz, Entity except, AABB box, List<? super T> into, Predicate<? super T> predicate) {}
|
||||
+ // Paper end
|
||||
+
|
||||
public WorldGenRegion(ServerLevel world, List<ChunkAccess> list, ChunkStatus chunkstatus, int i) {
|
||||
this.generatingStatus = chunkstatus;
|
||||
this.writeRadiusCutoff = i;
|
||||
}
|
||||
diff --git a/src/main/java/net/minecraft/world/entity/Entity.java b/src/main/java/net/minecraft/world/entity/Entity.java
|
||||
index 0000000000000000000000000000000000000000..0000000000000000000000000000000000000000 100644
|
||||
--- a/src/main/java/net/minecraft/world/entity/Entity.java
|
||||
+++ b/src/main/java/net/minecraft/world/entity/Entity.java
|
||||
@@ -0,0 +0,0 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource, n
|
||||
@@ -0,0 +0,0 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource, i
|
||||
}
|
||||
// Paper end - make end portalling safe
|
||||
|
||||
|
@@ -1015,7 +1013,7 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
public Entity(EntityType<?> type, Level world) {
|
||||
this.id = Entity.ENTITY_COUNTER.incrementAndGet();
|
||||
this.passengers = ImmutableList.of();
|
||||
@@ -0,0 +0,0 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource, n
|
||||
@@ -0,0 +0,0 @@ public abstract class Entity implements Nameable, EntityAccess, CommandSource, i
|
||||
return InteractionResult.PASS;
|
||||
}
|
||||
|
||||
|
@@ -1052,42 +1050,17 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
List<Entity> getEntities(@Nullable Entity except, AABB box, Predicate<? super Entity> predicate);
|
||||
|
||||
<T extends Entity> List<T> getEntities(EntityTypeTest<Entity, T> filter, AABB box, Predicate<? super T> predicate);
|
||||
@@ -0,0 +0,0 @@ public interface EntityGetter {
|
||||
return Stream.empty();
|
||||
} else {
|
||||
AABB aABB = box.inflate(1.0E-7D);
|
||||
- return this.getEntities(entity, aABB, predicate.and((entityx) -> {
|
||||
- if (entityx.getBoundingBox().intersects(aABB)) {
|
||||
+ Predicate<Entity> hardCollides = (entityx) -> { // Paper - optimise entity hard collisions
|
||||
+ if (true || entityx.getBoundingBox().intersects(aABB)) { // Paper - always true
|
||||
if (entity == null) {
|
||||
if (entityx.canBeCollidedWith()) {
|
||||
return true;
|
||||
@@ -0,0 +0,0 @@ public interface EntityGetter {
|
||||
}
|
||||
|
||||
return false;
|
||||
- })).stream().map(Entity::getBoundingBox).map(Shapes::create);
|
||||
+ }; // Paper start - optimise entity hard collisions
|
||||
+ predicate = predicate == null ? hardCollides : hardCollides.and(predicate);
|
||||
+ return (entity != null && entity.hardCollides() ? this.getEntities(entity, aABB, predicate) : this.getHardCollidingEntities(entity, aABB, predicate))
|
||||
+ .stream().map(Entity::getBoundingBox).map(Shapes::create);
|
||||
+ // Paper end - optimise entity hard collisions
|
||||
}
|
||||
}
|
||||
|
||||
diff --git a/src/main/java/net/minecraft/world/level/Level.java b/src/main/java/net/minecraft/world/level/Level.java
|
||||
index 0000000000000000000000000000000000000000..0000000000000000000000000000000000000000 100644
|
||||
--- a/src/main/java/net/minecraft/world/level/Level.java
|
||||
+++ b/src/main/java/net/minecraft/world/level/Level.java
|
||||
@@ -0,0 +0,0 @@ public abstract class Level implements LevelAccessor, AutoCloseable {
|
||||
return this.typeKey;
|
||||
}
|
||||
|
||||
public abstract ResourceKey<LevelStem> getTypeKey();
|
||||
|
||||
+ // Paper start
|
||||
+ protected final io.papermc.paper.world.EntitySliceManager entitySliceManager;
|
||||
+
|
||||
+ // Paper start - optimise CraftChunk#getEntities
|
||||
+ public org.bukkit.entity.Entity[] getChunkEntities(int chunkX, int chunkZ) {
|
||||
+ io.papermc.paper.world.ChunkEntitySlices slices = this.entitySliceManager.getChunk(chunkX, chunkZ);
|
||||
+ if (slices == null) {
|
||||
|
@@ -1095,7 +1068,6 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+ }
|
||||
+ return slices.getChunkEntities();
|
||||
+ }
|
||||
+ // Paper end - optimise CraftChunk#getEntities
|
||||
+
|
||||
+ @Override
|
||||
+ public List<Entity> getHardCollidingEntities(Entity except, AABB box, Predicate<? super Entity> predicate) {
|
||||
|
@@ -1128,13 +1100,13 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
+ }
|
||||
+ // Paper end
|
||||
+
|
||||
protected Level(WritableLevelData worlddatamutable, ResourceKey<Level> resourcekey, final DimensionType dimensionmanager, Supplier<ProfilerFiller> supplier, boolean flag, boolean flag1, long i, org.bukkit.generator.ChunkGenerator gen, org.bukkit.generator.BiomeProvider biomeProvider, org.bukkit.World.Environment env, java.util.concurrent.Executor executor) { // Paper - Anti-Xray - Pass executor
|
||||
protected Level(WritableLevelData worlddatamutable, ResourceKey<Level> resourcekey, final DimensionType dimensionmanager, Supplier<ProfilerFiller> supplier, boolean flag, boolean flag1, long i, org.bukkit.generator.ChunkGenerator gen, org.bukkit.generator.BiomeProvider biomeProvider, org.bukkit.World.Environment env) {
|
||||
this.spigotConfig = new org.spigotmc.SpigotWorldConfig(((net.minecraft.world.level.storage.PrimaryLevelData) worlddatamutable).getLevelName()); // Spigot
|
||||
this.paperConfig = new com.destroystokyo.paper.PaperWorldConfig(((net.minecraft.world.level.storage.PrimaryLevelData) worlddatamutable).getLevelName(), this.spigotConfig); // Paper
|
||||
@@ -0,0 +0,0 @@ public abstract class Level implements LevelAccessor, AutoCloseable {
|
||||
this.chunkPacketBlockController = this.paperConfig.antiXray ?
|
||||
new com.destroystokyo.paper.antixray.ChunkPacketBlockControllerAntiXray(this, executor)
|
||||
: com.destroystokyo.paper.antixray.ChunkPacketBlockController.NO_OPERATION_INSTANCE; // Paper - Anti-Xray
|
||||
this.keepSpawnInMemory = this.paperConfig.keepSpawnInMemory; // Paper
|
||||
this.entityLimiter = new org.spigotmc.TickLimiter(spigotConfig.entityMaxTickTime);
|
||||
this.tileLimiter = new org.spigotmc.TickLimiter(spigotConfig.tileMaxTickTime);
|
||||
+ this.entitySliceManager = new io.papermc.paper.world.EntitySliceManager((ServerLevel)this); // Paper
|
||||
}
|
||||
|
||||
|
@ -1177,12 +1149,13 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
|
|||
- }
|
||||
-
|
||||
- if (entity instanceof EnderDragon) {
|
||||
- EnderDragonPart[] aentitycomplexpart = ((EnderDragon) entity).getSubEntities();
|
||||
- EnderDragon entityenderdragon = (EnderDragon) entity;
|
||||
- EnderDragonPart[] aentitycomplexpart = entityenderdragon.getSubEntities();
|
||||
- int i = aentitycomplexpart.length;
|
||||
-
|
||||
- for (int j = 0; j < i; ++j) {
|
||||
- EnderDragonPart entitycomplexpart = aentitycomplexpart[j];
|
||||
- T t0 = filter.tryCast(entitycomplexpart);
|
||||
- T t0 = filter.tryCast(entitycomplexpart); // CraftBukkit - decompile error
|
||||
-
|
||||
- if (t0 != null && predicate.test(t0)) {
|
||||
- list.add(t0);
|
||||
|
@ -1228,7 +1201,7 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
|
|||
@@ -0,0 +0,0 @@ public class PersistentEntitySectionManager<T extends EntityAccess> implements A
|
||||
EntitySection<T> entitysection = this.sectionStorage.getOrCreateSection(i);
|
||||
|
||||
entitysection.add(entity); // CraftBukkit - decompile error
|
||||
entitysection.add(entity);
|
||||
+ this.entitySliceManager.addEntity((Entity)entity); // Paper
|
||||
entity.setLevelCallback(new PersistentEntitySectionManager.Callback(entity, i, entitysection));
|
||||
if (!existing) {
|
||||
|
@ -1264,7 +1237,7 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
|
|||
@@ -0,0 +0,0 @@ public class CraftChunk implements Chunk {
|
||||
long pair = ChunkPos.asLong(x, z);
|
||||
|
||||
if (entityManager.areEntitiesLoaded(pair)) { // PAIL rename isEntitiesLoaded
|
||||
if (entityManager.areEntitiesLoaded(pair)) {
|
||||
- return entityManager.getEntities(new ChunkPos(this.x, this.z)).stream()
|
||||
- .map(net.minecraft.world.entity.Entity::getBukkitEntity)
|
||||
- .filter(Objects::nonNull).toArray(Entity[]::new);
|
||||
|
@ -1288,8 +1261,8 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000
|
|||
--- a/src/main/java/org/bukkit/craftbukkit/util/DummyGeneratorAccess.java
|
||||
+++ b/src/main/java/org/bukkit/craftbukkit/util/DummyGeneratorAccess.java
|
||||
@@ -0,0 +0,0 @@ public class DummyGeneratorAccess implements WorldGenLevel {
|
||||
public Stream<? extends StructureStart<?>> startsForFeature(SectionPos pos, StructureFeature<?> feature) {
|
||||
throw new UnsupportedOperationException("Not supported yet.");
|
||||
public boolean destroyBlock(BlockPos pos, boolean drop, Entity breakingEntity, int maxUpdateDepth) {
|
||||
return false; // SPIGOT-6515
|
||||
}
|
||||
+
|
||||
+ // Paper start
|
|
@@ -1,21 +0,0 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
|
||||
From: Spottedleaf <Spottedleaf@users.noreply.github.com>
|
||||
Date: Sun, 11 Apr 2021 02:58:48 -0700
|
||||
Subject: [PATCH] Don't read neighbour chunk data off disk when converting
|
||||
chunks
|
||||
|
||||
Lighting is purged on update anyways, so let's not add more
|
||||
into the conversion process
|
||||
|
||||
diff --git a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkStorage.java b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkStorage.java
|
||||
index 0000000000000000000000000000000000000000..0000000000000000000000000000000000000000 100644
|
||||
--- a/src/main/java/net/minecraft/world/level/chunk/storage/ChunkStorage.java
|
||||
+++ b/src/main/java/net/minecraft/world/level/chunk/storage/ChunkStorage.java
|
||||
@@ -0,0 +0,0 @@ public class ChunkStorage implements AutoCloseable {
|
||||
|
||||
// CraftBukkit start
|
||||
private boolean check(ServerChunkCache cps, int x, int z) throws IOException {
|
||||
+ if (true) return true; // Paper - this isn't even needed anymore, light is purged updating to 1.14+, why are we holding up the conversion process reading chunk data off disk - return true, we need to set light populated to true so the converter recognizes the chunk as being "full"
|
||||
ChunkPos pos = new ChunkPos(x, z);
|
||||
if (cps != null) {
|
||||
//com.google.common.base.Preconditions.checkState(org.bukkit.Bukkit.isPrimaryThread(), "primary thread"); // Paper - this function is now MT-Safe
|
|
@@ -1,178 +0,0 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Jason Penilla <11360596+jpenilla@users.noreply.github.com>
Date: Sun, 22 Aug 2021 23:03:33 -0700
Subject: [PATCH] Fix and optimize legacy world conversion

CraftBukkit breaks legacy world conversion in three ways:
- Writes userdata to the path of the userdata folder rather than to
  the correct file inside the aforementioned folder. This causes the
  userdata folder to fail to be created as a file already exists at
  its path.
- Makes changes to how multiworld works, without modifying
  McRegionUpgrader to be aware of these changes.
- Calls methods on Bukkit before the server is initialized.

This patch fixes all of these issues, and also threads the
McRegionUpgrader to improve performance.
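For reference, the threading approach the patch applies to convertRegions boils down to the pattern sketched below: fan the per-region work out to a fixed pool sized at roughly half the available cores, count completions with an atomic integer, and poll once per second for progress until the pool drains. This is only a minimal, self-contained sketch of that pattern; the class name ThreadedRegionConverter, the convertOne callback, and the plain System.out logging are illustrative stand-ins, not the actual Mojang/Paper signatures used in the diff below.

import java.io.File;
import java.text.DecimalFormat;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

public final class ThreadedRegionConverter {

    // Fan per-region work out to a fixed thread pool and poll for completion.
    public static void convertAll(List<File> regionFiles, Consumer<File> convertOne) {
        if (regionFiles.isEmpty()) {
            return; // nothing to convert
        }
        // Half the available cores, but always at least one worker thread.
        int threads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2);
        ExecutorService pool = Executors.newFixedThreadPool(threads);
        AtomicInteger converted = new AtomicInteger();
        int total = regionFiles.size();

        for (File file : regionFiles) {
            pool.execute(() -> {
                convertOne.accept(file);      // convert one region file
                converted.incrementAndGet();  // completion is tracked with an atomic counter
            });
        }
        pool.shutdown();

        // Poll once per second and log progress until every queued task has finished.
        DecimalFormat format = new DecimalFormat("#0.00");
        while (!pool.isTerminated()) {
            int done = converted.get();
            System.out.println("Converting... " + done + "/" + total
                    + " (" + format.format(100.0D * done / total) + "%)");
            try {
                Thread.sleep(1000L);
            } catch (InterruptedException ignored) {
                Thread.currentThread().interrupt();
            }
        }
    }
}

Because progress is now reported by this polling loop rather than per file, the patch also replaces the per-file ProgressListener updates with a no-op listener at the top of convertLevel, as the diff below shows.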

diff --git a/src/main/java/net/minecraft/server/players/OldUsersConverter.java b/src/main/java/net/minecraft/server/players/OldUsersConverter.java
index 0000000000000000000000000000000000000000..0000000000000000000000000000000000000000 100644
--- a/src/main/java/net/minecraft/server/players/OldUsersConverter.java
+++ b/src/main/java/net/minecraft/server/players/OldUsersConverter.java
@@ -0,0 +0,0 @@ public class OldUsersConverter {
}

private void movePlayerFile(File playerDataFolder, String fileName, String uuid) {
- File file5 = new File(file, fileName + ".dat");
+ File file5 = new File(file, fileName + ".dat"); // Paper - diff on change
File file6 = new File(playerDataFolder, uuid + ".dat");

// CraftBukkit start - Use old file name to seed lastKnownName
CompoundTag root = null;

try {
- root = NbtIo.readCompressed(new java.io.FileInputStream(file5));
+ root = NbtIo.readCompressed(new java.io.FileInputStream(file5)); // Paper - diff on change
} catch (Exception exception) {
exception.printStackTrace();
ServerInternalException.reportInternalException(exception); // Paper
@@ -0,0 +0,0 @@ public class OldUsersConverter {
data.putString("lastKnownName", fileName);

try {
- NbtIo.writeCompressed(root, new java.io.FileOutputStream(file2));
+ NbtIo.writeCompressed(root, new java.io.FileOutputStream(file5)); // Paper - write to correct path (diff on change)
} catch (Exception exception) {
exception.printStackTrace();
ServerInternalException.reportInternalException(exception); // Paper
diff --git a/src/main/java/net/minecraft/world/level/storage/LevelStorageSource.java b/src/main/java/net/minecraft/world/level/storage/LevelStorageSource.java
index 0000000000000000000000000000000000000000..0000000000000000000000000000000000000000 100644
--- a/src/main/java/net/minecraft/world/level/storage/LevelStorageSource.java
+++ b/src/main/java/net/minecraft/world/level/storage/LevelStorageSource.java
@@ -0,0 +0,0 @@ public class LevelStorageSource {
});
}

+ // Paper start - copied from vanilla before below CB diff
+ public File getDimensionPathForLegacyConversion(ResourceKey<Level> key) {
+ return DimensionType.getStorageFolder(key, this.levelPath.toFile());
+ }
+ // Paper end
+
public File getDimensionPath(ResourceKey<Level> key) {
return LevelStorageSource.getFolder(this.levelPath.toFile(), this.dimensionType); // CraftBukkit
}
diff --git a/src/main/java/net/minecraft/world/level/storage/McRegionUpgrader.java b/src/main/java/net/minecraft/world/level/storage/McRegionUpgrader.java
index 0000000000000000000000000000000000000000..0000000000000000000000000000000000000000 100644
--- a/src/main/java/net/minecraft/world/level/storage/McRegionUpgrader.java
+++ b/src/main/java/net/minecraft/world/level/storage/McRegionUpgrader.java
@@ -0,0 +0,0 @@ public class McRegionUpgrader {
private static final String MCREGION_EXTENSION = ".mcr";

static boolean convertLevel(LevelStorageSource.LevelStorageAccess storageSession, ProgressListener progressListener) {
+ // Paper start
+ progressListener = new ProgressListener() {
+ @Override
+ public void progressStartNoAbort(net.minecraft.network.chat.Component title) {}
+ @Override
+ public void progressStart(net.minecraft.network.chat.Component title) {}
+ @Override
+ public void progressStage(net.minecraft.network.chat.Component task) {}
+ @Override
+ public void progressStagePercentage(int percentage) {}
+ @Override
+ public void stop() {}
+ };
+ // Paper end
progressListener.progressStagePercentage(0);
List<File> list = Lists.newArrayList();
List<File> list2 = Lists.newArrayList();
List<File> list3 = Lists.newArrayList();
- File file = storageSession.getDimensionPath(Level.OVERWORLD);
- File file2 = storageSession.getDimensionPath(Level.NETHER);
- File file3 = storageSession.getDimensionPath(Level.END);
+ // Paper start
+ File file = storageSession.getDimensionPathForLegacyConversion(Level.OVERWORLD);
+ File file2 = storageSession.getDimensionPathForLegacyConversion(Level.NETHER);
+ File file3 = storageSession.getDimensionPathForLegacyConversion(Level.END);
+ // Paper end
LOGGER.info("Scanning folders...");
addRegionFiles(file, list);
if (file2.exists()) {
@@ -0,0 +0,0 @@ public class McRegionUpgrader {
}

private static void convertRegions(RegistryAccess.RegistryHolder registryManager, File directory, Iterable<File> files, BiomeSource biomeSource, int i, int j, ProgressListener progressListener) {
- for(File file : files) {
- convertRegion(registryManager, directory, file, biomeSource, i, j, progressListener);
- ++i;
- int k = (int)Math.round(100.0D * (double)i / (double)j);
- progressListener.progressStagePercentage(k);
+ // Paper start - thread this because it's dead simple
+ convertRegionsThreaded(registryManager, directory, files, biomeSource, i, j, progressListener);
+ }
+
+ private static void convertRegionsThreaded(RegistryAccess.RegistryHolder registryManager, File directory, Iterable<File> files, BiomeSource biomeSource, int progress, int total, ProgressListener progressListener) {
+ if (!files.iterator().hasNext()) {
+ return;
}

+ final int threads = Runtime.getRuntime().availableProcessors() / 2;
+ final java.util.concurrent.ExecutorService threadPool = java.util.concurrent.Executors.newFixedThreadPool(Math.max(1, threads), new java.util.concurrent.ThreadFactory() {
+ private final java.util.concurrent.atomic.AtomicInteger threadCounter = new java.util.concurrent.atomic.AtomicInteger();
+
+ @Override
+ public Thread newThread(final Runnable run) {
+ final Thread ret = new Thread(run);
+
+ ret.setName("World upgrader thread for directory " + directory + " #" + this.threadCounter.getAndIncrement());
+ ret.setUncaughtExceptionHandler((thread, throwable) -> {
+ LOGGER.fatal("Error upgrading world", throwable);
+ });
+
+ return ret;
+ }
+ });
+ final java.util.concurrent.atomic.AtomicInteger converted = new java.util.concurrent.atomic.AtomicInteger(progress);
+
+ final long start = System.nanoTime();
+
+ for (final File file : files) {
+ threadPool.execute(() -> {
+ convertRegion(registryManager, directory, file, biomeSource, 0, total, progressListener);
+ converted.incrementAndGet();
+ });
+ }
+ threadPool.shutdown();
+
+ final java.text.DecimalFormat format = new java.text.DecimalFormat("#0.00");
+ while (!threadPool.isTerminated()) {
+ final int getConverted = converted.get();
+ LOGGER.info("Converting... {}/{} ({}%)", getConverted, total, format.format(100.0D * (double) getConverted / (double) total));
+ try {
+ Thread.sleep(1000L);
+ } catch (final InterruptedException ignored) {}
+ }
+
+ final long end = System.nanoTime();
+
+ final double durationSeconds = Math.ceil((end - start) * 1.0e-9);
+ LOGGER.info("Conversion for {} completed in {}s", directory, durationSeconds);
}
+ // Paper end

private static void convertRegion(RegistryAccess.RegistryHolder registryManager, File directory, File file, BiomeSource biomeSource, int i, int j, ProgressListener progressListener) {
String string = file.getName();
diff --git a/src/main/java/net/minecraft/world/level/storage/PrimaryLevelData.java b/src/main/java/net/minecraft/world/level/storage/PrimaryLevelData.java
index 0000000000000000000000000000000000000000..0000000000000000000000000000000000000000 100644
--- a/src/main/java/net/minecraft/world/level/storage/PrimaryLevelData.java
+++ b/src/main/java/net/minecraft/world/level/storage/PrimaryLevelData.java
@@ -0,0 +0,0 @@ public class PrimaryLevelData implements ServerLevelData, WorldData {
levelTag.putUUID("WanderingTraderId", this.wanderingTraderId);
}

- levelTag.putString("Bukkit.Version", Bukkit.getName() + "/" + Bukkit.getVersion() + "/" + Bukkit.getBukkitVersion()); // CraftBukkit
+ if (Bukkit.getServer() != null) levelTag.putString("Bukkit.Version", Bukkit.getName() + "/" + Bukkit.getVersion() + "/" + Bukkit.getBukkitVersion()); // CraftBukkit // Paper - server may not be started yet
}

@Override
todo.txt
@@ -26,3 +26,6 @@ index 0000000000000000000000000000000000000000..00000000000000000000000000000000

check ChunkHolder#updateFutures async catcher

leaf: check mid tick chunk task diff in ServerChunkCache