PaperMC/Spigot-Server-Patches/0379-Make-region-files-more-reliable-to-write-to.patch
Spottedleaf 5c7081fecc Update upstream & fix some chunk related issues (#2177)
* Updated Upstream (Bukkit/CraftBukkit)

Upstream has released updates that appear to apply and compile correctly.
This update has not been tested by PaperMC, and as with ANY update, please do your own testing.

Bukkit Changes:
45690fe9 SPIGOT-5047: Correct slot types for 1.14 inventories

CraftBukkit Changes:
4090d01f SPIGOT-5047: Correct slot types for 1.14 inventories
e8c08362 SPIGOT-5046: World#getLoadedChunks returning inaccessible cached chunks.
d445af3b SPIGOT-5067: Add item meta for 1.14 spawn eggs

* Bring Chunk load checks in-line with spigot

As of the last upstream merge, Spigot now checks ticket level status
when returning a world's loaded chunks from the API. Our checks now
respect that decision.

* Fix spawn ticket levels

Vanilla would keep the inner chunks of spawn available for ticking;
however, my changes made all chunks non-ticking. Resolved by changing
ticket levels for spawn chunks inside the border to respect this
behavior.

* Make World#getChunkIfLoadedImmediately return only entity ticking chunks

Mojang appears to be using chunks with level > 33 (non-ticking chunks)
as cached chunks and not actually loaded chunks.

* Bring all loaded checks in line with spigot

Loaded chunks must be at least border chunks, i.e. level <= 33
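
In practice the "loaded" checks described above reduce to a ticket level
comparison. As a rough illustration only (the constant and method below are
made up for this note and are not the actual NMS/Paper API), a chunk is
reported as loaded when its ticket level is at or below the border level:

    // Illustrative sketch, not actual server code: ticket levels at or
    // below 33 correspond to border (or better) chunks, which is the
    // minimum the API now treats as "loaded".
    private static final int BORDER_TICKET_LEVEL = 33;

    private static boolean isConsideredLoaded(final int ticketLevel) {
        return ticketLevel <= BORDER_TICKET_LEVEL;
    }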

From b85b6a8d71b7831be7c2044aa4f04e27b95d787d Mon Sep 17 00:00:00 2001
From: Spottedleaf <Spottedleaf@users.noreply.github.com>
Date: Mon, 1 Apr 2019 18:57:32 -0700
Subject: [PATCH] Make region files more reliable to write to

Previously we would write to the header before writing the chunk data,
which opens a window for corruption (or we would overwrite the old data
entirely). The saving process has now been changed to follow this chain
of events:

1. We always allocate a new space to write to so we do not potentially
overwrite and corrupt the current data
2. Write the chunk data first (the order of the fields in
the chunk data isn't relevant though)
3. Flush to disk (if the launch flag is used)
4. Write to the region header last
5. Flush to disk (if the launch flag is used)
6. Then free the previously allocated space

With this chain of events it is impossible for a chunk write to corrupt
a region file, unless the operating system has lied and we have NOT
actually flushed to disk. However, server administrators are still
recommended to continue performing regular backups.

Note that when Mojang finally decides to change their region format
to deal with oversized chunks, this patch must be changed to deal with
whatever system they decide to impose.

If the paper.flush-on-save startup flag is set to true, steps 3 and 5
will call sync() on the region file's fd, effectively flushing to disk.

We make two flushes to disk per chunk save (to ensure ordering and to
ensure the data has actually reached the disk), so this will negatively
affect save performance if the startup flag is used (especially on
HDDs).
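
To make that ordering concrete, the following is a minimal sketch of the
write sequence the patch enforces, expressed in terms of the helper
methods the diff below introduces (writeChunkData, syncRegionFile,
updateChunkHeader, getFreeSectors); the wrapper method and its parameter
names are simplified stand-ins for this note, not the actual NMS code:

    // Sketch only: assumes newSectorOffset/sectorCount already point at
    // free sectors that do NOT overlap the chunk's old sectors.
    private void writeChunkSafely(final ChunkCoordIntPair chunk,
                                  final int newSectorOffset, final int sectorCount,
                                  final byte[] data, final int dataLength,
                                  final int oldSectorOffset, final int oldSectorCount) throws IOException {
        // 1./2. write the chunk data into the freshly allocated sectors
        this.writeChunkData(newSectorOffset, data, dataLength);
        // 3. flush so the data is on disk before the header points at it
        this.syncRegionFile();
        // 4. only now update the region header to reference the new sectors
        this.updateChunkHeader(chunk, newSectorOffset << 8 | Math.min(sectorCount, 255));
        // 5. flush again so the header update itself is durable
        this.syncRegionFile();
        // 6. finally mark the previously used sectors as free for reuse
        for (int off = 0; off < oldSectorCount; ++off) {
            this.getFreeSectors().set(oldSectorOffset + off, true);
        }
    }

Since syncRegionFile() is gated on Boolean.getBoolean("paper.flush-on-save"),
which reads a JVM system property, the flag is typically enabled by adding
-Dpaper.flush-on-save=true to the server's startup command line.
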
diff --git a/src/main/java/net/minecraft/server/RegionFile.java b/src/main/java/net/minecraft/server/RegionFile.java
index ed2ccebb23..2e14d84657 100644
--- a/src/main/java/net/minecraft/server/RegionFile.java
+++ b/src/main/java/net/minecraft/server/RegionFile.java
@@ -29,7 +29,7 @@ public class RegionFile implements AutoCloseable {
private final RandomAccessFile b; private RandomAccessFile getDataFile() { return this.b; } // Paper - OBFHELPER
private final int[] c = new int[1024]; private int[] offsets = c; // Paper - OBFHELPER
private final int[] d = new int[1024];private int[] timestamps = d; // Paper - OBFHELPER
- private final List<Boolean> e;
+ private final List<Boolean> e; private List<Boolean> getFreeSectors() { return this.e; } // Paper - OBFHELPER
public RegionFile(File file) throws IOException {
this.b = new RandomAccessFile(file, "rw");
@@ -191,8 +191,8 @@ public class RegionFile implements AutoCloseable {
protected synchronized void a(ChunkCoordIntPair chunkcoordintpair, byte[] abyte, int i) {
try {
int j = this.getOffset(chunkcoordintpair);
- int k = j >> 8;
- int l = j & 255;
+ int k = j >> 8; final int oldSectorOffset = k; // Paper - store variable for later
+ int l = j & 255; final int oldSectorCount; // Paper - store variable for later
// Spigot start
if (l == 255) {
this.b.seek(k * 4096);
@@ -200,6 +200,7 @@ public class RegionFile implements AutoCloseable {
}
// Spigot end
int i1 = (i + 5) / 4096 + 1;
+ oldSectorCount = l; // Paper - store variable for later (watch out for re-assignments of l)
if (i1 >= 256) {
// Spigot start
@@ -209,14 +210,12 @@ public class RegionFile implements AutoCloseable {
// Spigot end
}
- if (k != 0 && l == i1) {
+ if (false && k != 0 && l == i1) { // Paper - We never want to overwrite old data
this.a(k, abyte, i);
} else {
int j1;
- for (j1 = 0; j1 < l; ++j1) {
- this.e.set(k + j1, true);
- }
+ // Paper - We do not free old sectors until we are done writing the new chunk data
j1 = this.e.indexOf(true);
int k1 = 0;
@@ -243,13 +242,13 @@ public class RegionFile implements AutoCloseable {
if (k1 >= i1) {
k = j1;
- this.a(chunkcoordintpair, j1 << 8 | (i1 > 255 ? 255 : i1)); // Spigot
+ //this.a(chunkcoordintpair, j1 << 8 | (i1 > 255 ? 255 : i1)); // Spigot // Paper - We only write to header after we've written chunk data
for (l1 = 0; l1 < i1; ++l1) {
this.e.set(k + l1, false);
}
- this.a(k, abyte, i);
+ this.writeChunk(chunkcoordintpair, j1 << 8 | (i1 > 255 ? 255 : i1), k, abyte, i); // Paper - Ensure we do not corrupt region files
} else {
this.b.seek(this.b.length());
k = this.e.size();
@@ -259,9 +258,14 @@ public class RegionFile implements AutoCloseable {
this.e.add(false);
}
- this.a(k, abyte, i);
- this.a(chunkcoordintpair, k << 8 | (i1 > 255 ? 255 : i1)); // Spigot
+ this.writeChunk(chunkcoordintpair, k << 8 | (i1 > 255 ? 255 : i1), k, abyte, i); // Paper - Ensure we do not corrupt region files
+ }
+
+ // Paper start - Now that we've written the new chunk we can free the old data
+ for (int off = 0; off < oldSectorCount; ++off) {
+ this.getFreeSectors().set(oldSectorOffset + off, true);
}
+ // Paper end
}
this.b(chunkcoordintpair, (int) (SystemUtils.getTimeMillis() / 1000L));
@@ -271,10 +275,10 @@ public class RegionFile implements AutoCloseable {
}
+ private void writeChunkData(final int sectorOffset, final byte[] data, final int dataLength) throws IOException { this.a(sectorOffset, data, dataLength); } // Paper - OBFHELPER
private void a(int i, byte[] abyte, int j) throws IOException {
this.b.seek((long) (i * 4096));
- this.b.writeInt(j + 1);
- this.b.writeByte(2);
+ this.writeIntAndByte(j + 1, (byte)2); // Paper - Avoid 4 io write calls
this.b.write(abyte, 0, j);
}
@@ -286,12 +290,13 @@ public class RegionFile implements AutoCloseable {
return this.getOffset(chunkcoordintpair) != 0;
}
+ private void updateChunkHeader(ChunkCoordIntPair chunkcoordintpair, final int offset) throws IOException { this.a(chunkcoordintpair, offset); } // Paper - OBFHELPER
private void a(ChunkCoordIntPair chunkcoordintpair, int i) throws IOException {
int j = this.f(chunkcoordintpair);
this.c[j] = i;
this.b.seek((long) (j * 4));
- this.b.writeInt(i);
+ this.writeInt(i); // Paper - Avoid 3 io write calls
}
private int f(ChunkCoordIntPair chunkcoordintpair) {
@@ -303,7 +308,7 @@ public class RegionFile implements AutoCloseable {
this.d[j] = i;
this.b.seek((long) (4096 + j * 4));
- this.b.writeInt(i);
+ this.writeInt(i); // Paper - Avoid 3 io write calls
}
public void close() throws IOException {
@@ -311,6 +316,40 @@ public class RegionFile implements AutoCloseable {
}
// Paper start
+ private static final boolean FLUSH_ON_SAVE = Boolean.getBoolean("paper.flush-on-save");
+ private void syncRegionFile() throws IOException {
+ if (!FLUSH_ON_SAVE) {
+ return;
+ }
+ this.getDataFile().getFD().sync(); // rethrow exception as we want to avoid corrupting a regionfile
+ }
+
+ private final java.nio.ByteBuffer scratchBuffer = java.nio.ByteBuffer.allocate(8);
+
+ private void writeInt(final int value) throws IOException {
+ synchronized (this.scratchBuffer) {
+ this.scratchBuffer.putInt(0, value);
+ this.getDataFile().write(this.scratchBuffer.array(), 0, 4);
+ }
+ }
+
+ // writes v1 then v2
+ private void writeIntAndByte(final int v1, final byte v2) throws IOException {
+ synchronized (this.scratchBuffer) {
+ this.scratchBuffer.putInt(0, v1);
+ this.scratchBuffer.put(4, v2);
+ this.getDataFile().write(this.scratchBuffer.array(), 0, 5);
+ }
+ }
+
+ private void writeChunk(final ChunkCoordIntPair chunk, final int chunkHeaderData,
+ final int chunkOffset, final byte[] chunkData, final int chunkDataLength) throws IOException {
+ this.writeChunkData(chunkOffset, chunkData, chunkDataLength);
+ this.syncRegionFile(); // Sync is required to ensure the previous data is written successfully
+ this.updateChunkHeader(chunk, chunkHeaderData);
+ this.syncRegionFile(); // Ensure header changes go through
+ }
+
public synchronized void deleteChunk(int j1) {
backup();
int k = offsets[j1];
--
2.21.0