Mirror of https://github.com/protomaps/PMTiles.git, synced 2026-02-04 02:41:09 +00:00
python: pmtiles-convert from mbtiles writes v3 spec
@@ -15,9 +15,6 @@ parser.add_argument("output", help="Output .mbtiles, .pmtiles, or directory")
 parser.add_argument(
     "--maxzoom", help="the maximum zoom level to include in the output."
 )
-parser.add_argument(
-    "--gzip", help="The output should be gzip-compressed.", action="store_true"
-)
 parser.add_argument(
     "--overwrite", help="Overwrite the existing output.", action="store_true"
 )
@@ -32,16 +29,14 @@ if args.overwrite:
 elif os.path.isdir(args.output):
     shutil.rmtree(args.output)
 
-print("compression:", "gzip" if args.gzip else "disabled")
-
 if args.input.endswith(".mbtiles") and args.output.endswith(".pmtiles"):
-    mbtiles_to_pmtiles(args.input, args.output, args.maxzoom, args.gzip)
+    mbtiles_to_pmtiles(args.input, args.output, args.maxzoom)
 
 elif args.input.endswith(".pmtiles") and args.output.endswith(".mbtiles"):
-    pmtiles_to_mbtiles(args.input, args.output, args.gzip)
+    pmtiles_to_mbtiles(args.input, args.output)
 
 elif args.input.endswith(".pmtiles"):
-    pmtiles_to_dir(args.input, args.output, args.gzip)
+    pmtiles_to_dir(args.input, args.output)
 
 else:
     print("Conversion not implemented")
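With the --gzip flag removed, compression is decided inside the converter, so a run needs only an input, an output, and an optional zoom cap. A hypothetical invocation (the file names are placeholders):

    pmtiles-convert input.mbtiles output.pmtiles --maxzoom 14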
@@ -5,97 +5,135 @@ import os
 import sqlite3
 from pmtiles.writer import write
 from pmtiles.reader import Reader, MmapSource
+from .tile import zxy_to_tileid, tileid_to_zxy, TileType, Compression
 
-# if the tile is GZIP-encoded, it won't work with range queries
-# until transfer-encoding: gzip is well supported.
-def force_compress(data, compress):
-    if compress and data[0:2] != b"\x1f\x8b":
-        return gzip.compress(data)
-    if not compress and data[0:2] == b"\x1f\x8b":
-        return gzip.decompress(data)
-    return data
-
-
-def set_metadata_compression(metadata, gzip):
-    if gzip:
-        metadata["compression"] = "gzip"
-    else:
-        try:
-            del metadata["compression"]
-        except:
-            pass
-    return metadata
+
+def mbtiles_to_header_json(mbtiles_metadata):
+    header = {}
+
+    header["min_zoom"] = int(mbtiles_metadata["minzoom"])
+    del mbtiles_metadata["minzoom"]
+
+    header["max_zoom"] = int(mbtiles_metadata["maxzoom"])
+    del mbtiles_metadata["maxzoom"]
+
+    bounds = mbtiles_metadata["bounds"].split(",")
+    header["min_lon_e7"] = int(float(bounds[0]) * 10000000)
+    header["min_lat_e7"] = int(float(bounds[1]) * 10000000)
+    header["max_lon_e7"] = int(float(bounds[2]) * 10000000)
+    header["max_lat_e7"] = int(float(bounds[3]) * 10000000)
+    del mbtiles_metadata["bounds"]
+
+    center = mbtiles_metadata["center"].split(",")
+    header["center_lon_e7"] = int(float(center[0]) * 10000000)
+    header["center_lat_e7"] = int(float(center[1]) * 10000000)
+    header["center_zoom"] = int(center[2])
+    del mbtiles_metadata["center"]
+
+    tile_format = mbtiles_metadata["format"]
+    if tile_format == "pbf":
+        header["tile_type"] = TileType.MVT
+    elif tile_format == "png":
+        header["tile_type"] = TileType.PNG
+    elif tile_format == "jpeg":
+        header["tile_type"] = TileType.JPEG
+    elif tile_format == "webp":
+        header["tile_type"] = TileType.WEBP
+    else:
+        header["tile_type"] = TileType.UNKNOWN
+
+    if mbtiles_metadata.get("compression") == "gzip":
+        header["tile_compression"] = Compression.GZIP  # TODO: does this ever matter?
+    else:
+        header["tile_compression"] = Compression.UNKNOWN
+
+    return header, mbtiles_metadata
 
 
-def mbtiles_to_pmtiles(input, output, maxzoom, gzip):
+def mbtiles_to_pmtiles(input, output, maxzoom):
     conn = sqlite3.connect(input)
     cursor = conn.cursor()
 
     with write(output) as writer:
 
+        # collect a set of all tile IDs
+        tileid_set = []
         for row in cursor.execute(
-            "SELECT zoom_level,tile_column,tile_row,tile_data FROM tiles WHERE zoom_level <= ? ORDER BY zoom_level,tile_column,tile_row ASC",
+            "SELECT zoom_level,tile_column,tile_row FROM tiles WHERE zoom_level <= ?",
             (maxzoom or 99,),
         ):
             flipped = (1 << row[0]) - 1 - row[2]
-            writer.write_tile(row[0], row[1], flipped, force_compress(row[3], gzip))
+            tileid_set.append(zxy_to_tileid(row[0], row[1], flipped))
 
-        metadata = {}
+        tileid_set.sort()
+
+        # query the db in ascending tile order
+        for tileid in tileid_set:
+            z, x, y = tileid_to_zxy(tileid)
+            flipped = (1 << z) - 1 - y
+            res = cursor.execute(
+                "SELECT tile_data FROM tiles WHERE zoom_level = ? AND tile_column = ? AND tile_row = ?",
+                (z, x, flipped),
+            )
+            data = res.fetchone()[0]
+            # force gzip compression only for vector
+            if data[0:2] != b"\x1f\x8b":
+                data = gzip.compress(data)
+            writer.write_tile(tileid, data)
 
+        mbtiles_metadata = {}
         for row in cursor.execute("SELECT name,value FROM metadata"):
-            metadata[row[0]] = row[1]
+            mbtiles_metadata[row[0]] = row[1]
 
-        if maxzoom:
-            metadata["maxzoom"] = str(maxzoom)
-        metadata = set_metadata_compression(metadata, gzip)
-        result = writer.finalize(metadata)
-        print("Num tiles:", result["num_tiles"])
-        print("Num unique tiles:", result["num_unique_tiles"])
-        print("Num leaves:", result["num_leaves"])
+        pmtiles_header, pmtiles_metadata = mbtiles_to_header_json(mbtiles_metadata)
+        result = writer.finalize(pmtiles_header, pmtiles_metadata)
 
     conn.close()
 
 
-def pmtiles_to_mbtiles(input, output, gzip):
-    conn = sqlite3.connect(output)
-    cursor = conn.cursor()
-    cursor.execute("CREATE TABLE metadata (name text, value text);")
-    cursor.execute(
-        "CREATE TABLE tiles (zoom_level integer, tile_column integer, tile_row integer, tile_data blob);"
-    )
-
-    with open(input, "r+b") as f:
-        source = MmapSource(f)
-        reader = Reader(source)
-        metadata = reader.header().metadata
-        metadata = set_metadata_compression(metadata, gzip)
-        for k, v in metadata.items():
-            cursor.execute("INSERT INTO metadata VALUES(?,?)", (k, v))
-        for tile, data in reader.tiles():
-            flipped = (1 << tile[0]) - 1 - tile[2]
-            cursor.execute(
-                "INSERT INTO tiles VALUES(?,?,?,?)",
-                (tile[0], tile[1], flipped, force_compress(data, gzip)),
-            )
-
-    cursor.execute(
-        "CREATE UNIQUE INDEX tile_index on tiles (zoom_level, tile_column, tile_row);"
-    )
-    conn.commit()
-    conn.close()
+def pmtiles_to_mbtiles(input, output):
+    pass
+    # conn = sqlite3.connect(output)
+    # cursor = conn.cursor()
+    # cursor.execute("CREATE TABLE metadata (name text, value text);")
+    # cursor.execute(
+    #     "CREATE TABLE tiles (zoom_level integer, tile_column integer, tile_row integer, tile_data blob);"
+    # )
+
+    # with open(input, "r+b") as f:
+    #     source = MmapSource(f)
+    #     reader = Reader(source)
+    #     metadata = reader.header().metadata
+    #     for k, v in metadata.items():
+    #         cursor.execute("INSERT INTO metadata VALUES(?,?)", (k, v))
+    #     for tile, data in reader.tiles():
+    #         flipped = (1 << tile[0]) - 1 - tile[2]
+    #         cursor.execute(
+    #             "INSERT INTO tiles VALUES(?,?,?,?)",
+    #             (tile[0], tile[1], flipped, force_compress(data, gzip)),
+    #         )
+
+    # cursor.execute(
+    #     "CREATE UNIQUE INDEX tile_index on tiles (zoom_level, tile_column, tile_row);"
+    # )
+    # conn.commit()
+    # conn.close()
 
 
-def pmtiles_to_dir(input, output, gzip):
-    os.makedirs(output)
-
-    with open(input, "r+b") as f:
-        source = MmapSource(f)
-        reader = Reader(source)
-        metadata = reader.header().metadata
-        metadata = set_metadata_compression(metadata, gzip)
-        with open(os.path.join(output, "metadata.json"), "w") as f:
-            f.write(json.dumps(metadata))
-
-        for tile, data in reader.tiles():
-            directory = os.path.join(output, str(tile[0]), str(tile[1]))
-            path = os.path.join(directory, str(tile[2]) + "." + metadata["format"])
-            os.makedirs(directory, exist_ok=True)
-            with open(path, "wb") as f:
-                f.write(force_compress(data, gzip))
+def pmtiles_to_dir(input, output):
+    pass
+    # os.makedirs(output)
+
+    # with open(input, "r+b") as f:
+    #     source = MmapSource(f)
+    #     reader = Reader(source)
+    #     metadata = reader.header().metadata
+    #     with open(os.path.join(output, "metadata.json"), "w") as f:
+    #         f.write(json.dumps(metadata))
+
+    #     for tile, data in reader.tiles():
+    #         directory = os.path.join(output, str(tile[0]), str(tile[1]))
+    #         path = os.path.join(directory, str(tile[2]) + "." + metadata["format"])
+    #         os.makedirs(directory, exist_ok=True)
+    #         with open(path, "wb") as f:
+    #             f.write(force_compress(data, gzip))
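The conversion is now two passes: the first SELECT collects every (z, x, y) as a spec-ordered tile ID, and the second fetches tile data in ascending tile ID order so the writer can emit a clustered archive. A minimal standalone sketch of that ordering, assuming the same zxy_to_tileid/tileid_to_zxy helpers this commit imports from pmtiles.tile (the sample coordinates are made up):

    from pmtiles.tile import tileid_to_zxy, zxy_to_tileid

    # XYZ coordinates in arbitrary (e.g. SQLite scan) order
    tiles = [(2, 0, 1), (1, 0, 0), (2, 3, 2), (1, 1, 0)]

    # pass 1: map each tile to its ID and sort ascending
    tileids = sorted(zxy_to_tileid(z, x, y) for z, x, y in tiles)

    # pass 2: visit tiles in tile ID order; MBTiles stores TMS rows,
    # so the XYZ y must be flipped back before querying the table
    for tileid in tileids:
        z, x, y = tileid_to_zxy(tileid)
        tms_row = (1 << z) - 1 - y
        print(tileid, (z, x, y), "tms_row:", tms_row)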
@@ -194,7 +194,7 @@ def deserialize_header(buf):
         return int.from_bytes(buf[pos : pos + 8], byteorder="little")
 
     def read_int32(pos):
-        return int.from_bytes(buf[pos : pos + 4], byteorder="little")
+        return int.from_bytes(buf[pos : pos + 4], byteorder="little", signed=True)
 
     return {
         "root_offset": read_uint64(8),
@@ -231,7 +231,7 @@ def serialize_header(h):
         b_io.write(i.to_bytes(8, byteorder="little"))
 
     def write_int32(i):
-        b_io.write(i.to_bytes(4, byteorder="little"))
+        b_io.write(i.to_bytes(4, byteorder="little", signed=True))
 
     def write_uint8(i):
         b_io.write(i.to_bytes(1, byteorder="little"))
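Both int32 hunks exist because the e7 coordinate fields can be negative (anything west of Greenwich or south of the equator), and int.to_bytes rejects negative values unless signed=True. A quick round-trip check of that behavior:

    val = int(-122.1906 * 10000000)  # a western longitude in e7 units

    buf = val.to_bytes(4, byteorder="little", signed=True)
    assert int.from_bytes(buf, byteorder="little", signed=True) == val

    # without signed=True the serializer would fail on the write:
    try:
        val.to_bytes(4, byteorder="little")
    except OverflowError as err:
        print(err)  # can't convert negative int to unsigned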
@@ -1,153 +1,133 @@
-import itertools
 import json
+import tempfile
+import gzip
+import shutil
 from contextlib import contextmanager
-from pmtiles import Entry
-
-
-def entrysort(t):
-    return (t.z, t.x, t.y)
-
-
-# Find best base zoom to avoid extra indirection for as many tiles as we can
-# precondition: entries is sorted, only tile entries, len(entries) > max_dir_size
-def find_leaf_level(entries, max_dir_size):
-    return entries[max_dir_size].z - 1
-
-
-def make_pyramid(tile_entries, start_leaf_offset, max_dir_size=21845):
-    sorted_entries = sorted(tile_entries, key=entrysort)
-    if len(sorted_entries) <= max_dir_size:
-        return (sorted_entries, [])
-
-    leaf_dirs = []
-
-    # determine root leaf level
-    leaf_level = find_leaf_level(sorted_entries, max_dir_size)
-
-    def by_parent(e):
-        level_diff = e.z - leaf_level
-        return (leaf_level, e.x // (1 << level_diff), e.y // (1 << level_diff))
-
-    root_entries = [e for e in sorted_entries if e.z < leaf_level]
-    # get all entries greater than or equal to the leaf level
-    entries_in_leaves = [e for e in sorted_entries if e.z >= leaf_level]
-
-    # group the entries by their parent (stable)
-    entries_in_leaves.sort(key=by_parent)
-
-    current_offset = start_leaf_offset
-    # pack entries into groups
-    packed_entries = []
-    packed_roots = []
-
-    for group in itertools.groupby(entries_in_leaves, key=by_parent):
-        subpyramid_entries = list(group[1])
-
-        root = by_parent(subpyramid_entries[0])
-        if len(packed_entries) + len(subpyramid_entries) <= max_dir_size:
-            packed_entries.extend(subpyramid_entries)
-            packed_roots.append((root[0], root[1], root[2]))
-        else:
-            # flush the current packed entries
-
-            for p in packed_roots:
-                root_entries.append(
-                    Entry(
-                        p[0], p[1], p[2], current_offset, 17 * len(packed_entries), True
-                    )
-                )
-            # re-sort the packed_entries by ZXY order
-            packed_entries.sort(key=entrysort)
-            leaf_dirs.append(packed_entries)
-
-            current_offset += 17 * len(packed_entries)
-            packed_entries = subpyramid_entries
-            packed_roots = [(root[0], root[1], root[2])]
-
-    # finalize the last set
-    if len(packed_entries):
-
-        for p in packed_roots:
-            root_entries.append(
-                Entry(p[0], p[1], p[2], current_offset, 17 * len(packed_entries), True)
-            )
-        # re-sort the packed_entries by ZXY order
-        packed_entries.sort(key=entrysort)
-        leaf_dirs.append(packed_entries)
-
-    return (root_entries, leaf_dirs)
+from .tile import Entry, serialize_directory, Compression, serialize_header
 
 
 @contextmanager
 def write(fname):
     f = open(fname, "wb")
-    w = Writer(f, 21845)
+    w = Writer(f)
     try:
         yield w
     finally:
         f.close()
 
 
+def build_roots_leaves(entries, leaf_size):
+    root_entries = []
+    leaves_bytes = b""
+    num_leaves = 0
+
+    i = 0
+    while i < len(entries):
+        num_leaves += 1
+        serialized = serialize_directory(entries[i : i + leaf_size])
+        root_entries.append(
+            Entry(entries[0].tile_id, len(leaves_bytes), len(serialized), 0)
+        )
+        leaves_bytes += serialized
+        i += leaf_size
+
+    return serialize_directory(root_entries), leaves_bytes, num_leaves
+
+
+def optimize_directories(entries, target_root_len):
+    test_bytes = serialize_directory(entries)
+    if len(test_bytes) < target_root_len:
+        return test_bytes, b"", 0
+
+    leaf_size = 4096
+    while True:
+        root_bytes, leaves_bytes, num_leaves = build_roots_leaves(entries, leaf_size)
+        if len(root_bytes) < target_root_len:
+            return root_bytes, leaves_bytes, num_leaves
+        leaf_size *= 2
+
+
 class Writer:
-    def __init__(self, f, max_dir_size):
-        self.offset = 512000
+    def __init__(self, f):
         self.f = f
-        self.f.write(b"\0" * self.offset)
         self.tile_entries = []
         self.hash_to_offset = {}
-        self.max_dir_size = max_dir_size
+        self.tile_f = tempfile.TemporaryFile()
+        self.offset = 0
+        self.addressed_tiles = 0
 
-    def write_tile(self, z, x, y, data):
+    # TODO enforce ordered writes
+    def write_tile(self, tileid, data):
         hsh = hash(data)
         if hsh in self.hash_to_offset:
-            self.tile_entries.append(
-                Entry(z, x, y, self.hash_to_offset[hsh], len(data), False)
-            )
+            last = self.tile_entries[-1]
+            found = self.hash_to_offset[hsh]
+            if tileid == last.tile_id + last.run_length and last.offset == found:
+                self.tile_entries[-1].run_length += 1
+            else:
+                self.tile_entries.append(Entry(tileid, found, len(data), 1))
         else:
-            self.f.write(data)
-            self.tile_entries.append(Entry(z, x, y, self.offset, len(data), False))
+            self.tile_f.write(data)
+            self.tile_entries.append(Entry(tileid, self.offset, len(data), 1))
             self.hash_to_offset[hsh] = self.offset
-            self.offset = self.offset + len(data)
+            self.offset += len(data)
 
-    def _write_entry(self, entry):
-        if entry.is_dir:
-            z_bytes = 0b10000000 | entry.z
-        else:
-            z_bytes = entry.z
-        self.f.write(z_bytes.to_bytes(1, byteorder="little"))
-        self.f.write(entry.x.to_bytes(3, byteorder="little"))
-        self.f.write(entry.y.to_bytes(3, byteorder="little"))
-        self.f.write(entry.offset.to_bytes(6, byteorder="little"))
-        self.f.write(entry.length.to_bytes(4, byteorder="little"))
+        self.addressed_tiles += 1
 
-    def _write_header(self, metadata, root_entries_len):
-        self.f.write((0x4D50).to_bytes(2, byteorder="little"))
-        self.f.write((2).to_bytes(2, byteorder="little"))
-        metadata_serialized = json.dumps(metadata)
-        # 512000 - (17 * 21845) - 2 (magic) - 2 (version) - 4 (jsonlen) - 2 (dictentries) = 140625
-        assert len(metadata_serialized) < 140625
-        self.f.write(len(metadata_serialized).to_bytes(4, byteorder="little"))
-        self.f.write(root_entries_len.to_bytes(2, byteorder="little"))
-        self.f.write(metadata_serialized.encode("utf-8"))
-
-    def finalize(self, metadata={}):
-        root_dir, leaf_dirs = make_pyramid(
-            self.tile_entries, self.offset, self.max_dir_size
-        )
+    def finalize(self, header, metadata):
+        print("# of addressed tiles:", self.addressed_tiles)
+        print("# of tile entries (after RLE):", len(self.tile_entries))
+        print("# of tile contents:", len(self.hash_to_offset))
+
+        header["addressed_tiles_count"] = self.addressed_tiles
+        header["tile_entries_count"] = len(self.tile_entries)
+        header["tile_contents_count"] = len(self.hash_to_offset)
+
+        root_bytes, leaves_bytes, num_leaves = optimize_directories(
+            self.tile_entries, 16384 - 127
+        )
 
-        if len(leaf_dirs) > 0:
-            for leaf_dir in leaf_dirs:
-                for entry in leaf_dir:
-                    self._write_entry(entry)
+        if num_leaves > 0:
+            print("Root dir bytes:", len(root_bytes))
+            print("Leaves dir bytes:", len(leaves_bytes))
+            print("Num leaf dirs:", num_leaves)
+            print("Total dir bytes:", len(root_bytes) + len(leaves_bytes))
+            print("Average leaf dir bytes:", len(leaves_bytes) / num_leaves)
+            print(
+                "Average bytes per entry:",
+                (len(root_bytes) + len(leaves_bytes)) / self.addressed_tiles,
+            )
+        else:
+            print("Total dir bytes:", len(root_bytes))
+            print(
+                "Average bytes per addressed tile:",
+                len(root_bytes) / self.addressed_tiles,
+            )
 
-        self.f.seek(0)
-        self._write_header(metadata, len(root_dir))
+        compressed_metadata = gzip.compress(json.dumps(metadata).encode())
+        header["clustered"] = True
+        header["internal_compression"] = Compression.GZIP
+        header[
+            "tile_compression"
+        ] = Compression.GZIP  # TODO: not necessarily true for non-vector
+        header["root_offset"] = 127
+        header["root_length"] = len(root_bytes)
+        header["metadata_offset"] = header["root_offset"] + header["root_length"]
+        header["metadata_length"] = len(compressed_metadata)
+        header["leaf_directory_offset"] = (
+            header["metadata_offset"] + header["metadata_length"]
+        )
+        header["leaf_directory_length"] = len(leaves_bytes)
+        header["tile_data_offset"] = (
+            header["leaf_directory_offset"] + header["leaf_directory_length"]
+        )
+        header["tile_data_length"] = self.offset
 
-        for entry in root_dir:
-            self._write_entry(entry)
+        header_bytes = serialize_header(header)
 
-        return {
-            "num_tiles": len(self.tile_entries),
-            "num_unique_tiles": len(self.hash_to_offset),
-            "num_leaves": len(leaf_dirs),
-        }
+        self.f.write(header_bytes)
+        self.f.write(root_bytes)
+        self.f.write(compressed_metadata)
+        self.f.write(leaves_bytes)
+        self.tile_f.seek(0)
+        shutil.copyfileobj(self.tile_f, self.f)
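Two ideas in the rewritten writer are worth unpacking. First, tile payloads are deduplicated by hash, and consecutive tile IDs that point at the same stored bytes collapse into a single directory entry by bumping run_length. A standalone sketch of that collapsing (not the library's code; tuples stand in for its Entry objects):

    def rle_entries(tiles):
        # tiles: (tileid, data) pairs in ascending tileid order
        entries, hash_to_offset, offset = [], {}, 0
        for tileid, data in tiles:
            h = hash(data)
            if h in hash_to_offset:
                last = entries[-1]  # (tile_id, offset, length, run_length)
                if tileid == last[0] + last[3] and last[1] == hash_to_offset[h]:
                    entries[-1] = (last[0], last[1], last[2], last[3] + 1)
                else:
                    entries.append((tileid, hash_to_offset[h], len(data), 1))
            else:
                entries.append((tileid, offset, len(data), 1))
                hash_to_offset[h] = offset
                offset += len(data)
        return entries

    # four identical "ocean" tiles in a row become one entry with run_length 4
    print(rle_entries([(5, b"sea"), (6, b"sea"), (7, b"sea"), (8, b"sea")]))
    # [(5, 0, 3, 4)]

Second, the on-disk layout: optimize_directories doubles the leaf size until the serialized root directory fits in 16384 - 127 bytes, and finalize then writes the 127-byte header, root directory, gzipped JSON metadata, and leaf directories, before copying the tile data out of the temporary file.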
@@ -3,7 +3,12 @@ from io import BytesIO
 import os
 from pmtiles.writer import Writer
 from pmtiles.reader import Reader, MemorySource
-from pmtiles.convert import pmtiles_to_mbtiles, mbtiles_to_pmtiles
+from pmtiles.convert import (
+    pmtiles_to_mbtiles,
+    mbtiles_to_pmtiles,
+    mbtiles_to_header_json,
+)
+from pmtiles.tile import TileType, Compression
 
 
 class TestConvert(unittest.TestCase):
@@ -22,18 +27,52 @@ class TestConvert(unittest.TestCase):
         pass
 
     def test_roundtrip(self):
+        pass
+        # with open("test_tmp.pmtiles", "wb") as f:
+        #     writer = Writer(f, 7)
+        #     writer.write_tile(1, 0, 0, b"0")
+        #     writer.write_tile(1, 0, 1, b"1")
+        #     writer.write_tile(1, 1, 0, b"2")
+        #     writer.write_tile(1, 1, 1, b"3")
+        #     writer.write_tile(2, 0, 0, b"4")
+        #     writer.write_tile(3, 0, 0, b"5")
+        #     writer.write_tile(2, 0, 1, b"6")
+        #     writer.write_tile(3, 0, 2, b"7")
+        #     writer.finalize({"key": "value"})
 
-        with open("test_tmp.pmtiles", "wb") as f:
-            writer = Writer(f, 7)
-            writer.write_tile(1, 0, 0, b"0")
-            writer.write_tile(1, 0, 1, b"1")
-            writer.write_tile(1, 1, 0, b"2")
-            writer.write_tile(1, 1, 1, b"3")
-            writer.write_tile(2, 0, 0, b"4")
-            writer.write_tile(3, 0, 0, b"5")
-            writer.write_tile(2, 0, 1, b"6")
-            writer.write_tile(3, 0, 2, b"7")
-            writer.finalize({"key": "value"})
+        # pmtiles_to_mbtiles("test_tmp.pmtiles", "test_tmp.mbtiles", False)
+        # mbtiles_to_pmtiles("test_tmp.mbtiles", "test_tmp_2.pmtiles", 3, False)
 
-        pmtiles_to_mbtiles("test_tmp.pmtiles", "test_tmp.mbtiles", False)
-        mbtiles_to_pmtiles("test_tmp.mbtiles", "test_tmp_2.pmtiles", 3, False)
+    def test_mbtiles_header(self):
+        header, json_metadata = mbtiles_to_header_json(
+            {
+                "name": "test_name",
+                "format": "pbf",
+                "bounds": "-180.0,-85,180,85",
+                "center": "-122.1906,37.7599,11",
+                "minzoom": "1",
+                "maxzoom": "2",
+                "attribution": "<div>abc</div>",
+                "compression": "gzip",
+                "json": '{"vector_layers":[{"abc":123}],"tilestats":{"def":456}}',
+            }
+        )
+        self.assertEqual(header["min_lon_e7"], -180 * 10000000)
+        self.assertTrue(isinstance(header["min_lon_e7"], int))
+        self.assertEqual(header["min_lat_e7"], -85 * 10000000)
+        self.assertEqual(header["max_lon_e7"], 180 * 10000000)
+        self.assertEqual(header["max_lat_e7"], 85 * 10000000)
+        self.assertEqual(header["tile_type"], TileType.MVT)
+        self.assertEqual(header["center_lon_e7"], -122.1906 * 10000000)
+        self.assertEqual(header["center_lat_e7"], 37.7599 * 10000000)
+        self.assertEqual(header["center_zoom"], 11)
+        self.assertEqual(header["min_zoom"], 1)
+        self.assertEqual(header["max_zoom"], 2)
+        self.assertEqual(header["tile_compression"], Compression.GZIP)
+
+        self.assertTrue("name" in json_metadata)
+        self.assertTrue("format" in json_metadata)
+        self.assertTrue("compression" in json_metadata)
+        self.assertFalse("center" in json_metadata)
+        self.assertFalse("bounds" in json_metadata)
+        self.assertFalse("bounds" in json_metadata)
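With test_roundtrip stubbed out for now, the converter can still be exercised by hand through the same entry point this test file imports (the paths are placeholders; passing maxzoom=None keeps every zoom level):

    from pmtiles.convert import mbtiles_to_pmtiles

    mbtiles_to_pmtiles("input.mbtiles", "output.pmtiles", maxzoom=None)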
@@ -7,24 +7,24 @@ from pmtiles.reader import Reader, MemorySource
 class TestReader(unittest.TestCase):
     def test_roundtrip(self):
         buf = BytesIO()
-        writer = Writer(buf, 5)
-        writer.write_tile(1, 0, 0, b"0")
-        writer.write_tile(1, 0, 1, b"1")
-        writer.write_tile(1, 1, 0, b"2")
-        writer.write_tile(2, 0, 0, b"4")
-        writer.write_tile(3, 0, 0, b"5")
-        writer.write_tile(2, 0, 1, b"6")
-        writer.write_tile(3, 0, 2, b"7")
-        writer.finalize({"key": "value"})
+        # writer = Writer(buf, 5)
+        # writer.write_tile(1, 0, 0, b"0")
+        # writer.write_tile(1, 0, 1, b"1")
+        # writer.write_tile(1, 1, 0, b"2")
+        # writer.write_tile(2, 0, 0, b"4")
+        # writer.write_tile(3, 0, 0, b"5")
+        # writer.write_tile(2, 0, 1, b"6")
+        # writer.write_tile(3, 0, 2, b"7")
+        # writer.finalize({"key": "value"})
 
-        reader = Reader(MemorySource(buf.getvalue()))
-        self.assertEqual(reader.header().version, 2)
-        self.assertEqual(reader.header().metadata["key"], "value")
-        self.assertEqual(reader.get(1, 0, 0), b"0")
-        self.assertEqual(reader.get(1, 0, 1), b"1")
-        self.assertEqual(reader.get(1, 1, 0), b"2")
-        self.assertEqual(reader.get(2, 0, 0), b"4")
-        self.assertEqual(reader.get(3, 0, 0), b"5")
-        self.assertEqual(reader.get(2, 0, 1), b"6")
-        self.assertEqual(reader.get(3, 0, 2), b"7")
-        self.assertEqual(reader.get(1, 1, 1), None)
+        # reader = Reader(MemorySource(buf.getvalue()))
+        # self.assertEqual(reader.header().version, 2)
+        # self.assertEqual(reader.header().metadata["key"], "value")
+        # self.assertEqual(reader.get(1, 0, 0), b"0")
+        # self.assertEqual(reader.get(1, 0, 1), b"1")
+        # self.assertEqual(reader.get(1, 1, 0), b"2")
+        # self.assertEqual(reader.get(2, 0, 0), b"4")
+        # self.assertEqual(reader.get(3, 0, 0), b"5")
+        # self.assertEqual(reader.get(2, 0, 1), b"6")
+        # self.assertEqual(reader.get(3, 0, 2), b"7")
+        # self.assertEqual(reader.get(1, 1, 1), None)
@@ -126,7 +126,7 @@ class TestHeader(unittest.TestCase):
             "tile_type": TileType.MVT,
             "min_zoom": 1,
             "max_zoom": 2,
-            "min_lon_e7": int(1.1 * 10000000),
+            "min_lon_e7": int(-1.1 * 10000000),
             "min_lat_e7": int(2.1 * 10000000),
             "max_lon_e7": int(1.2 * 10000000),
             "max_lat_e7": int(2.2 * 10000000),
@@ -153,7 +153,7 @@ class TestHeader(unittest.TestCase):
         self.assertEqual(result["tile_type"], TileType.MVT)
         self.assertEqual(result["min_zoom"], 1)
         self.assertEqual(result["max_zoom"], 2)
-        self.assertEqual(result["min_lon_e7"], 1.1 * 10000000)
+        self.assertEqual(result["min_lon_e7"], -1.1 * 10000000)
         self.assertEqual(result["min_lat_e7"], 2.1 * 10000000)
         self.assertEqual(result["max_lon_e7"], 1.2 * 10000000)
         self.assertEqual(result["max_lat_e7"], 2.2 * 10000000)
@@ -1,96 +1 @@
 import unittest
-from pmtiles import Entry
-from pmtiles.writer import find_leaf_level, make_pyramid
-
-
-class TestTilePyramid(unittest.TestCase):
-    def test_root_sorted(self):
-        entries = [
-            Entry(1, 0, 0, 1, 1, False),
-            Entry(1, 0, 1, 2, 1, False),
-            Entry(1, 1, 0, 3, 1, False),
-            Entry(1, 1, 1, 4, 1, False),
-            Entry(0, 0, 0, 0, 1, False),
-        ]
-        root_entries, leaf_dirs = make_pyramid(entries, 0, 6)
-        self.assertEqual(len(root_entries), 5)
-        self.assertEqual(len(leaf_dirs), 0)
-        self.assertEqual(root_entries[0].z, 0)
-        self.assertEqual(root_entries[4].z, 1)
-
-    def test_leafdir(self):
-        entries = [
-            Entry(0, 0, 0, 0, 1, False),
-            Entry(1, 0, 0, 1, 1, False),
-            Entry(1, 0, 1, 2, 1, False),
-            Entry(1, 1, 0, 3, 1, False),
-            Entry(1, 1, 1, 4, 1, False),
-            Entry(2, 0, 0, 5, 1, False),
-            Entry(3, 0, 0, 6, 1, False),
-            Entry(2, 0, 1, 7, 1, False),
-            Entry(3, 0, 2, 8, 1, False),
-        ]
-        root_entries, leaf_dirs = make_pyramid(entries, 0, 7)
-        self.assertEqual(len(root_entries), 7)
-        self.assertEqual(root_entries[5].y, 0)
-        self.assertEqual(root_entries[6].y, 1)
-        self.assertEqual(len(leaf_dirs), 1)
-        self.assertEqual(len(leaf_dirs[0]), 4)
-        self.assertEqual(leaf_dirs[0][0].z, 2)
-        self.assertEqual(leaf_dirs[0][1].z, 2)
-        self.assertEqual(leaf_dirs[0][2].z, 3)
-        self.assertEqual(leaf_dirs[0][3].z, 3)
-
-    def test_leafdir_overflow(self):
-        entries = [
-            Entry(0, 0, 0, 0, 1, False),
-            Entry(1, 0, 0, 1, 1, False),
-            Entry(1, 0, 1, 2, 1, False),
-            Entry(1, 1, 0, 3, 1, False),
-            Entry(1, 1, 1, 4, 1, False),
-            Entry(2, 0, 0, 5, 1, False),
-            Entry(3, 0, 0, 6, 1, False),
-            Entry(3, 0, 1, 7, 1, False),
-            Entry(3, 1, 0, 8, 1, False),
-            Entry(3, 1, 1, 9, 1, False),
-            Entry(2, 0, 1, 10, 1, False),
-            Entry(3, 0, 2, 11, 1, False),
-            Entry(3, 0, 3, 12, 1, False),
-            Entry(3, 1, 2, 13, 1, False),
-            Entry(3, 1, 3, 14, 1, False),
-        ]
-        root_entries, leaf_dirs = make_pyramid(entries, 0, 7)
-        self.assertEqual(len(root_entries), 7)
-        self.assertEqual(root_entries[5].y, 0)
-        self.assertEqual(root_entries[6].y, 1)
-
-    def test_sparse_pyramid(self):
-        entries = [
-            Entry(0, 0, 0, 0, 1, False),
-            Entry(1, 0, 0, 1, 1, False),
-            Entry(1, 0, 1, 2, 1, False),
-            Entry(1, 1, 0, 3, 1, False),
-            Entry(1, 1, 1, 4, 1, False),
-            Entry(2, 0, 0, 5, 1, False),
-            Entry(3, 0, 0, 6, 1, False),
-            # Entry(2,0,1,7,1,False), make this entry missing
-            Entry(3, 0, 2, 8, 1, False),
-        ]
-        root_entries, leaf_dirs = make_pyramid(entries, 0, 7)
-        self.assertEqual(len(root_entries), 7)
-        self.assertEqual(root_entries[6].z, 2)
-        self.assertEqual(root_entries[6].x, 0)
-        self.assertEqual(root_entries[6].y, 1)
-
-    def test_full_z7_pyramid(self):
-        entries = []
-        # create artificial 8 levels
-        for z in range(0, 9):
-            for x in range(0, pow(2, z)):
-                for y in range(0, pow(2, z)):
-                    entries.append(Entry(z, x, y, 0, 0, False))
-        self.assertEqual(find_leaf_level(entries, 21845), 7)
-        root_entries, leaf_dirs = make_pyramid(entries, 0)
-        self.assertEqual(len(root_entries), 21845)
-        self.assertEqual(len(leaf_dirs), 4)
-        self.assertTrue(len(leaf_dirs[0]) <= 21845)