#!/usr/bin/env python3
"""
Fix a ZIP file >4GB where offsets are truncated to 32 bits.
Reads central directory at 4GB + stored_offset and rebuilds with correct ZIP64 offsets.
"""
import struct
import sys
import os
from dataclasses import dataclass
from typing import BinaryIO

# ZIP signatures
LOCAL_FILE_HEADER_SIG = b'PK\x03\x04'
CENTRAL_DIR_SIG = b'PK\x01\x02'
END_CENTRAL_DIR_SIG = b'PK\x05\x06'
ZIP64_END_CENTRAL_DIR_SIG = b'PK\x06\x06'
ZIP64_END_CENTRAL_DIR_LOCATOR_SIG = b'PK\x06\x07'

ZIP64_EXTRA_FIELD_TAG = 0x0001
FOUR_GB = 0x100000000
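
# Note on the corruption being repaired (an illustrative assumption, not data read
# from any specific archive): a 32-bit offset field can only hold values modulo
# 4 GiB, so an entry whose local header really sits at, say, 5 GiB gets recorded as
#     (5 * 1024**3) % FOUR_GB == 1 * 1024**3
# The per-entry adjustment pass further below detects these wrap-arounds and adds
# the lost multiples of 4 GiB back.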

@dataclass
class CentralDirEntry:
    """Represents a central directory entry."""
    version_made_by: int
    version_needed: int
    flags: int
    compression: int
    mod_time: int
    mod_date: int
    crc32: int
    compressed_size: int
    uncompressed_size: int
    internal_attr: int
    external_attr: int
    filename: bytes
    extra: bytes
    comment: bytes
    local_header_offset: int  # Stored 32-bit offset
    actual_offset: int = 0  # Calculated actual offset
    actual_data_offset: int = 0  # Offset where compressed data starts

def find_end_of_central_dir(f: BinaryIO, file_size: int) -> dict:
    """Find and parse the end of central directory record."""
    # 65557 = 22-byte EOCD record + maximum 65535-byte archive comment
    search_start = max(0, file_size - 65557)
    f.seek(search_start)
    data = f.read()
    idx = data.rfind(END_CENTRAL_DIR_SIG)
    if idx == -1:
        raise ValueError("Could not find end of central directory")
    eocd_offset = search_start + idx
    eocd_data = data[idx:idx + 22]
    _, _, _, _, total_entries, cd_size, cd_offset, _ = struct.unpack('<4sHHHHIIH', eocd_data)
    return {
        'total_entries': total_entries,
        'cd_size': cd_size,
        'cd_offset': cd_offset,
        'eocd_offset': eocd_offset
    }

def read_central_directory(f: BinaryIO, cd_offset: int, cd_size: int) -> list[CentralDirEntry]:
    """Read all central directory entries."""
    entries = []
    f.seek(cd_offset)
    data = f.read(cd_size)
    pos = 0
    while pos + 46 <= len(data):
        if data[pos:pos + 4] != CENTRAL_DIR_SIG:
            break
        # 46-byte fixed portion of the central directory file header
        fields = struct.unpack('<4sHHHHHHIIIHHHHHII', data[pos:pos + 46])
        (_, version_made, version_needed, flags, compression,
         mod_time, mod_date, crc32, comp_size, uncomp_size,
         name_len, extra_len, comment_len, _,
         internal_attr, external_attr, offset) = fields
        pos += 46
        filename = data[pos:pos + name_len]
        pos += name_len
        extra = data[pos:pos + extra_len]
        pos += extra_len
        comment = data[pos:pos + comment_len]
        pos += comment_len
        entries.append(CentralDirEntry(
            version_made_by=version_made,
            version_needed=version_needed,
            flags=flags,
            compression=compression,
            mod_time=mod_time,
            mod_date=mod_date,
            crc32=crc32,
            compressed_size=comp_size,
            uncompressed_size=uncomp_size,
            internal_attr=internal_attr,
            external_attr=external_attr,
            filename=filename,
            extra=extra,
            comment=comment,
            local_header_offset=offset
        ))
    return entries

def calculate_offset_adjustments(entries: list[CentralDirEntry]):
    """Calculate actual offsets based on 4GB boundary crossings."""
    current_adjustment = 0
    prev_actual_end = 0
    for i, entry in enumerate(entries):
        stored_offset = entry.local_header_offset
        if i > 0:
            actual_with_current_adj = stored_offset + current_adjustment
            if actual_with_current_adj < prev_actual_end:
                # Stored offset wrapped past another 4GB boundary
                current_adjustment += FOUR_GB
        entry.actual_offset = stored_offset + current_adjustment
        # Calculate where data ends (for next iteration); uses the central directory
        # extra length as an estimate of the local header size
        header_size = 30 + len(entry.filename) + len(entry.extra)
        prev_actual_end = entry.actual_offset + header_size + entry.compressed_size
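
# Worked example of the adjustment pass above (hypothetical offsets, not taken from
# any particular archive): suppose three consecutive entries were really written at
# 3.9 GiB, 4.1 GiB and 4.3 GiB. The broken writer stores them as 3.9 GiB, 0.1 GiB
# and 0.3 GiB. For the second entry, 0.1 GiB is less than where the first entry's
# data ends, so the running adjustment grows to +4 GiB, restoring 4.1 GiB; the same
# adjustment then carries forward to the third entry (4.3 GiB).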

def calculate_data_offsets(f: BinaryIO, entries: list[CentralDirEntry]):
    """Calculate actual data offsets by reading local headers."""
    for entry in entries:
        # Read the name/extra lengths from the local header (they can differ from
        # the central directory copies)
        f.seek(entry.actual_offset + 26)
        data = f.read(4)
        name_len, extra_len = struct.unpack('<HH', data)
        entry.actual_data_offset = entry.actual_offset + 30 + name_len + extra_len

def write_fixed_zip(entries: list[CentralDirEntry], input_path: str, output_path: str):
    """Write a new ZIP64 file with corrected offsets."""
    print(f"\nWriting fixed ZIP to {output_path}...")
    with open(input_path, 'rb') as fin, open(output_path, 'wb') as fout:
        new_offsets = []
        for i, entry in enumerate(entries):
            new_offset = fout.tell()
            new_offsets.append(new_offset)
            # Write new local file header
            write_local_file_header(fout, entry)
            # Copy compressed data from original file
            fin.seek(entry.actual_data_offset)
            remaining = entry.compressed_size
            while remaining > 0:
                chunk = fin.read(min(remaining, 1024 * 1024))
                if not chunk:
                    break
                fout.write(chunk)
                remaining -= len(chunk)
            if (i + 1) % 100 == 0:
                print(f" Copied {i + 1}/{len(entries)} entries...")
        # Write central directory
        cd_start = fout.tell()
        for i, entry in enumerate(entries):
            write_central_dir_entry(fout, entry, new_offsets[i])
        cd_end = fout.tell()
        cd_size = cd_end - cd_start
        # Write end records
        write_end_records(fout, len(entries), cd_size, cd_start)
    print(f"Done! Output: {output_path}")

def write_local_file_header(f: BinaryIO, entry: CentralDirEntry):
    """Write a local file header with proper sizes."""
    # Determine if we need ZIP64 for sizes
    need_zip64 = (entry.compressed_size >= 0xFFFFFFFF or
                  entry.uncompressed_size >= 0xFFFFFFFF)
    if need_zip64:
        zip64_extra = struct.pack('<HHQQ',
                                  ZIP64_EXTRA_FIELD_TAG, 16,
                                  entry.uncompressed_size, entry.compressed_size)
        header_comp = 0xFFFFFFFF
        header_uncomp = 0xFFFFFFFF
        version = 45
    else:
        zip64_extra = b''
        header_comp = entry.compressed_size
        header_uncomp = entry.uncompressed_size
        version = entry.version_needed
    other_extra = filter_zip64_extra(entry.extra)
    new_extra = zip64_extra + other_extra
    header = struct.pack('<4sHHHHHIIIHH',
                         LOCAL_FILE_HEADER_SIG,
                         version,
                         entry.flags & ~0x08,  # Clear data descriptor flag
                         entry.compression,
                         entry.mod_time,
                         entry.mod_date,
                         entry.crc32,
                         header_comp,
                         header_uncomp,
                         len(entry.filename),
                         len(new_extra)
                         )
    f.write(header)
    f.write(entry.filename)
    f.write(new_extra)

def write_central_dir_entry(f: BinaryIO, entry: CentralDirEntry, offset: int):
    """Write a central directory entry."""
    # Determine which fields need ZIP64
    need_zip64_uncomp = entry.uncompressed_size >= 0xFFFFFFFF
    need_zip64_comp = entry.compressed_size >= 0xFFFFFFFF
    need_zip64_offset = offset >= 0xFFFFFFFF
    # Build ZIP64 extra field with only the needed fields, in spec order:
    # uncompressed size, compressed size, local header offset
    zip64_data = b''
    if need_zip64_uncomp:
        zip64_data += struct.pack('<Q', entry.uncompressed_size)
    if need_zip64_comp:
        zip64_data += struct.pack('<Q', entry.compressed_size)
    if need_zip64_offset:
        zip64_data += struct.pack('<Q', offset)
    if zip64_data:
        zip64_extra = struct.pack('<HH', ZIP64_EXTRA_FIELD_TAG, len(zip64_data)) + zip64_data
    else:
        zip64_extra = b''
    other_extra = filter_zip64_extra(entry.extra)
    new_extra = zip64_extra + other_extra
    header_uncomp = 0xFFFFFFFF if need_zip64_uncomp else entry.uncompressed_size
    header_comp = 0xFFFFFFFF if need_zip64_comp else entry.compressed_size
    header_offset = 0xFFFFFFFF if need_zip64_offset else offset
    version_needed = 45 if zip64_data else entry.version_needed
    header = struct.pack('<4sHHHHHHIIIHHHHHII',
                         CENTRAL_DIR_SIG,
                         entry.version_made_by,
                         version_needed,
                         entry.flags & ~0x08,
                         entry.compression,
                         entry.mod_time,
                         entry.mod_date,
                         entry.crc32,
                         header_comp,
                         header_uncomp,
                         len(entry.filename),
                         len(new_extra),
                         len(entry.comment),
                         0,  # Disk number
                         entry.internal_attr,
                         entry.external_attr,
                         header_offset
                         )
    f.write(header)
    f.write(entry.filename)
    f.write(new_extra)
    f.write(entry.comment)

def filter_zip64_extra(extra: bytes) -> bytes:
    """Remove any existing ZIP64 extra field."""
    result = b''
    pos = 0
    while pos + 4 <= len(extra):
        tag, size = struct.unpack('<HH', extra[pos:pos + 4])
        if tag != ZIP64_EXTRA_FIELD_TAG:
            result += extra[pos:pos + 4 + size]
        pos += 4 + size
    return result

def write_end_records(f: BinaryIO, num_entries: int, cd_size: int, cd_offset: int):
    """Write end of central directory records, using ZIP64 only if needed."""
    need_zip64 = (num_entries >= 0xFFFF or cd_size >= 0xFFFFFFFF or cd_offset >= 0xFFFFFFFF)
    if need_zip64:
        # ZIP64 end of central directory record
        zip64_eocd_offset = f.tell()
        f.write(struct.pack('<4sQHHIIQQQQ',
                            ZIP64_END_CENTRAL_DIR_SIG,
                            44,  # size of the remainder of this record
                            45, 45,  # version made by / version needed (4.5 = ZIP64)
                            0, 0,  # disk number / disk with central directory
                            num_entries, num_entries, cd_size, cd_offset))
        # ZIP64 end of central directory locator
        f.write(struct.pack('<4sIQI',
                            ZIP64_END_CENTRAL_DIR_LOCATOR_SIG,
                            0, zip64_eocd_offset, 1))
        # Regular EOCD with ZIP64 marker values
        f.write(struct.pack('<4sHHHHIIH',
                            END_CENTRAL_DIR_SIG,
                            0, 0, 0xFFFF, 0xFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0))
    else:
        # Regular EOCD only
        f.write(struct.pack('<4sHHHHIIH',
                            END_CENTRAL_DIR_SIG,
                            0, 0, num_entries, num_entries, cd_size, cd_offset, 0))

def main():
    if len(sys.argv) < 2:
        print(f"Usage: {sys.argv[0]} <input.zip> [output.zip]")
        print("\nFixes a ZIP file >4GB with truncated 32-bit offsets.")
        sys.exit(1)
    input_path = sys.argv[1]
    output_path = sys.argv[2] if len(sys.argv) > 2 else input_path.rsplit('.', 1)[0] + '_fixed.zip'
    if not os.path.exists(input_path):
        print(f"Error: {input_path} not found")
        sys.exit(1)
    file_size = os.path.getsize(input_path)
    print(f"Input: {input_path} ({file_size:,} bytes, {file_size / (1024**3):.2f} GB)")
    with open(input_path, 'rb') as f:
        # Find end of central directory
        eocd = find_end_of_central_dir(f, file_size)
        print(f"\nEnd of central directory found at offset {eocd['eocd_offset']:,}")
        print(f" Stored CD offset: {eocd['cd_offset']:,} (0x{eocd['cd_offset']:08X})")
        print(f" CD size: {eocd['cd_size']:,}")
        print(f" Total entries: {eocd['total_entries']}")
        # CD is right before the EOCD
        actual_cd_offset = eocd['eocd_offset'] - eocd['cd_size']
        print(f"\nActual CD offset (EOCD - CD size): {actual_cd_offset:,}")
        entries = read_central_directory(f, actual_cd_offset, eocd['cd_size'])
        print(f"Read {len(entries)} entries")
        if not entries:
            print("Error: No entries found")
            sys.exit(1)
        # Calculate per-entry offset adjustments
        calculate_offset_adjustments(entries)
        # Show first few entries
        print("\nFirst entries:")
        for entry in entries[:5]:
            filename = entry.filename.decode('utf-8', errors='replace')
            print(f" {filename}")
            print(f" stored: {entry.local_header_offset:,} -> actual: {entry.actual_offset:,}")
        # Calculate data offsets from local headers
        calculate_data_offsets(f, entries)
    # Write fixed file
    write_fixed_zip(entries, input_path, output_path)


if __name__ == '__main__':
    main()
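
# Minimal verification sketch (the file names here are hypothetical examples, not
# part of the fix itself). After running, e.g.:
#     ./zip64_fix.py broken.zip fixed.zip
# the repaired archive should open cleanly with the standard library:
#
#     import zipfile
#     with zipfile.ZipFile('fixed.zip') as zf:
#         print(zf.testzip())  # None means every member's CRC checked out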