Line 0
Link Here
|
|
|
1 |
/* |
2 |
* Squashfs - a compressed read only filesystem for Linux |
3 |
* |
4 |
* Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007 |
5 |
* Phillip Lougher <phillip@lougher.demon.co.uk> |
6 |
* |
7 |
* This program is free software; you can redistribute it and/or |
8 |
* modify it under the terms of the GNU General Public License |
9 |
* as published by the Free Software Foundation; either version 2, |
10 |
* or (at your option) any later version. |
11 |
* |
12 |
* This program is distributed in the hope that it will be useful, |
13 |
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
15 |
* GNU General Public License for more details. |
16 |
* |
17 |
* You should have received a copy of the GNU General Public License |
18 |
* along with this program; if not, write to the Free Software |
19 |
* Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
20 |
* |
21 |
* inode.c |
22 |
*/ |
23 |
|
24 |
#include <linux/squashfs_fs.h> |
25 |
#include <linux/module.h> |
26 |
#include <linux/zlib.h> |
27 |
#include <linux/fs.h> |
28 |
#include <linux/squashfs_fs_sb.h> |
29 |
#include <linux/squashfs_fs_i.h> |
30 |
#include <linux/buffer_head.h> |
31 |
#include <linux/vfs.h> |
32 |
#include <linux/vmalloc.h> |
33 |
#include <linux/smp_lock.h> |
34 |
#include <linux/exportfs.h> |
35 |
|
36 |
#include "squashfs.h" |
37 |
|
38 |
/*
 * Number of slots in the per-superblock metadata block cache; used as the
 * scan/modulo bound throughout this file.  Initialised elsewhere (not
 * visible in this file section).
 */
int squashfs_cached_blks;

/* Forward declarations for functions referenced by the operation tables
 * and helpers defined below. */
static struct dentry *squashfs_get_parent(struct dentry *child);
static int squashfs_read_inode(struct inode *i, squashfs_inode_t inode);
static int squashfs_statfs(struct dentry *, struct kstatfs *);
static int squashfs_symlink_readpage(struct file *file, struct page *page);
static long long read_blocklist(struct inode *inode, int index,
				int readahead_blks, char *block_list,
				unsigned short **block_p, unsigned int *bsize);
static int squashfs_readpage(struct file *file, struct page *page);
static int squashfs_readdir(struct file *, void *, filldir_t);
static struct dentry *squashfs_lookup(struct inode *, struct dentry *,
				struct nameidata *);
static int squashfs_remount(struct super_block *s, int *flags, char *data);
static void squashfs_put_super(struct super_block *);
static int squashfs_get_sb(struct file_system_type *,int, const char *, void *,
				struct vfsmount *);
static struct inode *squashfs_alloc_inode(struct super_block *sb);
static void squashfs_destroy_inode(struct inode *inode);
static int init_inodecache(void);
static void destroy_inodecache(void);
59 |
|
60 |
/* Filesystem type registration; squashfs needs a backing block device. */
static struct file_system_type squashfs_fs_type = {
	.owner = THIS_MODULE,
	.name = "squashfs",
	.get_sb = squashfs_get_sb,
	.kill_sb = kill_block_super,
	.fs_flags = FS_REQUIRES_DEV
};
67 |
|
68 |
/*
 * Maps squashfs on-disk inode type codes to the DT_* values reported by
 * readdir; index 0 is DT_UNKNOWN for out-of-range/unrecognised types.
 */
static const unsigned char squashfs_filetype_table[] = {
	DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_FIFO, DT_SOCK
};
71 |
|
72 |
/* Superblock operations for ordinary (non-exportable) mounts. */
static struct super_operations squashfs_super_ops = {
	.alloc_inode = squashfs_alloc_inode,
	.destroy_inode = squashfs_destroy_inode,
	.statfs = squashfs_statfs,
	.put_super = squashfs_put_super,
	.remount_fs = squashfs_remount
};
79 |
|
80 |
/*
 * Superblock operations used for NFS-exportable mounts.  Identical to
 * squashfs_super_ops except that .remount_fs is deliberately absent.
 */
static struct super_operations squashfs_export_super_ops = {
	.alloc_inode = squashfs_alloc_inode,
	.destroy_inode = squashfs_destroy_inode,
	.statfs = squashfs_statfs,
	.put_super = squashfs_put_super,
};
86 |
|
87 |
/* NFS export operations: only parent lookup is provided here. */
static struct export_operations squashfs_export_ops = {
	.get_parent = squashfs_get_parent
};
90 |
|
91 |
/* Address space operations for symlink inodes (read-only). */
SQSH_EXTERN const struct address_space_operations squashfs_symlink_aops = {
	.readpage = squashfs_symlink_readpage
};
94 |
|
95 |
/* Address space operations for regular file inodes (read-only). */
SQSH_EXTERN const struct address_space_operations squashfs_aops = {
	.readpage = squashfs_readpage
};
98 |
|
99 |
/* File operations for directories. */
static const struct file_operations squashfs_dir_ops = {
	.read = generic_read_dir,
	.readdir = squashfs_readdir
};
103 |
|
104 |
/* Inode operations for directories: lookup only (read-only filesystem). */
SQSH_EXTERN struct inode_operations squashfs_dir_inode_ops = {
	.lookup = squashfs_lookup
};
107 |
|
108 |
|
109 |
/*
 * Read the two-byte length field that precedes a compressed metadata block.
 *
 * The field may straddle a device-block boundary, in which case the second
 * byte is fetched from the next device block.  When msblk->swap is set the
 * two bytes are assembled into `temp` in the opposite order.  If the
 * filesystem was created with check-data (SQUASHFS_CHECK_DATA flag), a
 * marker byte follows the length field; it is verified and skipped.
 *
 * On success: *c_byte holds the decoded length, *cur_index and *offset are
 * advanced past the field (and marker byte, if any), and the buffer_head
 * covering the new position is returned (caller must brelse it).  Returns
 * NULL on read failure or a corrupt marker.
 */
static struct buffer_head *get_block_length(struct super_block *s,
				int *cur_index, int *offset, int *c_byte)
{
	struct squashfs_sb_info *msblk = s->s_fs_info;
	unsigned short temp;
	struct buffer_head *bh;

	if (!(bh = sb_bread(s, *cur_index)))
		goto out;

	if (msblk->devblksize - *offset == 1) {
		/* Only one byte left in this device block: take it, then
		 * read the second byte from the start of the next block. */
		if (msblk->swap)
			((unsigned char *) &temp)[1] = *((unsigned char *)
				(bh->b_data + *offset));
		else
			((unsigned char *) &temp)[0] = *((unsigned char *)
				(bh->b_data + *offset));
		brelse(bh);
		if (!(bh = sb_bread(s, ++(*cur_index))))
			goto out;
		if (msblk->swap)
			((unsigned char *) &temp)[0] = *((unsigned char *)
				bh->b_data);
		else
			((unsigned char *) &temp)[1] = *((unsigned char *)
				bh->b_data);
		*c_byte = temp;
		*offset = 1;
	} else {
		/* Both bytes available in the current device block. */
		if (msblk->swap) {
			((unsigned char *) &temp)[1] = *((unsigned char *)
				(bh->b_data + *offset));
			((unsigned char *) &temp)[0] = *((unsigned char *)
				(bh->b_data + *offset + 1));
		} else {
			((unsigned char *) &temp)[0] = *((unsigned char *)
				(bh->b_data + *offset));
			((unsigned char *) &temp)[1] = *((unsigned char *)
				(bh->b_data + *offset + 1));
		}
		*c_byte = temp;
		*offset += 2;
	}

	if (SQUASHFS_CHECK_DATA(msblk->sblk.flags)) {
		/* check-data filesystems store a marker byte after the
		 * length field; it may start a new device block. */
		if (*offset == msblk->devblksize) {
			brelse(bh);
			if (!(bh = sb_bread(s, ++(*cur_index))))
				goto out;
			*offset = 0;
		}
		if (*((unsigned char *) (bh->b_data + *offset)) !=
						SQUASHFS_MARKER_BYTE) {
			ERROR("Metadata block marker corrupt @ %x\n",
						*cur_index);
			brelse(bh);
			goto out;
		}
		(*offset)++;
	}
	return bh;

out:
	return NULL;
}
174 |
|
175 |
|
176 |
/*
 * Read (and decompress, if needed) a block of data from the filesystem
 * into `buffer` (capacity `srclength` bytes).
 *
 * Two cases, selected by `length`:
 *  - length != 0: a data block; `length` is the on-disk encoded size
 *    (compressed-size bits plus the SQUASHFS_COMPRESSED_BLOCK flag).
 *  - length == 0: a metadata block; the two-byte length field preceding
 *    the block is read from disk via get_block_length().
 *
 * If `next_index` is non-NULL it receives the byte offset just past this
 * block (for metadata, that includes the 2-byte length field and, on
 * check-data filesystems, the 1-byte marker).
 *
 * Returns the number of bytes placed in `buffer`, or 0 on failure.
 */
SQSH_EXTERN unsigned int squashfs_read_data(struct super_block *s, char *buffer,
				long long index, unsigned int length,
				long long *next_index, int srclength)
{
	struct squashfs_sb_info *msblk = s->s_fs_info;
	struct squashfs_super_block *sblk = &msblk->sblk;
	struct buffer_head **bh;
	unsigned int offset = index & ((1 << msblk->devblksize_log2) - 1);
	unsigned int cur_index = index >> msblk->devblksize_log2;
	int bytes, avail_bytes, b = 0, k = 0;
	unsigned int compressed;
	unsigned int c_byte = length;

	/* Worst case: a full filesystem block spanning block_size/devblksize
	 * device blocks, plus one for a leading partial block. */
	bh = kmalloc(((sblk->block_size >> msblk->devblksize_log2) + 1) *
				sizeof(struct buffer_head *), GFP_KERNEL);
	if (bh == NULL)
		goto read_failure;

	if (c_byte) {
		/* Data block: size and compressed flag are encoded in length. */
		bytes = msblk->devblksize - offset;
		compressed = SQUASHFS_COMPRESSED_BLOCK(c_byte);
		c_byte = SQUASHFS_COMPRESSED_SIZE_BLOCK(c_byte);

		TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n", index,
					compressed ? "" : "un", (unsigned int) c_byte, srclength);

		/* Sanity-check against output capacity and filesystem extent. */
		if (c_byte > srclength || index < 0 || (index + c_byte) > sblk->bytes_used)
			goto read_failure;

		bh[0] = sb_getblk(s, cur_index);
		if (bh[0] == NULL)
			goto block_release;

		for (b = 1; bytes < c_byte; b++) {
			bh[b] = sb_getblk(s, ++cur_index);
			if (bh[b] == NULL)
				goto block_release;
			bytes += msblk->devblksize;
		}
		/* Submit all b buffers for asynchronous read. */
		ll_rw_block(READ, b, bh);
	} else {
		/* Metadata block: length field must be read from disk first. */
		if (index < 0 || (index + 2) > sblk->bytes_used)
			goto read_failure;

		/* get_block_length() returns bh[0] already read and advances
		 * cur_index/offset past the length field. */
		bh[0] = get_block_length(s, &cur_index, &offset, &c_byte);
		if (bh[0] == NULL)
			goto read_failure;

		bytes = msblk->devblksize - offset;
		compressed = SQUASHFS_COMPRESSED(c_byte);
		c_byte = SQUASHFS_COMPRESSED_SIZE(c_byte);

		TRACE("Block @ 0x%llx, %scompressed size %d\n", index, compressed
					? "" : "un", (unsigned int) c_byte);

		if (c_byte > srclength || (index + c_byte) > sblk->bytes_used)
			goto read_failure;

		for (b = 1; bytes < c_byte; b++) {
			bh[b] = sb_getblk(s, ++cur_index);
			if (bh[b] == NULL)
				goto block_release;
			bytes += msblk->devblksize;
		}
		/* bh[0] was already read synchronously; submit the rest. */
		ll_rw_block(READ, b - 1, bh + 1);
	}

	if (compressed) {
		int zlib_err = 0;

		/*
		 * uncompress block
		 *
		 * The zlib stream state lives in the superblock info, so the
		 * mutex serialises all decompression on this mount.
		 */

		mutex_lock(&msblk->read_data_mutex);

		msblk->stream.next_out = buffer;
		msblk->stream.avail_out = srclength;

		for (bytes = 0; k < b; k++) {
			/* Bytes of this device block that belong to the
			 * compressed stream. */
			avail_bytes = min(c_byte - bytes, msblk->devblksize - offset);

			wait_on_buffer(bh[k]);
			if (!buffer_uptodate(bh[k]))
				goto release_mutex;

			msblk->stream.next_in = bh[k]->b_data + offset;
			msblk->stream.avail_in = avail_bytes;

			if (k == 0) {
				zlib_err = zlib_inflateInit(&msblk->stream);
				if (zlib_err != Z_OK) {
					ERROR("zlib_inflateInit returned unexpected result 0x%x,"
						" srclength %d\n", zlib_err, srclength);
					goto release_mutex;
				}

				/* First buffer can contribute nothing when the
				 * length field consumed its remaining bytes. */
				if (avail_bytes == 0) {
					offset = 0;
					brelse(bh[k]);
					continue;
				}
			}

			zlib_err = zlib_inflate(&msblk->stream, Z_NO_FLUSH);
			if (zlib_err != Z_OK && zlib_err != Z_STREAM_END) {
				ERROR("zlib_inflate returned unexpected result 0x%x,"
					" srclength %d, avail_in %d, avail_out %d\n", zlib_err,
					srclength, msblk->stream.avail_in, msblk->stream.avail_out);
				goto release_mutex;
			}

			bytes += avail_bytes;
			offset = 0;
			brelse(bh[k]);
		}

		/* The full stream must have been consumed by the last call. */
		if (zlib_err != Z_STREAM_END)
			goto release_mutex;

		zlib_err = zlib_inflateEnd(&msblk->stream);
		if (zlib_err != Z_OK) {
			ERROR("zlib_inflateEnd returned unexpected result 0x%x,"
				" srclength %d\n", zlib_err, srclength);
			goto release_mutex;
		}
		bytes = msblk->stream.total_out;
		mutex_unlock(&msblk->read_data_mutex);
	} else {
		int i;

		for(i = 0; i < b; i++) {
			wait_on_buffer(bh[i]);
			if (!buffer_uptodate(bh[i]))
				goto block_release;
		}

		/* Uncompressed block: straight copy out of each buffer. */
		for (bytes = 0; k < b; k++) {
			avail_bytes = min(c_byte - bytes, msblk->devblksize - offset);

			memcpy(buffer + bytes, bh[k]->b_data + offset, avail_bytes);
			bytes += avail_bytes;
			offset = 0;
			brelse(bh[k]);
		}
	}

	if (next_index)
		/* For metadata (length == 0) also skip the on-disk length
		 * field (2 bytes) and marker byte (1 byte, check-data only). */
		*next_index = index + c_byte + (length ? 0 :
				(SQUASHFS_CHECK_DATA(msblk->sblk.flags) ? 3 : 2));

	kfree(bh);
	return bytes;

release_mutex:
	mutex_unlock(&msblk->read_data_mutex);

block_release:
	/* Release any buffers not yet consumed by the copy/inflate loop. */
	for (; k < b; k++)
		brelse(bh[k]);

read_failure:
	ERROR("sb_bread failed reading block 0x%x\n", cur_index);
	kfree(bh);
	return 0;
}
342 |
|
343 |
|
344 |
/*
 * Read `length` bytes of metadata starting at (block, offset), following
 * the chain of metadata blocks when the request spans more than one.
 * Blocks are served from the per-superblock block cache, which is filled
 * on demand via squashfs_read_data().
 *
 * If `buffer` is NULL the data is skipped rather than copied.  On success
 * *next_block/*next_offset are set to the position just past the data and
 * `length` is returned; on failure 0 is returned.
 *
 * Cache protocol (all slot-state changes under block_cache_mutex):
 * a slot being filled is marked SQUASHFS_USED_BLK so other readers skip
 * it; an empty slot is SQUASHFS_INVALID_BLK; unused_cache_blks counts
 * slots not currently being filled, and waiters sleep on msblk->waitq
 * until one is available.
 */
SQSH_EXTERN int squashfs_get_cached_block(struct super_block *s, void *buffer,
				long long block, unsigned int offset,
				int length, long long *next_block,
				unsigned int *next_offset)
{
	struct squashfs_sb_info *msblk = s->s_fs_info;
	int n, i, bytes, return_length = length;
	long long next_index;

	TRACE("Entered squashfs_get_cached_block [%llx:%x]\n", block, offset);

	while (1) {
		/* Unlocked scan for a cache hit; the result is re-validated
		 * under the mutex below. */
		for (i = 0; i < squashfs_cached_blks; i++)
			if (msblk->block_cache[i].block == block)
				break;

		mutex_lock(&msblk->block_cache_mutex);

		if (i == squashfs_cached_blks) {
			/* read inode header block */
			if (msblk->unused_cache_blks == 0) {
				mutex_unlock(&msblk->block_cache_mutex);
				wait_event(msblk->waitq, msblk->unused_cache_blks);
				continue;
			}

			/* Round-robin search for a slot not being filled. */
			i = msblk->next_cache;
			for (n = 0; n < squashfs_cached_blks; n++) {
				if (msblk->block_cache[i].block != SQUASHFS_USED_BLK)
					break;
				i = (i + 1) % squashfs_cached_blks;
			}

			msblk->next_cache = (i + 1) % squashfs_cached_blks;

			if (msblk->block_cache[i].block == SQUASHFS_INVALID_BLK) {
				/* Slot never used: allocate its data buffer lazily. */
				msblk->block_cache[i].data = vmalloc(SQUASHFS_METADATA_SIZE);
				if (msblk->block_cache[i].data == NULL) {
					ERROR("Failed to allocate cache block\n");
					mutex_unlock(&msblk->block_cache_mutex);
					goto out;
				}
			}

			/* Claim the slot, then drop the mutex for the read. */
			msblk->block_cache[i].block = SQUASHFS_USED_BLK;
			msblk->unused_cache_blks --;
			mutex_unlock(&msblk->block_cache_mutex);

			msblk->block_cache[i].length = squashfs_read_data(s,
				msblk->block_cache[i].data, block, 0, &next_index,
				SQUASHFS_METADATA_SIZE);

			if (msblk->block_cache[i].length == 0) {
				ERROR("Unable to read cache block [%llx:%x]\n", block, offset);
				/* Return the slot to the free pool and wake waiters. */
				mutex_lock(&msblk->block_cache_mutex);
				msblk->block_cache[i].block = SQUASHFS_INVALID_BLK;
				msblk->unused_cache_blks ++;
				smp_mb();
				vfree(msblk->block_cache[i].data);
				wake_up(&msblk->waitq);
				mutex_unlock(&msblk->block_cache_mutex);
				goto out;
			}

			/* Publish the filled slot. */
			mutex_lock(&msblk->block_cache_mutex);
			msblk->block_cache[i].block = block;
			msblk->block_cache[i].next_index = next_index;
			msblk->unused_cache_blks ++;
			smp_mb();
			wake_up(&msblk->waitq);
			TRACE("Read cache block [%llx:%x]\n", block, offset);
		}

		/* The unlocked scan may be stale; retry if the slot no longer
		 * holds our block. */
		if (msblk->block_cache[i].block != block) {
			mutex_unlock(&msblk->block_cache_mutex);
			continue;
		}

		bytes = msblk->block_cache[i].length - offset;

		if (bytes < 1) {
			/* Offset beyond the cached block's contents. */
			mutex_unlock(&msblk->block_cache_mutex);
			goto out;
		} else if (bytes >= length) {
			/* Request satisfied within this block. */
			if (buffer)
				memcpy(buffer, msblk->block_cache[i].data + offset, length);
			if (msblk->block_cache[i].length - offset == length) {
				*next_block = msblk->block_cache[i].next_index;
				*next_offset = 0;
			} else {
				*next_block = block;
				*next_offset = offset + length;
			}
			mutex_unlock(&msblk->block_cache_mutex);
			goto finish;
		} else {
			/* Consume the rest of this block and continue with the
			 * next metadata block in the chain. */
			if (buffer) {
				memcpy(buffer, msblk->block_cache[i].data + offset, bytes);
				buffer = (char *) buffer + bytes;
			}
			block = msblk->block_cache[i].next_index;
			mutex_unlock(&msblk->block_cache_mutex);
			length -= bytes;
			offset = 0;
		}
	}

finish:
	return return_length;
out:
	return 0;
}
456 |
|
457 |
|
458 |
static int get_fragment_location(struct super_block *s, unsigned int fragment, |
459 |
long long *fragment_start_block, |
460 |
unsigned int *fragment_size) |
461 |
{ |
462 |
struct squashfs_sb_info *msblk = s->s_fs_info; |
463 |
long long start_block = |
464 |
msblk->fragment_index[SQUASHFS_FRAGMENT_INDEX(fragment)]; |
465 |
int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment); |
466 |
struct squashfs_fragment_entry fragment_entry; |
467 |
|
468 |
if (msblk->swap) { |
469 |
struct squashfs_fragment_entry sfragment_entry; |
470 |
|
471 |
if (!squashfs_get_cached_block(s, &sfragment_entry, start_block, offset, |
472 |
sizeof(sfragment_entry), &start_block, &offset)) |
473 |
goto out; |
474 |
SQUASHFS_SWAP_FRAGMENT_ENTRY(&fragment_entry, &sfragment_entry); |
475 |
} else |
476 |
if (!squashfs_get_cached_block(s, &fragment_entry, start_block, offset, |
477 |
sizeof(fragment_entry), &start_block, &offset)) |
478 |
goto out; |
479 |
|
480 |
*fragment_start_block = fragment_entry.start_block; |
481 |
*fragment_size = fragment_entry.size; |
482 |
|
483 |
return 1; |
484 |
|
485 |
out: |
486 |
return 0; |
487 |
} |
488 |
|
489 |
|
490 |
SQSH_EXTERN void release_cached_fragment(struct squashfs_sb_info *msblk, |
491 |
struct squashfs_fragment_cache *fragment) |
492 |
{ |
493 |
mutex_lock(&msblk->fragment_mutex); |
494 |
fragment->locked --; |
495 |
if (fragment->locked == 0) { |
496 |
msblk->unused_frag_blks ++; |
497 |
smp_mb(); |
498 |
wake_up(&msblk->fragment_wait_queue); |
499 |
} |
500 |
mutex_unlock(&msblk->fragment_mutex); |
501 |
} |
502 |
|
503 |
|
504 |
/*
 * Get a locked reference to the fragment cache entry for the fragment
 * block starting at `start_block` (on-disk encoded size `length`),
 * reading and caching it if not already present.
 *
 * The caller must drop the reference with release_cached_fragment().
 * Returns NULL on allocation or read failure.
 *
 * Slot protocol (changes under fragment_mutex): a slot with locked == 0
 * is reclaimable; unused_frag_blks counts such slots, and threads sleep
 * on fragment_wait_queue until one becomes free.
 */
SQSH_EXTERN
struct squashfs_fragment_cache *get_cached_fragment(struct super_block *s,
				long long start_block, int length)
{
	int i, n;
	struct squashfs_sb_info *msblk = s->s_fs_info;
	struct squashfs_super_block *sblk = &msblk->sblk;

	while (1) {
		mutex_lock(&msblk->fragment_mutex);

		/* Look for an existing entry for this fragment block. */
		for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS &&
				msblk->fragment[i].block != start_block; i++);

		if (i == SQUASHFS_CACHED_FRAGMENTS) {
			/* Miss: need a free slot to read into. */
			if (msblk->unused_frag_blks == 0) {
				mutex_unlock(&msblk->fragment_mutex);
				wait_event(msblk->fragment_wait_queue, msblk->unused_frag_blks);
				continue;
			}

			/* Round-robin search for an unlocked slot. */
			i = msblk->next_fragment;
			for (n = 0; n < SQUASHFS_CACHED_FRAGMENTS; n++) {
				if (msblk->fragment[i].locked == 0)
					break;
				i = (i + 1) % SQUASHFS_CACHED_FRAGMENTS;
			}

			msblk->next_fragment = (msblk->next_fragment + 1) %
				SQUASHFS_CACHED_FRAGMENTS;

			if (msblk->fragment[i].data == NULL) {
				/* Lazily allocate the slot's data buffer. */
				msblk->fragment[i].data = vmalloc(sblk->block_size);
				if (msblk->fragment[i].data == NULL) {
					ERROR("Failed to allocate fragment cache block\n");
					mutex_unlock(&msblk->fragment_mutex);
					goto out;
				}
			}

			/* Claim the slot (INVALID_BLK hides it from lookups),
			 * then drop the mutex for the actual read. */
			msblk->unused_frag_blks --;
			msblk->fragment[i].block = SQUASHFS_INVALID_BLK;
			msblk->fragment[i].locked = 1;
			mutex_unlock(&msblk->fragment_mutex);

			msblk->fragment[i].length = squashfs_read_data(s,
				msblk->fragment[i].data, start_block, length, NULL,
				sblk->block_size);

			if (msblk->fragment[i].length == 0) {
				ERROR("Unable to read fragment cache block [%llx]\n", start_block);
				/* Release the slot and wake waiters. */
				msblk->fragment[i].locked = 0;
				msblk->unused_frag_blks ++;
				smp_mb();
				wake_up(&msblk->fragment_wait_queue);
				goto out;
			}

			/* Publish the filled slot under the mutex. */
			mutex_lock(&msblk->fragment_mutex);
			msblk->fragment[i].block = start_block;
			TRACE("New fragment %d, start block %lld, locked %d\n",
				i, msblk->fragment[i].block, msblk->fragment[i].locked);
			mutex_unlock(&msblk->fragment_mutex);
			break;
		}

		/* Hit: take a reference; if it was reclaimable it no longer is. */
		if (msblk->fragment[i].locked == 0)
			msblk->unused_frag_blks --;
		msblk->fragment[i].locked++;
		mutex_unlock(&msblk->fragment_mutex);
		TRACE("Got fragment %d, start block %lld, locked %d\n", i,
			msblk->fragment[i].block, msblk->fragment[i].locked);
		break;
	}

	return &msblk->fragment[i];

out:
	return NULL;
}
584 |
|
585 |
|
586 |
static void squashfs_new_inode(struct squashfs_sb_info *msblk, struct inode *i, |
587 |
struct squashfs_base_inode_header *inodeb) |
588 |
{ |
589 |
i->i_ino = inodeb->inode_number; |
590 |
i->i_mtime.tv_sec = inodeb->mtime; |
591 |
i->i_atime.tv_sec = inodeb->mtime; |
592 |
i->i_ctime.tv_sec = inodeb->mtime; |
593 |
i->i_uid = msblk->uid[inodeb->uid]; |
594 |
i->i_mode = inodeb->mode; |
595 |
i->i_size = 0; |
596 |
|
597 |
if (inodeb->guid == SQUASHFS_GUIDS) |
598 |
i->i_gid = i->i_uid; |
599 |
else |
600 |
i->i_gid = msblk->guid[inodeb->guid]; |
601 |
} |
602 |
|
603 |
|
604 |
static squashfs_inode_t squashfs_inode_lookup(struct super_block *s, int ino) |
605 |
{ |
606 |
struct squashfs_sb_info *msblk = s->s_fs_info; |
607 |
long long start = msblk->inode_lookup_table[SQUASHFS_LOOKUP_BLOCK(ino - 1)]; |
608 |
int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino - 1); |
609 |
squashfs_inode_t inode; |
610 |
|
611 |
TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino); |
612 |
|
613 |
if (msblk->swap) { |
614 |
squashfs_inode_t sinode; |
615 |
|
616 |
if (!squashfs_get_cached_block(s, &sinode, start, offset, |
617 |
sizeof(sinode), &start, &offset)) |
618 |
goto out; |
619 |
SQUASHFS_SWAP_INODE_T((&inode), &sinode); |
620 |
} else if (!squashfs_get_cached_block(s, &inode, start, offset, |
621 |
sizeof(inode), &start, &offset)) |
622 |
goto out; |
623 |
|
624 |
TRACE("squashfs_inode_lookup, inode = 0x%llx\n", inode); |
625 |
|
626 |
return inode; |
627 |
|
628 |
out: |
629 |
return SQUASHFS_INVALID_BLK; |
630 |
} |
631 |
|
632 |
static struct dentry *squashfs_get_parent(struct dentry *child) |
633 |
{ |
634 |
struct inode *i = child->d_inode; |
635 |
unsigned long ino = SQUASHFS_I(i)->u.s2.parent_inode; |
636 |
squashfs_inode_t inode = squashfs_inode_lookup(i->i_sb, ino); |
637 |
struct inode *parent; |
638 |
struct dentry *rv; |
639 |
|
640 |
TRACE("Entered squashfs_get_parent\n"); |
641 |
|
642 |
if (inode == SQUASHFS_INVALID_BLK) |
643 |
return ERR_PTR(-EINVAL); |
644 |
|
645 |
parent = squashfs_iget(i->i_sb, inode, ino); |
646 |
if (IS_ERR(parent)) { |
647 |
rv = ERR_PTR(-EACCES); |
648 |
goto out; |
649 |
} |
650 |
|
651 |
rv = d_alloc_anon(parent); |
652 |
if(rv == NULL) |
653 |
rv = ERR_PTR(-ENOMEM); |
654 |
|
655 |
out: |
656 |
return rv; |
657 |
} |
658 |
|
659 |
|
660 |
SQSH_EXTERN struct inode *squashfs_iget(struct super_block *s, |
661 |
squashfs_inode_t inode, unsigned int inode_number) |
662 |
{ |
663 |
struct squashfs_sb_info *msblk = s->s_fs_info; |
664 |
struct inode *i = iget_locked(s, inode_number); |
665 |
|
666 |
TRACE("Entered squashfs_iget\n"); |
667 |
if (!i) |
668 |
return ERR_PTR(-ENOMEM); |
669 |
|
670 |
if (i->i_state & I_NEW) { |
671 |
(msblk->read_inode)(i, inode); |
672 |
unlock_new_inode(i); |
673 |
} |
674 |
|
675 |
return i; |
676 |
} |
677 |
|
678 |
|
679 |
/*
 * Fill in a VFS inode from its on-disk representation.
 *
 * The base inode header (common fields) is read first; its inode_type then
 * selects which full per-type header to read and how to wire up the inode's
 * operations.  On opposite-endian filesystems every header is read into a
 * scratch copy and byte-swapped.
 *
 * Returns 1 on success.  On failure the inode is marked bad and 0 is
 * returned.
 */
static int squashfs_read_inode(struct inode *i, squashfs_inode_t inode)
{
	struct super_block *s = i->i_sb;
	struct squashfs_sb_info *msblk = s->s_fs_info;
	struct squashfs_super_block *sblk = &msblk->sblk;
	/* inode encodes (metadata block, offset within block). */
	long long block = SQUASHFS_INODE_BLK(inode) + sblk->inode_table_start;
	unsigned int offset = SQUASHFS_INODE_OFFSET(inode);
	long long next_block;
	unsigned int next_offset;
	union squashfs_inode_header id, sid;
	struct squashfs_base_inode_header *inodeb = &id.base, *sinodeb = &sid.base;

	TRACE("Entered squashfs_read_inode\n");

	/* Read the common base header (swapped if needed). */
	if (msblk->swap) {
		if (!squashfs_get_cached_block(s, sinodeb, block, offset,
					sizeof(*sinodeb), &next_block, &next_offset))
			goto failed_read;
		SQUASHFS_SWAP_BASE_INODE_HEADER(inodeb, sinodeb, sizeof(*sinodeb));
	} else
		if (!squashfs_get_cached_block(s, inodeb, block, offset,
					sizeof(*inodeb), &next_block, &next_offset))
			goto failed_read;

	squashfs_new_inode(msblk, i, inodeb);

	/* Note: each case re-reads the FULL per-type header starting at
	 * (block, offset), not continuing from next_block/next_offset. */
	switch(inodeb->inode_type) {
		case SQUASHFS_FILE_TYPE: {
			unsigned int frag_size;
			long long frag_blk;
			struct squashfs_reg_inode_header *inodep = &id.reg;
			struct squashfs_reg_inode_header *sinodep = &sid.reg;

			if (msblk->swap) {
				if (!squashfs_get_cached_block(s, sinodep, block, offset,
						sizeof(*sinodep), &next_block, &next_offset))
					goto failed_read;
				SQUASHFS_SWAP_REG_INODE_HEADER(inodep, sinodep);
			} else
				if (!squashfs_get_cached_block(s, inodep, block, offset,
						sizeof(*inodep), &next_block, &next_offset))
					goto failed_read;

			/* Resolve the tail-end fragment, if the file has one. */
			frag_blk = SQUASHFS_INVALID_BLK;

			if (inodep->fragment != SQUASHFS_INVALID_FRAG)
				if(!get_fragment_location(s, inodep->fragment, &frag_blk,
						&frag_size))
					goto failed_read;

			i->i_nlink = 1;
			i->i_size = inodep->file_size;
			i->i_fop = &generic_ro_fops;
			i->i_mode |= S_IFREG;
			i->i_blocks = ((i->i_size - 1) >> 9) + 1;
			SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
			SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
			SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
			SQUASHFS_I(i)->start_block = inodep->start_block;
			/* Block list follows the header in the metadata stream. */
			SQUASHFS_I(i)->u.s1.block_list_start = next_block;
			SQUASHFS_I(i)->offset = next_offset;
			i->i_data.a_ops = &squashfs_aops;

			TRACE("File inode %x:%x, start_block %llx, "
					"block_list_start %llx, offset %x\n",
					SQUASHFS_INODE_BLK(inode), offset,
					inodep->start_block, next_block,
					next_offset);
			break;
		}
		case SQUASHFS_LREG_TYPE: {
			/* "Long" regular file: like FILE_TYPE but carries a
			 * real nlink count. */
			unsigned int frag_size;
			long long frag_blk;
			struct squashfs_lreg_inode_header *inodep = &id.lreg;
			struct squashfs_lreg_inode_header *sinodep = &sid.lreg;

			if (msblk->swap) {
				if (!squashfs_get_cached_block(s, sinodep, block, offset,
						sizeof(*sinodep), &next_block, &next_offset))
					goto failed_read;
				SQUASHFS_SWAP_LREG_INODE_HEADER(inodep, sinodep);
			} else
				if (!squashfs_get_cached_block(s, inodep, block, offset,
						sizeof(*inodep), &next_block, &next_offset))
					goto failed_read;

			frag_blk = SQUASHFS_INVALID_BLK;

			if (inodep->fragment != SQUASHFS_INVALID_FRAG)
				if (!get_fragment_location(s, inodep->fragment, &frag_blk,
						&frag_size))
					goto failed_read;

			i->i_nlink = inodep->nlink;
			i->i_size = inodep->file_size;
			i->i_fop = &generic_ro_fops;
			i->i_mode |= S_IFREG;
			i->i_blocks = ((i->i_size - 1) >> 9) + 1;
			SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
			SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
			SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
			SQUASHFS_I(i)->start_block = inodep->start_block;
			SQUASHFS_I(i)->u.s1.block_list_start = next_block;
			SQUASHFS_I(i)->offset = next_offset;
			i->i_data.a_ops = &squashfs_aops;

			TRACE("File inode %x:%x, start_block %llx, "
					"block_list_start %llx, offset %x\n",
					SQUASHFS_INODE_BLK(inode), offset,
					inodep->start_block, next_block,
					next_offset);
			break;
		}
		case SQUASHFS_DIR_TYPE: {
			struct squashfs_dir_inode_header *inodep = &id.dir;
			struct squashfs_dir_inode_header *sinodep = &sid.dir;

			if (msblk->swap) {
				if (!squashfs_get_cached_block(s, sinodep, block, offset,
						sizeof(*sinodep), &next_block, &next_offset))
					goto failed_read;
				SQUASHFS_SWAP_DIR_INODE_HEADER(inodep, sinodep);
			} else
				if (!squashfs_get_cached_block(s, inodep, block, offset,
						sizeof(*inodep), &next_block, &next_offset))
					goto failed_read;

			i->i_nlink = inodep->nlink;
			i->i_size = inodep->file_size;
			i->i_op = &squashfs_dir_inode_ops;
			i->i_fop = &squashfs_dir_ops;
			i->i_mode |= S_IFDIR;
			SQUASHFS_I(i)->start_block = inodep->start_block;
			SQUASHFS_I(i)->offset = inodep->offset;
			/* Short directories have no index. */
			SQUASHFS_I(i)->u.s2.directory_index_count = 0;
			SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode;

			TRACE("Directory inode %x:%x, start_block %x, offset "
					"%x\n", SQUASHFS_INODE_BLK(inode),
					offset, inodep->start_block,
					inodep->offset);
			break;
		}
		case SQUASHFS_LDIR_TYPE: {
			/* "Long" directory: carries a lookup index after the
			 * header for faster name searches. */
			struct squashfs_ldir_inode_header *inodep = &id.ldir;
			struct squashfs_ldir_inode_header *sinodep = &sid.ldir;

			if (msblk->swap) {
				if (!squashfs_get_cached_block(s, sinodep, block, offset,
						sizeof(*sinodep), &next_block, &next_offset))
					goto failed_read;
				SQUASHFS_SWAP_LDIR_INODE_HEADER(inodep, sinodep);
			} else
				if (!squashfs_get_cached_block(s, inodep, block, offset,
						sizeof(*inodep), &next_block, &next_offset))
					goto failed_read;

			i->i_nlink = inodep->nlink;
			i->i_size = inodep->file_size;
			i->i_op = &squashfs_dir_inode_ops;
			i->i_fop = &squashfs_dir_ops;
			i->i_mode |= S_IFDIR;
			SQUASHFS_I(i)->start_block = inodep->start_block;
			SQUASHFS_I(i)->offset = inodep->offset;
			/* The directory index follows the header. */
			SQUASHFS_I(i)->u.s2.directory_index_start = next_block;
			SQUASHFS_I(i)->u.s2.directory_index_offset = next_offset;
			SQUASHFS_I(i)->u.s2.directory_index_count = inodep->i_count;
			SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode;

			TRACE("Long directory inode %x:%x, start_block %x, offset %x\n",
					SQUASHFS_INODE_BLK(inode), offset,
					inodep->start_block, inodep->offset);
			break;
		}
		case SQUASHFS_SYMLINK_TYPE: {
			struct squashfs_symlink_inode_header *inodep = &id.symlink;
			struct squashfs_symlink_inode_header *sinodep = &sid.symlink;

			if (msblk->swap) {
				if (!squashfs_get_cached_block(s, sinodep, block, offset,
						sizeof(*sinodep), &next_block, &next_offset))
					goto failed_read;
				SQUASHFS_SWAP_SYMLINK_INODE_HEADER(inodep, sinodep);
			} else
				if (!squashfs_get_cached_block(s, inodep, block, offset,
						sizeof(*inodep), &next_block, &next_offset))
					goto failed_read;

			i->i_nlink = inodep->nlink;
			i->i_size = inodep->symlink_size;
			i->i_op = &page_symlink_inode_operations;
			i->i_data.a_ops = &squashfs_symlink_aops;
			i->i_mode |= S_IFLNK;
			/* The link target string follows the header. */
			SQUASHFS_I(i)->start_block = next_block;
			SQUASHFS_I(i)->offset = next_offset;

			TRACE("Symbolic link inode %x:%x, start_block %llx, offset %x\n",
					SQUASHFS_INODE_BLK(inode), offset,
					next_block, next_offset);
			break;
		}
		case SQUASHFS_BLKDEV_TYPE:
		case SQUASHFS_CHRDEV_TYPE: {
			struct squashfs_dev_inode_header *inodep = &id.dev;
			struct squashfs_dev_inode_header *sinodep = &sid.dev;

			if (msblk->swap) {
				if (!squashfs_get_cached_block(s, sinodep, block, offset,
						sizeof(*sinodep), &next_block, &next_offset))
					goto failed_read;
				SQUASHFS_SWAP_DEV_INODE_HEADER(inodep, sinodep);
			} else
				if (!squashfs_get_cached_block(s, inodep, block, offset,
						sizeof(*inodep), &next_block, &next_offset))
					goto failed_read;

			i->i_nlink = inodep->nlink;
			i->i_mode |= (inodeb->inode_type == SQUASHFS_CHRDEV_TYPE) ?
					S_IFCHR : S_IFBLK;
			init_special_inode(i, i->i_mode, old_decode_dev(inodep->rdev));

			TRACE("Device inode %x:%x, rdev %x\n",
					SQUASHFS_INODE_BLK(inode), offset, inodep->rdev);
			break;
		}
		case SQUASHFS_FIFO_TYPE:
		case SQUASHFS_SOCKET_TYPE: {
			struct squashfs_ipc_inode_header *inodep = &id.ipc;
			struct squashfs_ipc_inode_header *sinodep = &sid.ipc;

			if (msblk->swap) {
				if (!squashfs_get_cached_block(s, sinodep, block, offset,
						sizeof(*sinodep), &next_block, &next_offset))
					goto failed_read;
				SQUASHFS_SWAP_IPC_INODE_HEADER(inodep, sinodep);
			} else
				if (!squashfs_get_cached_block(s, inodep, block, offset,
						sizeof(*inodep), &next_block, &next_offset))
					goto failed_read;

			i->i_nlink = inodep->nlink;
			i->i_mode |= (inodeb->inode_type == SQUASHFS_FIFO_TYPE)
					? S_IFIFO : S_IFSOCK;
			init_special_inode(i, i->i_mode, 0);
			break;
		}
		default:
			ERROR("Unknown inode type %d in squashfs_iget!\n",
					inodeb->inode_type);
			goto failed_read1;
	}

	return 1;

failed_read:
	ERROR("Unable to read inode [%llx:%x]\n", block, offset);

failed_read1:
	make_bad_inode(i);
	return 0;
}
940 |
|
941 |
|
942 |
/*
 * Read the inode lookup table (inode number -> inode location) into memory.
 * The table is only present on >= 3.0 filesystems that support NFS export.
 * Returns 1 on success, 0 on failure.  On failure the (possibly allocated)
 * table is NOT freed here -- the caller's failed_mount path kfree()s
 * msblk->inode_lookup_table.
 */
static int read_inode_lookup_table(struct super_block *s)
{
	struct squashfs_sb_info *msblk = s->s_fs_info;
	struct squashfs_super_block *sblk = &msblk->sblk;
	unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(sblk->inodes);

	TRACE("In read_inode_lookup_table, length %d\n", length);

	/* Allocate inode lookup table */
	msblk->inode_lookup_table = kmalloc(length, GFP_KERNEL);
	if (msblk->inode_lookup_table == NULL) {
		ERROR("Failed to allocate inode lookup table\n");
		return 0;
	}

	/* The index is stored uncompressed; SQUASHFS_COMPRESSED_BIT_BLOCK
	 * tells squashfs_read_data the length field is a raw byte count. */
	if (!squashfs_read_data(s, (char *) msblk->inode_lookup_table,
			sblk->lookup_table_start, length |
			SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length)) {
		ERROR("unable to read inode lookup table\n");
		return 0;
	}

	if (msblk->swap) {
		int i;
		long long block;

		/* Byte-swap each 64-bit index entry in place */
		for (i = 0; i < SQUASHFS_LOOKUP_BLOCKS(sblk->inodes); i++) {
			SQUASHFS_SWAP_LOOKUP_BLOCKS((&block),
					&msblk->inode_lookup_table[i], 1);
			msblk->inode_lookup_table[i] = block;
		}
	}

	return 1;
}
978 |
|
979 |
|
980 |
/*
 * Read the fragment index table (fragment number -> fragment location) into
 * memory.  Returns 1 on success (including the no-fragments case), 0 on
 * failure.  On failure the allocated table is freed by the caller's
 * failed_mount path (kfree of msblk->fragment_index).
 */
static int read_fragment_index_table(struct super_block *s)
{
	struct squashfs_sb_info *msblk = s->s_fs_info;
	struct squashfs_super_block *sblk = &msblk->sblk;
	unsigned int length = SQUASHFS_FRAGMENT_INDEX_BYTES(sblk->fragments);

	/* Filesystem has no fragments -- nothing to read */
	if(length == 0)
		return 1;

	/* Allocate fragment index table */
	msblk->fragment_index = kmalloc(length, GFP_KERNEL);
	if (msblk->fragment_index == NULL) {
		ERROR("Failed to allocate fragment index table\n");
		return 0;
	}

	/* Index is stored uncompressed on disk */
	if (!squashfs_read_data(s, (char *) msblk->fragment_index,
			sblk->fragment_table_start, length |
			SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length)) {
		ERROR("unable to read fragment index table\n");
		return 0;
	}

	if (msblk->swap) {
		int i;
		long long fragment;

		/* Byte-swap each 64-bit index entry in place */
		for (i = 0; i < SQUASHFS_FRAGMENT_INDEXES(sblk->fragments); i++) {
			SQUASHFS_SWAP_FRAGMENT_INDEXES((&fragment),
					&msblk->fragment_index[i], 1);
			msblk->fragment_index[i] = fragment;
		}
	}

	return 1;
}
1017 |
|
1018 |
|
1019 |
static int readahead_metadata(struct super_block *s) |
1020 |
{ |
1021 |
struct squashfs_sb_info *msblk = s->s_fs_info; |
1022 |
int i; |
1023 |
|
1024 |
squashfs_cached_blks = SQUASHFS_CACHED_BLKS; |
1025 |
|
1026 |
/* Init inode_table block pointer array */ |
1027 |
msblk->block_cache = kmalloc(sizeof(struct squashfs_cache) * |
1028 |
squashfs_cached_blks, GFP_KERNEL); |
1029 |
if (msblk->block_cache == NULL) { |
1030 |
ERROR("Failed to allocate block cache\n"); |
1031 |
goto failed; |
1032 |
} |
1033 |
|
1034 |
for (i = 0; i < squashfs_cached_blks; i++) |
1035 |
msblk->block_cache[i].block = SQUASHFS_INVALID_BLK; |
1036 |
|
1037 |
msblk->next_cache = 0; |
1038 |
msblk->unused_cache_blks = squashfs_cached_blks; |
1039 |
|
1040 |
return 1; |
1041 |
|
1042 |
failed: |
1043 |
return 0; |
1044 |
} |
1045 |
|
1046 |
|
1047 |
static int supported_squashfs_filesystem(struct squashfs_sb_info *msblk, int silent) |
1048 |
{ |
1049 |
struct squashfs_super_block *sblk = &msblk->sblk; |
1050 |
|
1051 |
msblk->read_inode = squashfs_read_inode; |
1052 |
msblk->read_blocklist = read_blocklist; |
1053 |
msblk->read_fragment_index_table = read_fragment_index_table; |
1054 |
|
1055 |
if (sblk->s_major == 1) { |
1056 |
if (!squashfs_1_0_supported(msblk)) { |
1057 |
SERROR("Major/Minor mismatch, Squashfs 1.0 filesystems " |
1058 |
"are unsupported\n"); |
1059 |
SERROR("Please recompile with Squashfs 1.0 support enabled\n"); |
1060 |
return 0; |
1061 |
} |
1062 |
} else if (sblk->s_major == 2) { |
1063 |
if (!squashfs_2_0_supported(msblk)) { |
1064 |
SERROR("Major/Minor mismatch, Squashfs 2.0 filesystems " |
1065 |
"are unsupported\n"); |
1066 |
SERROR("Please recompile with Squashfs 2.0 support enabled\n"); |
1067 |
return 0; |
1068 |
} |
1069 |
} else if(sblk->s_major != SQUASHFS_MAJOR || sblk->s_minor > |
1070 |
SQUASHFS_MINOR) { |
1071 |
SERROR("Major/Minor mismatch, trying to mount newer %d.%d " |
1072 |
"filesystem\n", sblk->s_major, sblk->s_minor); |
1073 |
SERROR("Please update your kernel\n"); |
1074 |
return 0; |
1075 |
} |
1076 |
|
1077 |
return 1; |
1078 |
} |
1079 |
|
1080 |
|
1081 |
static int squashfs_fill_super(struct super_block *s, void *data, int silent) |
1082 |
{ |
1083 |
struct squashfs_sb_info *msblk; |
1084 |
struct squashfs_super_block *sblk; |
1085 |
int i; |
1086 |
char b[BDEVNAME_SIZE]; |
1087 |
struct inode *root; |
1088 |
|
1089 |
TRACE("Entered squashfs_fill_superblock\n"); |
1090 |
|
1091 |
s->s_fs_info = kzalloc(sizeof(struct squashfs_sb_info), GFP_KERNEL); |
1092 |
if (s->s_fs_info == NULL) { |
1093 |
ERROR("Failed to allocate superblock\n"); |
1094 |
goto failure; |
1095 |
} |
1096 |
msblk = s->s_fs_info; |
1097 |
|
1098 |
msblk->stream.workspace = vmalloc(zlib_inflate_workspacesize()); |
1099 |
if (msblk->stream.workspace == NULL) { |
1100 |
ERROR("Failed to allocate zlib workspace\n"); |
1101 |
goto failure; |
1102 |
} |
1103 |
sblk = &msblk->sblk; |
1104 |
|
1105 |
msblk->devblksize = sb_min_blocksize(s, BLOCK_SIZE); |
1106 |
msblk->devblksize_log2 = ffz(~msblk->devblksize); |
1107 |
|
1108 |
mutex_init(&msblk->read_data_mutex); |
1109 |
mutex_init(&msblk->read_page_mutex); |
1110 |
mutex_init(&msblk->block_cache_mutex); |
1111 |
mutex_init(&msblk->fragment_mutex); |
1112 |
mutex_init(&msblk->meta_index_mutex); |
1113 |
|
1114 |
init_waitqueue_head(&msblk->waitq); |
1115 |
init_waitqueue_head(&msblk->fragment_wait_queue); |
1116 |
|
1117 |
/* sblk->bytes_used is checked in squashfs_read_data to ensure reads are not |
1118 |
* beyond filesystem end. As we're using squashfs_read_data to read sblk here, |
1119 |
* first set sblk->bytes_used to a useful value */ |
1120 |
sblk->bytes_used = sizeof(struct squashfs_super_block); |
1121 |
if (!squashfs_read_data(s, (char *) sblk, SQUASHFS_START, |
1122 |
sizeof(struct squashfs_super_block) | |
1123 |
SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, sizeof(struct squashfs_super_block))) { |
1124 |
SERROR("unable to read superblock\n"); |
1125 |
goto failed_mount; |
1126 |
} |
1127 |
|
1128 |
/* Check it is a SQUASHFS superblock */ |
1129 |
if ((s->s_magic = sblk->s_magic) != SQUASHFS_MAGIC) { |
1130 |
if (sblk->s_magic == SQUASHFS_MAGIC_SWAP) { |
1131 |
struct squashfs_super_block ssblk; |
1132 |
|
1133 |
WARNING("Mounting a different endian SQUASHFS filesystem on %s\n", |
1134 |
bdevname(s->s_bdev, b)); |
1135 |
|
1136 |
SQUASHFS_SWAP_SUPER_BLOCK(&ssblk, sblk); |
1137 |
memcpy(sblk, &ssblk, sizeof(struct squashfs_super_block)); |
1138 |
msblk->swap = 1; |
1139 |
} else { |
1140 |
SERROR("Can't find a SQUASHFS superblock on %s\n", |
1141 |
bdevname(s->s_bdev, b)); |
1142 |
goto failed_mount; |
1143 |
} |
1144 |
} |
1145 |
|
1146 |
/* Check the MAJOR & MINOR versions */ |
1147 |
if(!supported_squashfs_filesystem(msblk, silent)) |
1148 |
goto failed_mount; |
1149 |
|
1150 |
/* Check the filesystem does not extend beyond the end of the |
1151 |
block device */ |
1152 |
if(sblk->bytes_used < 0 || sblk->bytes_used > i_size_read(s->s_bdev->bd_inode)) |
1153 |
goto failed_mount; |
1154 |
|
1155 |
/* Check the root inode for sanity */ |
1156 |
if (SQUASHFS_INODE_OFFSET(sblk->root_inode) > SQUASHFS_METADATA_SIZE) |
1157 |
goto failed_mount; |
1158 |
|
1159 |
TRACE("Found valid superblock on %s\n", bdevname(s->s_bdev, b)); |
1160 |
TRACE("Inodes are %scompressed\n", SQUASHFS_UNCOMPRESSED_INODES(sblk->flags) |
1161 |
? "un" : ""); |
1162 |
TRACE("Data is %scompressed\n", SQUASHFS_UNCOMPRESSED_DATA(sblk->flags) |
1163 |
? "un" : ""); |
1164 |
TRACE("Check data is %spresent in the filesystem\n", |
1165 |
SQUASHFS_CHECK_DATA(sblk->flags) ? "" : "not "); |
1166 |
TRACE("Filesystem size %lld bytes\n", sblk->bytes_used); |
1167 |
TRACE("Block size %d\n", sblk->block_size); |
1168 |
TRACE("Number of inodes %d\n", sblk->inodes); |
1169 |
if (sblk->s_major > 1) |
1170 |
TRACE("Number of fragments %d\n", sblk->fragments); |
1171 |
TRACE("Number of uids %d\n", sblk->no_uids); |
1172 |
TRACE("Number of gids %d\n", sblk->no_guids); |
1173 |
TRACE("sblk->inode_table_start %llx\n", sblk->inode_table_start); |
1174 |
TRACE("sblk->directory_table_start %llx\n", sblk->directory_table_start); |
1175 |
if (sblk->s_major > 1) |
1176 |
TRACE("sblk->fragment_table_start %llx\n", sblk->fragment_table_start); |
1177 |
TRACE("sblk->uid_start %llx\n", sblk->uid_start); |
1178 |
|
1179 |
s->s_maxbytes = MAX_LFS_FILESIZE; |
1180 |
s->s_flags |= MS_RDONLY; |
1181 |
s->s_op = &squashfs_super_ops; |
1182 |
|
1183 |
if (readahead_metadata(s) == 0) |
1184 |
goto failed_mount; |
1185 |
|
1186 |
/* Allocate read_page block */ |
1187 |
msblk->read_page = vmalloc(sblk->block_size); |
1188 |
if (msblk->read_page == NULL) { |
1189 |
ERROR("Failed to allocate read_page block\n"); |
1190 |
goto failed_mount; |
1191 |
} |
1192 |
|
1193 |
/* Allocate uid and gid tables */ |
1194 |
msblk->uid = kmalloc((sblk->no_uids + sblk->no_guids) * |
1195 |
sizeof(unsigned int), GFP_KERNEL); |
1196 |
if (msblk->uid == NULL) { |
1197 |
ERROR("Failed to allocate uid/gid table\n"); |
1198 |
goto failed_mount; |
1199 |
} |
1200 |
msblk->guid = msblk->uid + sblk->no_uids; |
1201 |
|
1202 |
if (msblk->swap) { |
1203 |
unsigned int suid[sblk->no_uids + sblk->no_guids]; |
1204 |
|
1205 |
if (!squashfs_read_data(s, (char *) &suid, sblk->uid_start, |
1206 |
((sblk->no_uids + sblk->no_guids) * |
1207 |
sizeof(unsigned int)) | |
1208 |
SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, (sblk->no_uids + sblk->no_guids) * sizeof(unsigned int))) { |
1209 |
ERROR("unable to read uid/gid table\n"); |
1210 |
goto failed_mount; |
1211 |
} |
1212 |
|
1213 |
SQUASHFS_SWAP_DATA(msblk->uid, suid, (sblk->no_uids + |
1214 |
sblk->no_guids), (sizeof(unsigned int) * 8)); |
1215 |
} else |
1216 |
if (!squashfs_read_data(s, (char *) msblk->uid, sblk->uid_start, |
1217 |
((sblk->no_uids + sblk->no_guids) * |
1218 |
sizeof(unsigned int)) | |
1219 |
SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, (sblk->no_uids + sblk->no_guids) * sizeof(unsigned int))) { |
1220 |
ERROR("unable to read uid/gid table\n"); |
1221 |
goto failed_mount; |
1222 |
} |
1223 |
|
1224 |
|
1225 |
if (sblk->s_major == 1 && squashfs_1_0_supported(msblk)) |
1226 |
goto allocate_root; |
1227 |
|
1228 |
msblk->fragment = kzalloc(sizeof(struct squashfs_fragment_cache) * |
1229 |
SQUASHFS_CACHED_FRAGMENTS, GFP_KERNEL); |
1230 |
if (msblk->fragment == NULL) { |
1231 |
ERROR("Failed to allocate fragment block cache\n"); |
1232 |
goto failed_mount; |
1233 |
} |
1234 |
|
1235 |
for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++) { |
1236 |
msblk->fragment[i].block = SQUASHFS_INVALID_BLK; |
1237 |
} |
1238 |
|
1239 |
msblk->next_fragment = 0; |
1240 |
msblk->unused_frag_blks = SQUASHFS_CACHED_FRAGMENTS; |
1241 |
|
1242 |
/* Allocate and read fragment index table */ |
1243 |
if (msblk->read_fragment_index_table(s) == 0) |
1244 |
goto failed_mount; |
1245 |
|
1246 |
if(sblk->s_major < 3 || sblk->lookup_table_start == SQUASHFS_INVALID_BLK) |
1247 |
goto allocate_root; |
1248 |
|
1249 |
/* Allocate and read inode lookup table */ |
1250 |
if (read_inode_lookup_table(s) == 0) |
1251 |
goto failed_mount; |
1252 |
|
1253 |
s->s_op = &squashfs_export_super_ops; |
1254 |
s->s_export_op = &squashfs_export_ops; |
1255 |
|
1256 |
allocate_root: |
1257 |
root = new_inode(s); |
1258 |
if ((msblk->read_inode)(root, sblk->root_inode) == 0) |
1259 |
goto failed_mount; |
1260 |
insert_inode_hash(root); |
1261 |
|
1262 |
s->s_root = d_alloc_root(root); |
1263 |
if (s->s_root == NULL) { |
1264 |
ERROR("Root inode create failed\n"); |
1265 |
iput(root); |
1266 |
goto failed_mount; |
1267 |
} |
1268 |
|
1269 |
TRACE("Leaving squashfs_fill_super\n"); |
1270 |
return 0; |
1271 |
|
1272 |
failed_mount: |
1273 |
kfree(msblk->inode_lookup_table); |
1274 |
kfree(msblk->fragment_index); |
1275 |
kfree(msblk->fragment); |
1276 |
kfree(msblk->uid); |
1277 |
vfree(msblk->read_page); |
1278 |
kfree(msblk->block_cache); |
1279 |
kfree(msblk->fragment_index_2); |
1280 |
vfree(msblk->stream.workspace); |
1281 |
kfree(s->s_fs_info); |
1282 |
s->s_fs_info = NULL; |
1283 |
return -EINVAL; |
1284 |
|
1285 |
failure: |
1286 |
return -ENOMEM; |
1287 |
} |
1288 |
|
1289 |
|
1290 |
static int squashfs_statfs(struct dentry *dentry, struct kstatfs *buf) |
1291 |
{ |
1292 |
struct squashfs_sb_info *msblk = dentry->d_sb->s_fs_info; |
1293 |
struct squashfs_super_block *sblk = &msblk->sblk; |
1294 |
|
1295 |
TRACE("Entered squashfs_statfs\n"); |
1296 |
|
1297 |
buf->f_type = SQUASHFS_MAGIC; |
1298 |
buf->f_bsize = sblk->block_size; |
1299 |
buf->f_blocks = ((sblk->bytes_used - 1) >> sblk->block_log) + 1; |
1300 |
buf->f_bfree = buf->f_bavail = 0; |
1301 |
buf->f_files = sblk->inodes; |
1302 |
buf->f_ffree = 0; |
1303 |
buf->f_namelen = SQUASHFS_NAME_LEN; |
1304 |
|
1305 |
return 0; |
1306 |
} |
1307 |
|
1308 |
|
1309 |
/*
 * Read one page of a symbolic link target.  The target string is stored
 * inline in the metadata stream after the inode, so we walk the metadata
 * cache rather than reading data blocks.  On read error the page is
 * zero-filled and still marked uptodate (link resolves to empty/short
 * target rather than failing the read) -- deliberate best-effort.
 */
static int squashfs_symlink_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int index = page->index << PAGE_CACHE_SHIFT, length, bytes, avail_bytes;
	long long block = SQUASHFS_I(inode)->start_block;
	int offset = SQUASHFS_I(inode)->offset;
	void *pageaddr = kmap(page);

	TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
				"%llx, offset %x\n", page->index,
				SQUASHFS_I(inode)->start_block,
				SQUASHFS_I(inode)->offset);

	/* Skip over the pages before the one requested; NULL buffer means
	 * "advance block/offset without copying". */
	for (length = 0; length < index; length += bytes) {
		bytes = squashfs_get_cached_block(inode->i_sb, NULL, block,
				offset, PAGE_CACHE_SIZE, &block, &offset);
		if (bytes == 0) {
			ERROR("Unable to read symbolic link [%llx:%x]\n", block, offset);
			goto skip_read;
		}
	}

	if (length != index) {
		ERROR("(squashfs_symlink_readpage) length != index\n");
		bytes = 0;
		goto skip_read;
	}

	/* Copy at most one page, bounded by the remaining link length */
	avail_bytes = min_t(int, i_size_read(inode) - length, PAGE_CACHE_SIZE);

	bytes = squashfs_get_cached_block(inode->i_sb, pageaddr, block, offset,
		avail_bytes, &block, &offset);
	if (bytes == 0)
		ERROR("Unable to read symbolic link [%llx:%x]\n", block, offset);

skip_read:
	/* Zero the remainder of the page (bytes == 0 on the error paths,
	 * so the whole page is cleared). */
	memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
	kunmap(page);
	flush_dcache_page(page);
	SetPageUptodate(page);
	unlock_page(page);

	return 0;
}
1353 |
|
1354 |
|
1355 |
struct meta_index *locate_meta_index(struct inode *inode, int index, int offset) |
1356 |
{ |
1357 |
struct meta_index *meta = NULL; |
1358 |
struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; |
1359 |
int i; |
1360 |
|
1361 |
mutex_lock(&msblk->meta_index_mutex); |
1362 |
|
1363 |
TRACE("locate_meta_index: index %d, offset %d\n", index, offset); |
1364 |
|
1365 |
if (msblk->meta_index == NULL) |
1366 |
goto not_allocated; |
1367 |
|
1368 |
for (i = 0; i < SQUASHFS_META_NUMBER; i ++) { |
1369 |
if (msblk->meta_index[i].inode_number == inode->i_ino && |
1370 |
msblk->meta_index[i].offset >= offset && |
1371 |
msblk->meta_index[i].offset <= index && |
1372 |
msblk->meta_index[i].locked == 0) { |
1373 |
TRACE("locate_meta_index: entry %d, offset %d\n", i, |
1374 |
msblk->meta_index[i].offset); |
1375 |
meta = &msblk->meta_index[i]; |
1376 |
offset = meta->offset; |
1377 |
} |
1378 |
} |
1379 |
|
1380 |
if (meta) |
1381 |
meta->locked = 1; |
1382 |
|
1383 |
not_allocated: |
1384 |
mutex_unlock(&msblk->meta_index_mutex); |
1385 |
|
1386 |
return meta; |
1387 |
} |
1388 |
|
1389 |
|
1390 |
/*
 * Claim a meta index cache slot for a new entry, lazily allocating the
 * slot array on first use.  Slots are reused round-robin starting at
 * next_meta_index, skipping locked slots; an in-use (but unlocked)
 * entry may be evicted.  The returned slot is locked and initialised
 * empty; returns NULL if allocation fails or every slot is locked.
 */
struct meta_index *empty_meta_index(struct inode *inode, int offset, int skip)
{
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	struct meta_index *meta = NULL;
	int i;

	mutex_lock(&msblk->meta_index_mutex);

	TRACE("empty_meta_index: offset %d, skip %d\n", offset, skip);

	/* First use: allocate and clear the slot array */
	if (msblk->meta_index == NULL) {
		msblk->meta_index = kmalloc(sizeof(struct meta_index) *
					SQUASHFS_META_NUMBER, GFP_KERNEL);
		if (msblk->meta_index == NULL) {
			ERROR("Failed to allocate meta_index\n");
			goto failed;
		}
		for (i = 0; i < SQUASHFS_META_NUMBER; i++) {
			msblk->meta_index[i].inode_number = 0;
			msblk->meta_index[i].locked = 0;
		}
		msblk->next_meta_index = 0;
	}

	/* Advance next_meta_index past locked slots; i bounds the walk to
	 * one full rotation so we terminate when everything is locked. */
	for (i = SQUASHFS_META_NUMBER; i &&
			msblk->meta_index[msblk->next_meta_index].locked; i --)
		msblk->next_meta_index = (msblk->next_meta_index + 1) %
			SQUASHFS_META_NUMBER;

	if (i == 0) {
		TRACE("empty_meta_index: failed!\n");
		goto failed;
	}

	TRACE("empty_meta_index: returned meta entry %d, %p\n",
			msblk->next_meta_index,
			&msblk->meta_index[msblk->next_meta_index]);

	meta = &msblk->meta_index[msblk->next_meta_index];
	msblk->next_meta_index = (msblk->next_meta_index + 1) %
			SQUASHFS_META_NUMBER;

	/* Initialise the claimed slot; locked=1 keeps it pinned until the
	 * caller fills it in and calls release_meta_index(). */
	meta->inode_number = inode->i_ino;
	meta->offset = offset;
	meta->skip = skip;
	meta->entries = 0;
	meta->locked = 1;

failed:
	mutex_unlock(&msblk->meta_index_mutex);
	return meta;
}
1442 |
|
1443 |
|
1444 |
/*
 * Unlock a meta index slot claimed by locate_meta_index()/
 * empty_meta_index().  The 'inode' argument is unused (kept for API
 * symmetry with the locate/empty calls).  The barrier orders the
 * unlock after the caller's writes to the slot -- NOTE(review): done
 * without holding meta_index_mutex; presumably intentional, confirm.
 */
void release_meta_index(struct inode *inode, struct meta_index *meta)
{
	meta->locked = 0;
	smp_mb();
}
1449 |
|
1450 |
|
1451 |
/*
 * Read 'blocks' 4-byte block-list entries from the metadata stream into
 * block_list, advancing *start_block/*offset past them.  Returns the sum
 * of the compressed sizes of those blocks (the distance to advance in the
 * data area), or -1 on read failure.
 *
 * NOTE(review): the swap path places a variable-length array of
 * (blocks << 2) bytes on the kernel stack; callers bound blocks by
 * SIZE >> 2 (= 64 entries, 256 bytes) -- confirm no other caller can
 * pass a larger count.
 */
static int read_block_index(struct super_block *s, int blocks, char *block_list,
		long long *start_block, int *offset)
{
	struct squashfs_sb_info *msblk = s->s_fs_info;
	unsigned int *block_listp;
	int block = 0;

	if (msblk->swap) {
		/* Read into a temporary, then byte-swap into block_list */
		char sblock_list[blocks << 2];

		if (!squashfs_get_cached_block(s, sblock_list, *start_block,
				*offset, blocks << 2, start_block, offset)) {
			ERROR("Fail reading block list [%llx:%x]\n", *start_block, *offset);
			goto failure;
		}
		SQUASHFS_SWAP_INTS(((unsigned int *)block_list),
				((unsigned int *)sblock_list), blocks);
	} else {
		if (!squashfs_get_cached_block(s, block_list, *start_block,
				*offset, blocks << 2, start_block, offset)) {
			ERROR("Fail reading block list [%llx:%x]\n", *start_block, *offset);
			goto failure;
		}
	}

	/* Accumulate the on-disk (compressed) length of each block */
	for (block_listp = (unsigned int *) block_list; blocks;
				block_listp++, blocks --)
		block += SQUASHFS_COMPRESSED_SIZE_BLOCK(*block_listp);

	return block;

failure:
	return -1;
}
1485 |
|
1486 |
|
1487 |
#define SIZE 256 |
1488 |
|
1489 |
static inline int calculate_skip(int blocks) { |
1490 |
int skip = (blocks - 1) / ((SQUASHFS_SLOTS * SQUASHFS_META_ENTRIES + 1) * SQUASHFS_META_INDEXES); |
1491 |
return skip >= 7 ? 7 : skip + 1; |
1492 |
} |
1493 |
|
1494 |
|
1495 |
/*
 * Use (and extend) the meta index cache to find the metadata location
 * (*index_block/*index_offset) and data-area position (*data_block) that
 * get as close as possible to data block 'index' of the file.  Returns
 * the data-block number actually reached (the caller walks the remaining
 * distance itself), or -1 on read failure.  block_list is scratch space
 * of SIZE bytes for read_block_index().
 */
static int get_meta_index(struct inode *inode, int index,
				long long *index_block, int *index_offset,
				long long *data_block, char *block_list)
{
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	struct squashfs_super_block *sblk = &msblk->sblk;
	int skip = calculate_skip(i_size_read(inode) >> sblk->block_log);
	int offset = 0;
	struct meta_index *meta;
	struct meta_entry *meta_entry;
	/* Start from the inode's own block list if nothing is cached */
	long long cur_index_block = SQUASHFS_I(inode)->u.s1.block_list_start;
	int cur_offset = SQUASHFS_I(inode)->offset;
	long long cur_data_block = SQUASHFS_I(inode)->start_block;
	int i;

	/* Convert the data-block index into meta-index units */
	index /= SQUASHFS_META_INDEXES * skip;

	while (offset < index) {
		meta = locate_meta_index(inode, index, offset + 1);

		if (meta == NULL) {
			/* Nothing cached past 'offset': claim a fresh slot
			 * and fill it below; if none available, settle for
			 * how far we've got. */
			meta = empty_meta_index(inode, offset + 1, skip);
			if (meta == NULL)
				goto all_done;
		} else {
			if(meta->entries == 0)
				goto failed;
			/* Jump to the closest cached entry not past index */
			offset = index < meta->offset + meta->entries ? index :
				meta->offset + meta->entries - 1;
			/* Resume the walk from that entry's saved position */
			meta_entry = &meta->meta_entry[offset - meta->offset];
			cur_index_block = meta_entry->index_block + sblk->inode_table_start;
			cur_offset = meta_entry->offset;
			cur_data_block = meta_entry->data_block;
			TRACE("get_meta_index: offset %d, meta->offset %d, "
				"meta->entries %d\n", offset, meta->offset, meta->entries);
			TRACE("get_meta_index: index_block 0x%llx, offset 0x%x"
				" data_block 0x%llx\n", cur_index_block,
				cur_offset, cur_data_block);
		}

		/* Extend this slot's entries towards 'index', reading
		 * skip * SQUASHFS_META_INDEXES block-list entries per step. */
		for (i = meta->offset + meta->entries; i <= index &&
				i < meta->offset + SQUASHFS_META_ENTRIES; i++) {
			int blocks = skip * SQUASHFS_META_INDEXES;

			while (blocks) {
				/* read at most SIZE bytes of entries at a time */
				int block = blocks > (SIZE >> 2) ? (SIZE >> 2) : blocks;
				int res = read_block_index(inode->i_sb, block, block_list,
					&cur_index_block, &cur_offset);

				if (res == -1)
					goto failed;

				cur_data_block += res;
				blocks -= block;
			}

			/* Record the reached position in the cache slot */
			meta_entry = &meta->meta_entry[i - meta->offset];
			meta_entry->index_block = cur_index_block - sblk->inode_table_start;
			meta_entry->offset = cur_offset;
			meta_entry->data_block = cur_data_block;
			meta->entries ++;
			offset ++;
		}

		TRACE("get_meta_index: meta->offset %d, meta->entries %d\n",
				meta->offset, meta->entries);

		release_meta_index(inode, meta);
	}

all_done:
	*index_block = cur_index_block;
	*index_offset = cur_offset;
	*data_block = cur_data_block;

	/* Convert back from meta-index units to a data-block number */
	return offset * SQUASHFS_META_INDEXES * skip;

failed:
	release_meta_index(inode, meta);
	return -1;
}
1578 |
|
1579 |
|
1580 |
/*
 * Locate data block 'index' of a regular file: use the meta index cache
 * to get close, then walk the remaining block-list entries.  On success
 * returns the block's start position in the data area and stores its
 * compressed length in *bsize; returns 0 on failure.  readahead_blks and
 * block_p are unused in this (current-format) implementation -- the
 * signature is shared with the 1.0/2.0 read_blocklist variants via
 * msblk->read_blocklist.
 */
static long long read_blocklist(struct inode *inode, int index,
				int readahead_blks, char *block_list,
				unsigned short **block_p, unsigned int *bsize)
{
	long long block_ptr;
	int offset;
	long long block;
	int res = get_meta_index(inode, index, &block_ptr, &offset, &block,
		block_list);

	TRACE("read_blocklist: res %d, index %d, block_ptr 0x%llx, offset"
		       " 0x%x, block 0x%llx\n", res, index, block_ptr, offset, block);

	if(res == -1)
		goto failure;

	/* res is how far the meta index got us; walk the rest by summing
	 * block-list entries, at most SIZE>>2 entries per read. */
	index -= res;

	while (index) {
		int blocks = index > (SIZE >> 2) ? (SIZE >> 2) : index;
		int res = read_block_index(inode->i_sb, blocks, block_list,
			&block_ptr, &offset);
		if (res == -1)
			goto failure;
		block += res;
		index -= blocks;
	}

	/* Read the target block's own entry to obtain its length */
	if (read_block_index(inode->i_sb, 1, block_list, &block_ptr, &offset) == -1)
		goto failure;
	*bsize = *((unsigned int *) block_list);

	return block;

failure:
	return 0;
}
1617 |
|
1618 |
|
1619 |
/*
 * address_space readpage: decompress the data block (or tail-end
 * fragment) containing the requested page and push its contents into
 * every page-cache page that block covers, not just the one asked for.
 * Holes (bsize == 0) are zero-filled.  On error the requested page is
 * zeroed and flagged with SetPageError.  Always returns 0.
 */
static int squashfs_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	struct squashfs_super_block *sblk = &msblk->sblk;
	unsigned char *block_list = NULL;
	long long block;
	unsigned int bsize, i;
	int bytes;
	/* data-block index containing this page */
	int index = page->index >> (sblk->block_log - PAGE_CACHE_SHIFT);
	void *pageaddr;
	struct squashfs_fragment_cache *fragment = NULL;
	char *data_ptr = msblk->read_page;

	/* pages-per-block mask, and the page range this block spans */
	int mask = (1 << (sblk->block_log - PAGE_CACHE_SHIFT)) - 1;
	int start_index = page->index & ~mask;
	int end_index = start_index | mask;
	int file_end = i_size_read(inode) >> sblk->block_log;
	int sparse = 0;

	TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
					page->index, SQUASHFS_I(inode)->start_block);

	/* Page beyond EOF: just zero-fill it */
	if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT))
		goto out;

	/* Block path: whole data block (no fragment, or not the tail) */
	if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
					|| index < file_end) {
		block_list = kmalloc(SIZE, GFP_KERNEL);
		if (block_list == NULL) {
			ERROR("Failed to allocate block_list\n");
			goto error_out;
		}

		block = (msblk->read_blocklist)(inode, index, 1, block_list, NULL, &bsize);
		if (block == 0)
			goto error_out;

		if (bsize == 0) { /* hole */
			bytes = index == file_end ?
				(i_size_read(inode) & (sblk->block_size - 1)) : sblk->block_size;
			sparse = 1;
		} else {
			/* read_page is a single shared buffer; hold the mutex
			 * until all covered pages have been filled below */
			mutex_lock(&msblk->read_page_mutex);

			bytes = squashfs_read_data(inode->i_sb, msblk->read_page, block,
				bsize, NULL, sblk->block_size);

			if (bytes == 0) {
				ERROR("Unable to read page, block %llx, size %x\n", block, bsize);
				mutex_unlock(&msblk->read_page_mutex);
				goto error_out;
			}
		}
	} else {
		/* Fragment path: the file's tail lives in a shared fragment */
		fragment = get_cached_fragment(inode->i_sb,
					SQUASHFS_I(inode)-> u.s1.fragment_start_block,
					SQUASHFS_I(inode)->u.s1.fragment_size);

		if (fragment == NULL) {
			ERROR("Unable to read page, block %llx, size %x\n",
					SQUASHFS_I(inode)->u.s1.fragment_start_block,
					(int) SQUASHFS_I(inode)->u.s1.fragment_size);
			goto error_out;
		}
		bytes = i_size_read(inode) & (sblk->block_size - 1);
		data_ptr = fragment->data + SQUASHFS_I(inode)->u.s1.fragment_offset;
	}

	/* Copy the decompressed block into every page it covers; sparse
	 * blocks contribute zero bytes so pages are fully zero-filled. */
	for (i = start_index; i <= end_index && bytes > 0; i++,
					bytes -= PAGE_CACHE_SIZE, data_ptr += PAGE_CACHE_SIZE) {
		struct page *push_page;
		int avail = sparse ? 0 : min_t(unsigned int, bytes, PAGE_CACHE_SIZE);

		TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail);

		/* grab (without blocking) sibling pages; skip any we can't get */
		push_page = (i == page->index) ? page :
			grab_cache_page_nowait(page->mapping, i);

		if (!push_page)
			continue;

		if (PageUptodate(push_page))
			goto skip_page;

		pageaddr = kmap_atomic(push_page, KM_USER0);
		memcpy(pageaddr, data_ptr, avail);
		memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
		kunmap_atomic(pageaddr, KM_USER0);
		flush_dcache_page(push_page);
		SetPageUptodate(push_page);
skip_page:
		unlock_page(push_page);
		if(i != page->index)
			page_cache_release(push_page);
	}

	/* Release whichever resource the block/fragment path acquired */
	if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
					|| index < file_end) {
		if (!sparse)
			mutex_unlock(&msblk->read_page_mutex);
		kfree(block_list);
	} else
		release_cached_fragment(msblk, fragment);

	return 0;

error_out:
	SetPageError(page);
out:
	/* Error or beyond-EOF: hand back a zeroed page */
	pageaddr = kmap_atomic(page, KM_USER0);
	memset(pageaddr, 0, PAGE_CACHE_SIZE);
	kunmap_atomic(pageaddr, KM_USER0);
	flush_dcache_page(page);
	if (!PageError(page))
		SetPageUptodate(page);
	unlock_page(page);

	kfree(block_list);
	return 0;
}
1741 |
|
1742 |
|
1743 |
static int get_dir_index_using_offset(struct super_block *s, |
1744 |
long long *next_block, unsigned int *next_offset, |
1745 |
long long index_start, unsigned int index_offset, int i_count, |
1746 |
long long f_pos) |
1747 |
{ |
1748 |
struct squashfs_sb_info *msblk = s->s_fs_info; |
1749 |
struct squashfs_super_block *sblk = &msblk->sblk; |
1750 |
int i, length = 0; |
1751 |
struct squashfs_dir_index index; |
1752 |
|
1753 |
TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %d\n", |
1754 |
i_count, (unsigned int) f_pos); |
1755 |
|
1756 |
f_pos =- 3; |
1757 |
if (f_pos == 0) |
1758 |
goto finish; |
1759 |
|
1760 |
for (i = 0; i < i_count; i++) { |
1761 |
if (msblk->swap) { |
1762 |
struct squashfs_dir_index sindex; |
1763 |
squashfs_get_cached_block(s, &sindex, index_start, index_offset, |
1764 |
sizeof(sindex), &index_start, &index_offset); |
1765 |
SQUASHFS_SWAP_DIR_INDEX(&index, &sindex); |
1766 |
} else |
1767 |
squashfs_get_cached_block(s, &index, index_start, index_offset, |
1768 |
sizeof(index), &index_start, &index_offset); |
1769 |
|
1770 |
if (index.index > f_pos) |
1771 |
break; |
1772 |
|
1773 |
squashfs_get_cached_block(s, NULL, index_start, index_offset, |
1774 |
index.size + 1, &index_start, &index_offset); |
1775 |
|
1776 |
length = index.index; |
1777 |
*next_block = index.start_block + sblk->directory_table_start; |
1778 |
} |
1779 |
|
1780 |
*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE; |
1781 |
|
1782 |
finish: |
1783 |
return length + 3; |
1784 |
} |
1785 |
|
1786 |
|
1787 |
/*
 * Use the directory index to fast-forward *next_block/*next_offset to
 * the index entry at or before 'name' (entries are sorted), avoiding a
 * linear scan during lookup.  Returns the corresponding directory
 * position including the 3 synthetic "." / ".." slots.  On allocation
 * failure it degrades gracefully to "start of directory" (returns 3).
 */
static int get_dir_index_using_name(struct super_block *s,
				long long *next_block, unsigned int *next_offset,
				long long index_start, unsigned int index_offset, int i_count,
				const char *name, int size)
{
	struct squashfs_sb_info *msblk = s->s_fs_info;
	struct squashfs_super_block *sblk = &msblk->sblk;
	int i, length = 0;
	struct squashfs_dir_index *index;
	char *str;

	TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);

	/* One buffer: the target name copy, then the index entry with room
	 * for its name (SQUASHFS_NAME_LEN + 1 bytes each). */
	str = kmalloc(sizeof(struct squashfs_dir_index) +
		(SQUASHFS_NAME_LEN + 1) * 2, GFP_KERNEL);
	if (str == NULL) {
		ERROR("Failed to allocate squashfs_dir_index\n");
		goto failure;
	}

	index = (struct squashfs_dir_index *) (str + SQUASHFS_NAME_LEN + 1);
	strncpy(str, name, size);
	str[size] = '\0';

	for (i = 0; i < i_count; i++) {
		if (msblk->swap) {
			struct squashfs_dir_index sindex;
			squashfs_get_cached_block(s, &sindex, index_start, index_offset,
					sizeof(sindex), &index_start, &index_offset);
			SQUASHFS_SWAP_DIR_INDEX(index, &sindex);
		} else
			squashfs_get_cached_block(s, index, index_start, index_offset,
					sizeof(struct squashfs_dir_index), &index_start, &index_offset);

		/* size is stored as length - 1 on disk.
		 * NOTE(review): index->size comes straight from disk and is
		 * not validated against SQUASHFS_NAME_LEN here -- confirm a
		 * corrupted image cannot overrun index->name. */
		squashfs_get_cached_block(s, index->name, index_start, index_offset,
					index->size + 1, &index_start, &index_offset);

		index->name[index->size + 1] = '\0';

		/* Entries are sorted: stop at the first one past 'name' */
		if (strcmp(index->name, str) > 0)
			break;

		length = index->index;
		*next_block = index->start_block + sblk->directory_table_start;
	}

	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
	kfree(str);

failure:
	/* +3 converts the on-disk position to f_pos terms ("." and "..") */
	return length + 3;
}
1839 |
|
1840 |
|
1841 |
/*
 * Fill in directory entries for a readdir() on directory inode <i>.
 * f_pos values 0-2 are the synthesised "." and ".." entries; real entries
 * start at f_pos 3 and are identified by their byte offset into the
 * uncompressed directory metadata.
 */
static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
{
	struct inode *i = file->f_dentry->d_inode;
	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
	struct squashfs_super_block *sblk = &msblk->sblk;
	long long next_block = SQUASHFS_I(i)->start_block +
		sblk->directory_table_start;
	int next_offset = SQUASHFS_I(i)->offset, length = 0, dir_count;
	struct squashfs_dir_header dirh;
	struct squashfs_dir_entry *dire;

	TRACE("Entered squashfs_readdir [%llx:%x]\n", next_block, next_offset);

	dire = kmalloc(sizeof(struct squashfs_dir_entry) +
		SQUASHFS_NAME_LEN + 1, GFP_KERNEL);
	if (dire == NULL) {
		ERROR("Failed to allocate squashfs_dir_entry\n");
		/* NOTE(review): allocation failure is reported to the caller
		 * as success (return 0) — verify whether -ENOMEM is safer. */
		goto finish;
	}

	/* Emit "." at f_pos 0 and ".." at f_pos 1 (".." advances f_pos to 3). */
	while(file->f_pos < 3) {
		char *name;
		int size, i_ino;

		if(file->f_pos == 0) {
			name = ".";
			size = 1;
			i_ino = i->i_ino;
		} else {
			name = "..";
			size = 2;
			i_ino = SQUASHFS_I(i)->u.s2.parent_inode;
		}
		TRACE("Calling filldir(%x, %s, %d, %d, %d, %d)\n",
				(unsigned int) dirent, name, size, (int)
				file->f_pos, i_ino, squashfs_filetype_table[1]);

		if (filldir(dirent, name, size, file->f_pos, i_ino,
				squashfs_filetype_table[1]) < 0) {
			TRACE("Filldir returned less than 0\n");
			goto finish;
		}
		file->f_pos += size;
	}

	/* Use the directory index (if present) to skip metadata blocks that
	 * lie entirely before the current f_pos. */
	length = get_dir_index_using_offset(i->i_sb, &next_block, &next_offset,
				SQUASHFS_I(i)->u.s2.directory_index_start,
				SQUASHFS_I(i)->u.s2.directory_index_offset,
				SQUASHFS_I(i)->u.s2.directory_index_count, file->f_pos);

	while (length < i_size_read(i)) {
		/* read directory header */
		if (msblk->swap) {
			struct squashfs_dir_header sdirh;

			if (!squashfs_get_cached_block(i->i_sb, &sdirh, next_block,
					next_offset, sizeof(sdirh), &next_block, &next_offset))
				goto failed_read;

			length += sizeof(sdirh);
			SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh);
		} else {
			if (!squashfs_get_cached_block(i->i_sb, &dirh, next_block,
					next_offset, sizeof(dirh), &next_block, &next_offset))
				goto failed_read;

			length += sizeof(dirh);
		}

		/* On-disk count is stored as (number of entries - 1). */
		dir_count = dirh.count + 1;
		while (dir_count--) {
			if (msblk->swap) {
				struct squashfs_dir_entry sdire;
				if (!squashfs_get_cached_block(i->i_sb, &sdire, next_block,
						next_offset, sizeof(sdire), &next_block, &next_offset))
					goto failed_read;

				length += sizeof(sdire);
				SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire);
			} else {
				if (!squashfs_get_cached_block(i->i_sb, dire, next_block,
						next_offset, sizeof(*dire), &next_block, &next_offset))
					goto failed_read;

				length += sizeof(*dire);
			}

			if (!squashfs_get_cached_block(i->i_sb, dire->name, next_block,
					next_offset, dire->size + 1, &next_block, &next_offset))
				goto failed_read;

			length += dire->size + 1;

			/* Skip entries already returned by earlier readdir calls. */
			if (file->f_pos >= length)
				continue;

			/* dire->size is the name length minus one. */
			dire->name[dire->size + 1] = '\0';

			TRACE("Calling filldir(%x, %s, %d, %d, %x:%x, %d, %d)\n",
					(unsigned int) dirent, dire->name, dire->size + 1,
					(int) file->f_pos, dirh.start_block, dire->offset,
					dirh.inode_number + dire->inode_number,
					squashfs_filetype_table[dire->type]);

			if (filldir(dirent, dire->name, dire->size + 1, file->f_pos,
					dirh.inode_number + dire->inode_number,
					squashfs_filetype_table[dire->type]) < 0) {
				TRACE("Filldir returned less than 0\n");
				goto finish;
			}
			file->f_pos = length;
		}
	}

finish:
	kfree(dire);
	return 0;

failed_read:
	ERROR("Unable to read directory block [%llx:%x]\n", next_block,
		next_offset);
	kfree(dire);
	return 0;
}
1965 |
|
1966 |
|
1967 |
static struct dentry *squashfs_lookup(struct inode *i, struct dentry *dentry, |
1968 |
struct nameidata *nd) |
1969 |
{ |
1970 |
const unsigned char *name = dentry->d_name.name; |
1971 |
int len = dentry->d_name.len; |
1972 |
struct inode *inode = NULL; |
1973 |
struct squashfs_sb_info *msblk = i->i_sb->s_fs_info; |
1974 |
struct squashfs_super_block *sblk = &msblk->sblk; |
1975 |
long long next_block = SQUASHFS_I(i)->start_block + |
1976 |
sblk->directory_table_start; |
1977 |
int next_offset = SQUASHFS_I(i)->offset, length = 0, dir_count; |
1978 |
struct squashfs_dir_header dirh; |
1979 |
struct squashfs_dir_entry *dire; |
1980 |
|
1981 |
TRACE("Entered squashfs_lookup [%llx:%x]\n", next_block, next_offset); |
1982 |
|
1983 |
dire = kmalloc(sizeof(struct squashfs_dir_entry) + |
1984 |
SQUASHFS_NAME_LEN + 1, GFP_KERNEL); |
1985 |
if (dire == NULL) { |
1986 |
ERROR("Failed to allocate squashfs_dir_entry\n"); |
1987 |
goto exit_lookup; |
1988 |
} |
1989 |
|
1990 |
if (len > SQUASHFS_NAME_LEN) |
1991 |
goto exit_lookup; |
1992 |
|
1993 |
length = get_dir_index_using_name(i->i_sb, &next_block, &next_offset, |
1994 |
SQUASHFS_I(i)->u.s2.directory_index_start, |
1995 |
SQUASHFS_I(i)->u.s2.directory_index_offset, |
1996 |
SQUASHFS_I(i)->u.s2.directory_index_count, name, len); |
1997 |
|
1998 |
while (length < i_size_read(i)) { |
1999 |
/* read directory header */ |
2000 |
if (msblk->swap) { |
2001 |
struct squashfs_dir_header sdirh; |
2002 |
if (!squashfs_get_cached_block(i->i_sb, &sdirh, next_block, |
2003 |
next_offset, sizeof(sdirh), &next_block, &next_offset)) |
2004 |
goto failed_read; |
2005 |
|
2006 |
length += sizeof(sdirh); |
2007 |
SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh); |
2008 |
} else { |
2009 |
if (!squashfs_get_cached_block(i->i_sb, &dirh, next_block, |
2010 |
next_offset, sizeof(dirh), &next_block, &next_offset)) |
2011 |
goto failed_read; |
2012 |
|
2013 |
length += sizeof(dirh); |
2014 |
} |
2015 |
|
2016 |
dir_count = dirh.count + 1; |
2017 |
while (dir_count--) { |
2018 |
if (msblk->swap) { |
2019 |
struct squashfs_dir_entry sdire; |
2020 |
if (!squashfs_get_cached_block(i->i_sb, &sdire, next_block, |
2021 |
next_offset, sizeof(sdire), &next_block, &next_offset)) |
2022 |
goto failed_read; |
2023 |
|
2024 |
length += sizeof(sdire); |
2025 |
SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire); |
2026 |
} else { |
2027 |
if (!squashfs_get_cached_block(i->i_sb, dire, next_block, |
2028 |
next_offset, sizeof(*dire), &next_block, &next_offset)) |
2029 |
goto failed_read; |
2030 |
|
2031 |
length += sizeof(*dire); |
2032 |
} |
2033 |
|
2034 |
if (!squashfs_get_cached_block(i->i_sb, dire->name, next_block, |
2035 |
next_offset, dire->size + 1, &next_block, &next_offset)) |
2036 |
goto failed_read; |
2037 |
|
2038 |
length += dire->size + 1; |
2039 |
|
2040 |
if (name[0] < dire->name[0]) |
2041 |
goto exit_lookup; |
2042 |
|
2043 |
if ((len == dire->size + 1) && !strncmp(name, dire->name, len)) { |
2044 |
squashfs_inode_t ino = SQUASHFS_MKINODE(dirh.start_block, |
2045 |
dire->offset); |
2046 |
|
2047 |
TRACE("calling squashfs_iget for directory entry %s, inode" |
2048 |
" %x:%x, %d\n", name, dirh.start_block, dire->offset, |
2049 |
dirh.inode_number + dire->inode_number); |
2050 |
|
2051 |
inode = squashfs_iget(i->i_sb, ino, dirh.inode_number + dire->inode_number); |
2052 |
|
2053 |
goto exit_lookup; |
2054 |
} |
2055 |
} |
2056 |
} |
2057 |
|
2058 |
exit_lookup: |
2059 |
kfree(dire); |
2060 |
if (inode) |
2061 |
return d_splice_alias(inode, dentry); |
2062 |
d_add(dentry, inode); |
2063 |
return ERR_PTR(0); |
2064 |
|
2065 |
failed_read: |
2066 |
ERROR("Unable to read directory block [%llx:%x]\n", next_block, |
2067 |
next_offset); |
2068 |
goto exit_lookup; |
2069 |
} |
2070 |
|
2071 |
|
2072 |
/*
 * Squashfs is a read-only filesystem: force MS_RDONLY back on for any
 * remount attempt and report success.
 */
static int squashfs_remount(struct super_block *s, int *flags, char *data)
{
	*flags |= MS_RDONLY;
	return 0;
}
2077 |
|
2078 |
|
2079 |
/*
 * Release all per-mount state at unmount time: the metadata block cache,
 * fragment cache, readpage buffer, uid/guid table, both fragment index
 * tables, the meta index and the zlib workspace.  Buffers allocated with
 * vmalloc are vfree'd, kmalloc'ed ones kfree'd.
 */
static void squashfs_put_super(struct super_block *s)
{
	int i;

	if (s->s_fs_info) {
		struct squashfs_sb_info *sbi = s->s_fs_info;
		/* Only cache slots holding a valid block own a data buffer. */
		if (sbi->block_cache)
			for (i = 0; i < squashfs_cached_blks; i++)
				if (sbi->block_cache[i].block != SQUASHFS_INVALID_BLK)
					vfree(sbi->block_cache[i].data);
		if (sbi->fragment)
			for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++)
				vfree(sbi->fragment[i].data);
		kfree(sbi->fragment);
		kfree(sbi->block_cache);
		vfree(sbi->read_page);
		kfree(sbi->uid);
		kfree(sbi->fragment_index);
		kfree(sbi->fragment_index_2);
		kfree(sbi->meta_index);
		vfree(sbi->stream.workspace);
		kfree(s->s_fs_info);
		/* Clear the pointer so a repeated put_super is a no-op. */
		s->s_fs_info = NULL;
	}
}
2104 |
|
2105 |
|
2106 |
/*
 * Mount entry point: delegate to the generic block-device mount helper,
 * which reads the device and calls squashfs_fill_super() to parse the
 * superblock.
 */
static int squashfs_get_sb(struct file_system_type *fs_type, int flags,
		const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_bdev(fs_type, flags, dev_name, data, squashfs_fill_super,
		mnt);
}
2112 |
|
2113 |
|
2114 |
/*
 * Module initialisation: create the inode slab cache, announce the
 * driver version, then register the filesystem type.  The cache is torn
 * down again if registration fails.
 */
static int __init init_squashfs_fs(void)
{
	int err;

	err = init_inodecache();
	if (err != 0)
		return err;

	printk(KERN_INFO "squashfs: version 3.3 (2007/10/31) "
		"Phillip Lougher\n");

	err = register_filesystem(&squashfs_fs_type);
	if (err != 0)
		destroy_inodecache();

	return err;
}
2130 |
|
2131 |
|
2132 |
/*
 * Module teardown: unregister the filesystem first so no new inodes can
 * be created, then destroy the inode slab cache.
 */
static void __exit exit_squashfs_fs(void)
{
	unregister_filesystem(&squashfs_fs_type);
	destroy_inodecache();
}
2137 |
|
2138 |
|
2139 |
/* Slab cache for struct squashfs_inode_info, created by init_inodecache(). */
static struct kmem_cache * squashfs_inode_cachep;
2140 |
|
2141 |
|
2142 |
static struct inode *squashfs_alloc_inode(struct super_block *sb) |
2143 |
{ |
2144 |
struct squashfs_inode_info *ei; |
2145 |
ei = kmem_cache_alloc(squashfs_inode_cachep, GFP_KERNEL); |
2146 |
return ei ? &ei->vfs_inode : NULL; |
2147 |
} |
2148 |
|
2149 |
|
2150 |
/*
 * Return an inode previously handed out by squashfs_alloc_inode() to the
 * slab cache.
 */
static void squashfs_destroy_inode(struct inode *inode)
{
	kmem_cache_free(squashfs_inode_cachep, SQUASHFS_I(inode));
}
2154 |
|
2155 |
|
2156 |
/*
 * Slab constructor: invoked once per object when a new slab page is
 * populated, initialising the embedded VFS inode.
 */
static void init_once(struct kmem_cache *cachep, void *foo)
{
	struct squashfs_inode_info *ei = foo;

	inode_init_once(&ei->vfs_inode);
}
2162 |
|
2163 |
|
2164 |
/*
 * Create the slab cache used for squashfs in-memory inodes.
 * Returns 0 on success or -ENOMEM if the cache cannot be created.
 */
static int __init init_inodecache(void)
{
	squashfs_inode_cachep = kmem_cache_create("squashfs_inode_cache",
		sizeof(struct squashfs_inode_info), 0,
		SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT, init_once);

	return squashfs_inode_cachep != NULL ? 0 : -ENOMEM;
}
2173 |
|
2174 |
|
2175 |
/*
 * Destroy the inode slab cache on module unload; all cached inodes must
 * already have been freed.
 */
static void destroy_inodecache(void)
{
	kmem_cache_destroy(squashfs_inode_cachep);
}
2179 |
|
2180 |
|
2181 |
module_init(init_squashfs_fs); |
2182 |
module_exit(exit_squashfs_fs); |
2183 |
MODULE_DESCRIPTION("squashfs 3.2-r2-CVS, a compressed read-only filesystem"); |
2184 |
MODULE_AUTHOR("Phillip Lougher <phillip@lougher.demon.co.uk>"); |
2185 |
MODULE_LICENSE("GPL"); |