Line 0
Link Here
|
|
|
1 |
/* |
2 |
* linux/fs/ext3/xattr.c |
3 |
* |
4 |
* Copyright (C) 2001-2003 by Andreas Gruenbacher, <agruen@suse.de> |
5 |
* |
6 |
* Fix by Harrison Xing <harrison@mountainviewdata.com>. |
7 |
* Ext3 code with a lot of help from Eric Jarman <ejarman@acm.org>. |
8 |
* Extended attributes for symlinks and special files added per |
9 |
* suggestion of Luka Renko <luka.renko@hermes.si>. |
10 |
*/ |
11 |
|
12 |
/* |
13 |
* Extended attributes are stored on disk blocks allocated outside of |
14 |
* any inode. The i_file_acl field is then made to point to this allocated |
15 |
* block. If all extended attributes of an inode are identical, these |
16 |
* inodes may share the same extended attribute block. Such situations |
17 |
* are automatically detected by keeping a cache of recent attribute block |
18 |
* numbers and hashes over the block's contents in memory. |
19 |
* |
20 |
* |
21 |
* Extended attribute block layout: |
22 |
* |
23 |
* +------------------+ |
24 |
* | header | |
25 |
* | entry 1 | | |
26 |
* | entry 2 | | growing downwards |
27 |
* | entry 3 | v |
28 |
* | four null bytes | |
29 |
* | . . . | |
30 |
* | value 1 | ^ |
31 |
* | value 3 | | growing upwards |
32 |
* | value 2 | | |
33 |
* +------------------+ |
34 |
* |
35 |
* The block header is followed by multiple entry descriptors. These entry |
36 |
 * descriptors are variable in size, and aligned to EXT3_XATTR_PAD
37 |
* byte boundaries. The entry descriptors are sorted by attribute name, |
38 |
* so that two extended attribute blocks can be compared efficiently. |
39 |
* |
40 |
* Attribute values are aligned to the end of the block, stored in |
41 |
* no specific order. They are also padded to EXT3_XATTR_PAD byte |
42 |
* boundaries. No additional gaps are left between them. |
43 |
* |
44 |
* Locking strategy |
45 |
* ---------------- |
46 |
* EXT3_I(inode)->i_file_acl is protected by EXT3_I(inode)->xattr_sem. |
47 |
* EA blocks are only changed if they are exclusive to an inode, so |
48 |
* holding xattr_sem also means that nothing but the EA block's reference |
49 |
* count will change. Multiple writers to an EA block are synchronized |
50 |
* by the bh lock. No more than a single bh lock is held at any time, |
51 |
* which avoids deadlocks. |
52 |
*/ |
53 |
|
54 |
#include <linux/fs.h> |
55 |
#include <linux/locks.h> |
56 |
#include <linux/slab.h> |
57 |
#include <linux/ext3_jbd.h> |
58 |
#include <linux/ext3_fs.h> |
59 |
#include <linux/ext3_xattr.h> |
60 |
#include <linux/mbcache.h> |
61 |
#include <linux/quotaops.h> |
62 |
#include <linux/rwsem.h> |
63 |
|
64 |
/* Convenience accessors for the on-disk extended attribute block layout. */
#define HDR(bh) ((struct ext3_xattr_header *)((bh)->b_data))
#define ENTRY(ptr) ((struct ext3_xattr_entry *)(ptr))
/* The first entry descriptor starts immediately after the block header. */
#define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1)
/* The entry list is terminated by four zero bytes. */
#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
68 |
|
69 |
#ifdef EXT3_XATTR_DEBUG
/* Debug printk tagged with the inode's device and inode number. */
# define ea_idebug(inode, f...) do { \
		printk(KERN_DEBUG "inode %s:%ld: ", \
			kdevname(inode->i_dev), inode->i_ino); \
		printk(f); \
		printk("\n"); \
	} while (0)
/* Debug printk tagged with the buffer's device and block number. */
# define ea_bdebug(bh, f...) do { \
		printk(KERN_DEBUG "block %s:%ld: ", \
			kdevname(bh->b_dev), bh->b_blocknr); \
		printk(f); \
		printk("\n"); \
	} while (0)
#else
/* Debugging disabled: both helpers compile away to nothing. */
# define ea_idebug(f...)
# define ea_bdebug(f...)
#endif
86 |
|
87 |
/* Second half of ext3_xattr_set_handle(): writes the result to disk. */
static int ext3_xattr_set_handle2(handle_t *, struct inode *,
				  struct buffer_head *,
				  struct ext3_xattr_header *);

#ifdef CONFIG_EXT3_FS_XATTR_SHARING

static int ext3_xattr_cache_insert(struct buffer_head *);
static struct buffer_head *ext3_xattr_cache_find(handle_t *, struct inode *,
						 struct ext3_xattr_header *);
static void ext3_xattr_cache_remove(struct buffer_head *);
static void ext3_xattr_rehash(struct ext3_xattr_header *,
			      struct ext3_xattr_entry *);

/* Cache of recently seen EA blocks, used to share identical blocks. */
static struct mb_cache *ext3_xattr_cache;

#else
/* Sharing disabled: the cache operations become no-ops. */
# define ext3_xattr_cache_insert(bh) 0
# define ext3_xattr_cache_find(handle, inode, header) NULL
# define ext3_xattr_cache_remove(bh) while(0) {}
# define ext3_xattr_rehash(header, entry) while(0) {}
#endif

/* Table of registered attribute handlers, indexed by name_index - 1.
 * Protected by ext3_handler_lock. */
struct ext3_xattr_handler *ext3_xattr_handlers[EXT3_XATTR_INDEX_MAX];
rwlock_t ext3_handler_lock = RW_LOCK_UNLOCKED;
111 |
|
112 |
int |
113 |
ext3_xattr_register(int name_index, struct ext3_xattr_handler *handler) |
114 |
{ |
115 |
int error = -EINVAL; |
116 |
|
117 |
if (name_index > 0 && name_index <= EXT3_XATTR_INDEX_MAX) { |
118 |
write_lock(&ext3_handler_lock); |
119 |
if (!ext3_xattr_handlers[name_index-1]) { |
120 |
ext3_xattr_handlers[name_index-1] = handler; |
121 |
error = 0; |
122 |
} |
123 |
write_unlock(&ext3_handler_lock); |
124 |
} |
125 |
return error; |
126 |
} |
127 |
|
128 |
void |
129 |
ext3_xattr_unregister(int name_index, struct ext3_xattr_handler *handler) |
130 |
{ |
131 |
if (name_index > 0 || name_index <= EXT3_XATTR_INDEX_MAX) { |
132 |
write_lock(&ext3_handler_lock); |
133 |
ext3_xattr_handlers[name_index-1] = NULL; |
134 |
write_unlock(&ext3_handler_lock); |
135 |
} |
136 |
} |
137 |
|
138 |
/*
 * Return the remainder of 'a' after the prefix 'a_prefix', or NULL if
 * 'a' does not start with that prefix.  An empty prefix matches any
 * string and yields 'a' unchanged.
 */
static inline const char *
strcmp_prefix(const char *a, const char *a_prefix)
{
	for (; *a_prefix; a++, a_prefix++) {
		if (*a != *a_prefix)
			return NULL;
	}
	return a;
}
147 |
|
148 |
/*
 * Decode the extended attribute name, and translate it into
 * the name_index and name suffix.
 *
 * On success, returns the matching handler and advances *name past the
 * handler's prefix so it points at the name suffix.  Returns NULL if no
 * registered handler's prefix matches.
 */
static inline struct ext3_xattr_handler *
ext3_xattr_resolve_name(const char **name)
{
	struct ext3_xattr_handler *handler = NULL;
	int i;

	if (!*name)
		return NULL;
	read_lock(&ext3_handler_lock);
	for (i=0; i<EXT3_XATTR_INDEX_MAX; i++) {
		if (ext3_xattr_handlers[i]) {
			const char *n = strcmp_prefix(*name,
				ext3_xattr_handlers[i]->prefix);
			if (n) {
				/* Strip the matched prefix: the caller sees
				 * the suffix relative to this handler. */
				handler = ext3_xattr_handlers[i];
				*name = n;
				break;
			}
		}
	}
	read_unlock(&ext3_handler_lock);
	return handler;
}
175 |
|
176 |
/*
 * Look up the handler registered for a given name index.
 *
 * Returns NULL if the index is out of range or no handler is
 * registered in that slot.
 */
static inline struct ext3_xattr_handler *
ext3_xattr_handler(int name_index)
{
	struct ext3_xattr_handler *handler = NULL;
	if (name_index > 0 && name_index <= EXT3_XATTR_INDEX_MAX) {
		read_lock(&ext3_handler_lock);
		handler = ext3_xattr_handlers[name_index-1];
		read_unlock(&ext3_handler_lock);
	}
	return handler;
}
187 |
|
188 |
/* |
189 |
* Inode operation getxattr() |
190 |
* |
191 |
* dentry->d_inode->i_sem: don't care |
192 |
* BKL: held |
193 |
*/ |
194 |
ssize_t |
195 |
ext3_getxattr(struct dentry *dentry, const char *name, |
196 |
void *buffer, size_t size) |
197 |
{ |
198 |
struct ext3_xattr_handler *handler; |
199 |
struct inode *inode = dentry->d_inode; |
200 |
|
201 |
handler = ext3_xattr_resolve_name(&name); |
202 |
if (!handler) |
203 |
return -EOPNOTSUPP; |
204 |
return handler->get(inode, name, buffer, size); |
205 |
} |
206 |
|
207 |
/*
 * Inode operation listxattr()
 *
 * dentry->d_inode->i_sem: don't care
 * BKL: held
 */
ssize_t
ext3_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	/* Thin wrapper: all the work happens in ext3_xattr_list(). */
	return ext3_xattr_list(dentry->d_inode, buffer, size);
}
218 |
|
219 |
/* |
220 |
* Inode operation setxattr() |
221 |
* |
222 |
* dentry->d_inode->i_sem: down |
223 |
* BKL: held |
224 |
*/ |
225 |
int |
226 |
ext3_setxattr(struct dentry *dentry, const char *name, |
227 |
const void *value, size_t size, int flags) |
228 |
{ |
229 |
struct ext3_xattr_handler *handler; |
230 |
struct inode *inode = dentry->d_inode; |
231 |
|
232 |
if (size == 0) |
233 |
value = ""; /* empty EA, do not remove */ |
234 |
handler = ext3_xattr_resolve_name(&name); |
235 |
if (!handler) |
236 |
return -EOPNOTSUPP; |
237 |
return handler->set(inode, name, value, size, flags); |
238 |
} |
239 |
|
240 |
/* |
241 |
* Inode operation removexattr() |
242 |
* |
243 |
* dentry->d_inode->i_sem: down |
244 |
* BKL: held |
245 |
*/ |
246 |
int |
247 |
ext3_removexattr(struct dentry *dentry, const char *name) |
248 |
{ |
249 |
struct ext3_xattr_handler *handler; |
250 |
struct inode *inode = dentry->d_inode; |
251 |
|
252 |
handler = ext3_xattr_resolve_name(&name); |
253 |
if (!handler) |
254 |
return -EOPNOTSUPP; |
255 |
return handler->set(inode, name, NULL, 0, XATTR_REPLACE); |
256 |
} |
257 |
|
258 |
/*
 * ext3_xattr_get()
 *
 * Copy an extended attribute into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
int
ext3_xattr_get(struct inode *inode, int name_index, const char *name,
	       void *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext3_xattr_entry *entry;
	size_t size, name_len;
	char *end;
	int error;

	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);

	if (name == NULL)
		return -EINVAL;
	down_read(&EXT3_I(inode)->xattr_sem);
	error = -ENODATA;
	if (!EXT3_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT3_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	/* Sanity-check the on-disk header before trusting any entry. */
	if (HDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
bad_block:	ext3_error(inode->i_sb, "ext3_xattr_get",
			"inode %ld: bad block %d", inode->i_ino,
			EXT3_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}
	/* find named attribute */
	name_len = strlen(name);

	error = -ERANGE;
	if (name_len > 255)
		goto cleanup;
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		struct ext3_xattr_entry *next =
			EXT3_XATTR_NEXT(entry);
		/* An entry running past the block means corruption. */
		if ((char *)next >= end)
			goto bad_block;
		if (name_index == entry->e_name_index &&
		    name_len == entry->e_name_len &&
		    memcmp(name, entry->e_name, name_len) == 0)
			goto found;
		entry = next;
	}
	/* Check the remaining name entries */
	/* NOTE(review): the loop above only exits once IS_LAST_ENTRY(entry)
	 * already holds, so this second loop can never execute here — it
	 * looks like it was intended to validate the entries after a found
	 * match; confirm against the found: path before removing. */
	while (!IS_LAST_ENTRY(entry)) {
		struct ext3_xattr_entry *next =
			EXT3_XATTR_NEXT(entry);
		if ((char *)next >= end)
			goto bad_block;
		entry = next;
	}
	if (ext3_xattr_cache_insert(bh))
		ea_idebug(inode, "cache insert failed");
	error = -ENODATA;
	goto cleanup;

found:
	/* check the buffer size */
	if (entry->e_value_block != 0)
		goto bad_block;
	size = le32_to_cpu(entry->e_value_size);
	/* The value must lie entirely within the block. */
	if (size > inode->i_sb->s_blocksize ||
	    le16_to_cpu(entry->e_value_offs) + size > inode->i_sb->s_blocksize)
		goto bad_block;

	if (ext3_xattr_cache_insert(bh))
		ea_idebug(inode, "cache insert failed");
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		/* return value of attribute */
		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
		       size);
	}
	error = size;

cleanup:
	brelse(bh);
	up_read(&EXT3_I(inode)->xattr_sem);

	return error;
}
360 |
|
361 |
/*
 * ext3_xattr_list()
 *
 * Copy a list of attribute names into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
int
ext3_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext3_xattr_entry *entry;
	size_t size = 0;
	char *buf, *end;
	int error;

	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);

	down_read(&EXT3_I(inode)->xattr_sem);
	error = 0;
	/* No EA block means an empty list, not an error. */
	if (!EXT3_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT3_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	/* Sanity-check the on-disk header before trusting any entry. */
	if (HDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
bad_block:	ext3_error(inode->i_sb, "ext3_xattr_list",
			"inode %ld: bad block %d", inode->i_ino,
			EXT3_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}
	/* compute the size required for the list of attribute names */
	for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
	     entry = EXT3_XATTR_NEXT(entry)) {
		struct ext3_xattr_handler *handler;
		struct ext3_xattr_entry *next =
			EXT3_XATTR_NEXT(entry);
		if ((char *)next >= end)
			goto bad_block;

		/* Entries without a registered handler are skipped. */
		handler = ext3_xattr_handler(entry->e_name_index);
		if (handler)
			size += handler->list(NULL, inode, entry->e_name,
					      entry->e_name_len);
	}

	if (ext3_xattr_cache_insert(bh))
		ea_idebug(inode, "cache insert failed");
	if (!buffer) {
		/* Size-query mode: report the bytes required. */
		error = size;
		goto cleanup;
	} else {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
	}

	/* list the attribute names */
	buf = buffer;
	for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
	     entry = EXT3_XATTR_NEXT(entry)) {
		struct ext3_xattr_handler *handler;

		handler = ext3_xattr_handler(entry->e_name_index);
		if (handler)
			buf += handler->list(buf, inode, entry->e_name,
					     entry->e_name_len);
	}
	error = size;

cleanup:
	brelse(bh);
	up_read(&EXT3_I(inode)->xattr_sem);

	return error;
}
448 |
|
449 |
/*
 * If the EXT3_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 * not set, set it.
 *
 * Called the first time an EA block is created so that other tools
 * know the filesystem contains extended attributes.
 */
static void ext3_xattr_update_super_block(handle_t *handle,
					  struct super_block *sb)
{
	if (EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR))
		return;

	lock_super(sb);
	/* A failed write access silently leaves the flag unset; the next
	 * EA creation will retry. */
	if (ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh) == 0) {
		EXT3_SB(sb)->s_es->s_feature_compat |=
			cpu_to_le32(EXT3_FEATURE_COMPAT_EXT_ATTR);
		sb->s_dirt = 1;
		ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
	}
	unlock_super(sb);
}
468 |
|
469 |
/*
 * ext3_xattr_set_handle()
 *
 * Create, replace or remove an extended attribute for this inode. Buffer
 * is NULL to remove an existing extended attribute, and non-NULL to
 * either replace an existing extended attribute, or create a new extended
 * attribute. The flags XATTR_REPLACE and XATTR_CREATE
 * specify that an extended attribute must exist and must not exist
 * previous to the call, respectively.
 *
 * Returns 0, or a negative error number on failure.
 */
int
ext3_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
		      const char *name, const void *value, size_t value_len,
		      int flags)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh = NULL;
	struct ext3_xattr_header *header = NULL;
	struct ext3_xattr_entry *here, *last;
	size_t name_len, free, min_offs = sb->s_blocksize;
	int not_found = 1, error;
	char *end;

	/*
	 * header -- Points either into bh, or to a temporarily
	 *	allocated buffer.
	 * here -- The named entry found, or the place for inserting, within
	 *	the block pointed to by header.
	 * last -- Points right after the last named entry within the block
	 *	pointed to by header.
	 * min_offs -- The offset of the first value (values are aligned
	 *	towards the end of the block).
	 * end -- Points right after the block pointed to by header.
	 */

	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
		  name_index, name, value, (long)value_len);

	if (IS_RDONLY(inode))
		return -EROFS;
	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;
	if (value == NULL)
		value_len = 0;	/* NULL value means "remove". */
	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	if (name_len > 255 || value_len > sb->s_blocksize)
		return -ERANGE;
	down_write(&EXT3_I(inode)->xattr_sem);
	if (EXT3_I(inode)->i_file_acl) {
		/* The inode already has an extended attribute block. */
		bh = sb_bread(sb, EXT3_I(inode)->i_file_acl);
		error = -EIO;
		if (!bh)
			goto cleanup;
		ea_bdebug(bh, "b_count=%d, refcount=%d",
			atomic_read(&(bh->b_count)),
			le32_to_cpu(HDR(bh)->h_refcount));
		header = HDR(bh);
		end = bh->b_data + bh->b_size;
		if (header->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
		    header->h_blocks != cpu_to_le32(1)) {
bad_block:		ext3_error(sb, "ext3_xattr_set",
				"inode %ld: bad block %d", inode->i_ino,
				EXT3_I(inode)->i_file_acl);
			error = -EIO;
			goto cleanup;
		}
		/* Find the named attribute. */
		here = FIRST_ENTRY(bh);
		while (!IS_LAST_ENTRY(here)) {
			struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(here);
			if ((char *)next >= end)
				goto bad_block;
			if (!here->e_value_block && here->e_value_size) {
				size_t offs = le16_to_cpu(here->e_value_offs);
				if (offs < min_offs)
					min_offs = offs;
			}
			/* Entries are sorted; not_found is a three-way
			 * comparison — stop at the first entry that sorts
			 * greater than or equal to the new name. */
			not_found = name_index - here->e_name_index;
			if (!not_found)
				not_found = name_len - here->e_name_len;
			if (!not_found)
				not_found = memcmp(name, here->e_name,name_len);
			if (not_found <= 0)
				break;
			here = next;
		}
		last = here;
		/* We still need to compute min_offs and last. */
		while (!IS_LAST_ENTRY(last)) {
			struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(last);
			if ((char *)next >= end)
				goto bad_block;
			if (!last->e_value_block && last->e_value_size) {
				size_t offs = le16_to_cpu(last->e_value_offs);
				if (offs < min_offs)
					min_offs = offs;
			}
			last = next;
		}

		/* Check whether we have enough space left. */
		free = min_offs - ((char*)last - (char*)header) - sizeof(__u32);
	} else {
		/* We will use a new extended attribute block. */
		free = sb->s_blocksize -
			sizeof(struct ext3_xattr_header) - sizeof(__u32);
		here = last = NULL;  /* avoid gcc uninitialized warning. */
	}

	if (not_found) {
		/* Request to remove a nonexistent attribute? */
		error = -ENODATA;
		if (flags & XATTR_REPLACE)
			goto cleanup;
		error = 0;
		if (value == NULL)
			goto cleanup;
	} else {
		/* Request to create an existing attribute? */
		error = -EEXIST;
		if (flags & XATTR_CREATE)
			goto cleanup;
		if (!here->e_value_block && here->e_value_size) {
			size_t size = le32_to_cpu(here->e_value_size);

			if (le16_to_cpu(here->e_value_offs) + size >
			    sb->s_blocksize || size > sb->s_blocksize)
				goto bad_block;
			/* The old value's space will be reclaimed, so it
			 * counts toward the space available. */
			free += EXT3_XATTR_SIZE(size);
		}
		free += EXT3_XATTR_LEN(name_len);
	}
	error = -ENOSPC;
	if (free < EXT3_XATTR_LEN(name_len) + EXT3_XATTR_SIZE(value_len))
		goto cleanup;

	/* Here we know that we can set the new attribute. */

	if (header) {
		/* assert(header == HDR(bh)); */
		if (header->h_refcount != cpu_to_le32(1))
			goto skip_get_write_access;
		/* ext3_journal_get_write_access() requires an unlocked bh,
		   which complicates things here. */
		error = ext3_journal_get_write_access(handle, bh);
		if (error)
			goto cleanup;
		lock_buffer(bh);
		/* Re-check under the bh lock: another inode may have taken
		 * a reference since the unlocked test above. */
		if (header->h_refcount == cpu_to_le32(1)) {
			ea_bdebug(bh, "modifying in-place");
			ext3_xattr_cache_remove(bh);
			/* keep the buffer locked while modifying it. */
		} else {
			int offset;

			unlock_buffer(bh);
			journal_release_buffer(handle, bh);
		skip_get_write_access:
			/* The block is shared: modify a private copy. */
			ea_bdebug(bh, "cloning");
			header = kmalloc(bh->b_size, GFP_KERNEL);
			error = -ENOMEM;
			if (header == NULL)
				goto cleanup;
			memcpy(header, HDR(bh), bh->b_size);
			header->h_refcount = cpu_to_le32(1);
			/* Rebase here/last from bh->b_data into the copy. */
			offset = (char *)header - bh->b_data;
			here = ENTRY((char *)here + offset);
			last = ENTRY((char *)last + offset);
		}
	} else {
		/* Allocate a buffer where we construct the new block. */
		header = kmalloc(sb->s_blocksize, GFP_KERNEL);
		error = -ENOMEM;
		if (header == NULL)
			goto cleanup;
		memset(header, 0, sb->s_blocksize);
		end = (char *)header + sb->s_blocksize;
		header->h_magic = cpu_to_le32(EXT3_XATTR_MAGIC);
		header->h_blocks = header->h_refcount = cpu_to_le32(1);
		last = here = ENTRY(header+1);
	}

	/* Iff we are modifying the block in-place, bh is locked here. */

	if (not_found) {
		/* Insert the new name. */
		int size = EXT3_XATTR_LEN(name_len);
		int rest = (char *)last - (char *)here;
		memmove((char *)here + size, here, rest);
		memset(here, 0, size);
		here->e_name_index = name_index;
		here->e_name_len = name_len;
		memcpy(here->e_name, name, name_len);
	} else {
		if (!here->e_value_block && here->e_value_size) {
			char *first_val = (char *)header + min_offs;
			int offs = le16_to_cpu(here->e_value_offs);
			char *val = (char *)header + offs;
			size_t size = EXT3_XATTR_SIZE(
				le32_to_cpu(here->e_value_size));

			if (size == EXT3_XATTR_SIZE(value_len)) {
				/* The old and the new value have the same
				   size. Just replace. */
				here->e_value_size = cpu_to_le32(value_len);
				memset(val + size - EXT3_XATTR_PAD, 0,
				       EXT3_XATTR_PAD); /* Clear pad bytes. */
				memcpy(val, value, value_len);
				goto skip_replace;
			}

			/* Remove the old value. */
			memmove(first_val + size, first_val, val - first_val);
			memset(first_val, 0, size);
			here->e_value_offs = 0;
			min_offs += size;

			/* Adjust all value offsets. */
			last = ENTRY(header+1);
			while (!IS_LAST_ENTRY(last)) {
				int o = le16_to_cpu(last->e_value_offs);
				if (!last->e_value_block && o < offs)
					last->e_value_offs =
						cpu_to_le16(o + size);
				last = EXT3_XATTR_NEXT(last);
			}
		}
		if (value == NULL) {
			/* Remove the old name. */
			int size = EXT3_XATTR_LEN(name_len);
			last = ENTRY((char *)last - size);
			memmove(here, (char*)here + size,
				(char*)last - (char*)here);
			memset(last, 0, size);
		}
	}

	if (value != NULL) {
		/* Insert the new value. */
		here->e_value_size = cpu_to_le32(value_len);
		if (value_len) {
			size_t size = EXT3_XATTR_SIZE(value_len);
			char *val = (char *)header + min_offs - size;
			here->e_value_offs =
				cpu_to_le16((char *)val - (char *)header);
			memset(val + size - EXT3_XATTR_PAD, 0,
			       EXT3_XATTR_PAD); /* Clear the pad bytes. */
			memcpy(val, value, value_len);
		}
	}

skip_replace:
	if (IS_LAST_ENTRY(ENTRY(header+1))) {
		/* This block is now empty. */
		if (bh && header == HDR(bh))
			unlock_buffer(bh);  /* we were modifying in-place. */
		error = ext3_xattr_set_handle2(handle, inode, bh, NULL);
	} else {
		ext3_xattr_rehash(header, here);
		if (bh && header == HDR(bh))
			unlock_buffer(bh);  /* we were modifying in-place. */
		error = ext3_xattr_set_handle2(handle, inode, bh, header);
	}

cleanup:
	brelse(bh);
	/* Only free header when it is a private copy, not bh's data. */
	if (!(bh && header == HDR(bh)))
		kfree(header);
	up_write(&EXT3_I(inode)->xattr_sem);

	return error;
}
746 |
|
747 |
/*
 * Second half of ext3_xattr_set_handle(): Update the file system.
 *
 * header is NULL to drop the inode's EA block entirely; otherwise it is
 * the new block contents to write.  old_bh (may be NULL) is the inode's
 * previous EA block, which is freed or has its refcount dropped when it
 * is no longer used.
 */
static int
ext3_xattr_set_handle2(handle_t *handle, struct inode *inode,
		struct buffer_head *old_bh, struct ext3_xattr_header *header)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh = NULL;
	int error;

	if (header) {
		new_bh = ext3_xattr_cache_find(handle, inode, header);
		if (new_bh) {
			/* We found an identical block in the cache. The
			 * block returned is locked. The old block will
			 * be released after updating the inode.
			 */
			ea_bdebug(new_bh, "%s block %ld",
				(old_bh == new_bh) ? "keeping" : "reusing",
				new_bh->b_blocknr);

			error = -EDQUOT;
			/* How can we enforce the allocation? */
			if (DQUOT_ALLOC_BLOCK(inode, 1)) {
				unlock_buffer(new_bh);
				journal_release_buffer(handle, new_bh);
				goto cleanup;
			}
			/* Share the existing block: take one reference. */
			HDR(new_bh)->h_refcount = cpu_to_le32(
				le32_to_cpu(HDR(new_bh)->h_refcount) + 1);
			ea_bdebug(new_bh, "refcount now=%d",
				le32_to_cpu(HDR(new_bh)->h_refcount));
			unlock_buffer(new_bh);
		} else if (old_bh && header == HDR(old_bh)) {
			/* Keep this block. No need to lock the block as we
			 * don't need to change the reference count. */
			new_bh = old_bh;
			get_bh(new_bh);
			ext3_xattr_cache_insert(new_bh);
		} else {
			/* We need to allocate a new block */
			int goal = le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->
					       s_first_data_block) +
				   EXT3_I(inode)->i_block_group *
				   EXT3_BLOCKS_PER_GROUP(inode->i_sb);
			/* How can we enforce the allocation? */
			int block = ext3_new_block(handle, inode, goal, 0, 0,
						   &error);
			if (error)
				goto cleanup;
			ea_idebug(inode, "creating block %d", block);

			new_bh = sb_getblk(sb, block);
			if (!new_bh) {
getblk_failed:			ext3_free_blocks(handle, inode, block, 1);
				error = -EIO;
				goto cleanup;
			}
			lock_buffer(new_bh);
			error = ext3_journal_get_create_access(handle, new_bh);
			if (error) {
				unlock_buffer(new_bh);
				goto getblk_failed;
			}
			memcpy(new_bh->b_data, header, new_bh->b_size);
			mark_buffer_uptodate(new_bh, 1);
			unlock_buffer(new_bh);
			ext3_xattr_cache_insert(new_bh);

			/* Advertise the EA compat feature on first use. */
			ext3_xattr_update_super_block(handle, sb);
		}
		error = ext3_journal_dirty_metadata(handle, new_bh);
		if (error)
			goto cleanup;
	}

	/* Update the inode. */
	EXT3_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
	inode->i_ctime = CURRENT_TIME;
	ext3_mark_inode_dirty(handle, inode);
	if (IS_SYNC(inode))
		handle->h_sync = 1;

	error = 0;
	if (old_bh && old_bh != new_bh) {
		/*
		 * If there was an old block and we are no longer using it,
		 * release the old block.
		 */

		error = ext3_journal_get_write_access(handle, old_bh);
		if (error)
			goto cleanup;
		lock_buffer(old_bh);
		if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) {
			/* Free the old block. */
			ea_bdebug(old_bh, "freeing");
			ext3_free_blocks(handle, inode, old_bh->b_blocknr, 1);

			/* ext3_forget() calls bforget() for us, but we
			   let our caller release old_bh, so we need to
			   duplicate the handle before. */
			get_bh(old_bh);
			ext3_forget(handle, 1, inode, old_bh,old_bh->b_blocknr);
		} else {
			/* Decrement the refcount only. */
			HDR(old_bh)->h_refcount = cpu_to_le32(
				le32_to_cpu(HDR(old_bh)->h_refcount) - 1);
			DQUOT_FREE_BLOCK(inode, 1);
			ext3_journal_dirty_metadata(handle, old_bh);
			ea_bdebug(old_bh, "refcount now=%d",
				le32_to_cpu(HDR(old_bh)->h_refcount));
		}
		unlock_buffer(old_bh);
	}

cleanup:
	brelse(new_bh);

	return error;
}
869 |
|
870 |
/* |
871 |
* ext3_xattr_set() |
872 |
* |
873 |
* Like ext3_xattr_set_handle, but start from an inode. This extended |
874 |
* attribute modification is a filesystem transaction by itself. |
875 |
* |
876 |
* Returns 0, or a negative error number on failure. |
877 |
*/ |
878 |
int |
879 |
ext3_xattr_set(struct inode *inode, int name_index, const char *name, |
880 |
const void *value, size_t value_len, int flags) |
881 |
{ |
882 |
handle_t *handle; |
883 |
int error, error2; |
884 |
|
885 |
lock_kernel(); |
886 |
handle = ext3_journal_start(inode, EXT3_XATTR_TRANS_BLOCKS); |
887 |
error = PTR_ERR(handle); |
888 |
if (IS_ERR(handle)) |
889 |
goto cleanup; |
890 |
error = ext3_xattr_set_handle(handle, inode, name_index, name, |
891 |
value, value_len, flags); |
892 |
error2 = ext3_journal_stop(handle, inode); |
893 |
if (!error) |
894 |
error = error2; |
895 |
|
896 |
cleanup: |
897 |
unlock_kernel(); |
898 |
return error; |
899 |
} |
900 |
|
901 |
/*
 * ext3_xattr_delete_inode()
 *
 * Free extended attribute resources associated with this inode. This
 * is called immediately before an inode is freed.
 */
void
ext3_xattr_delete_inode(handle_t *handle, struct inode *inode)
{
	struct buffer_head *bh = NULL;

	down_write(&EXT3_I(inode)->xattr_sem);
	if (!EXT3_I(inode)->i_file_acl)
		goto cleanup;
	bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
	if (!bh) {
		ext3_error(inode->i_sb, "ext3_xattr_delete_inode",
			"inode %ld: block %d read error", inode->i_ino,
			EXT3_I(inode)->i_file_acl);
		goto cleanup;
	}
	ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
	/* Sanity-check the on-disk header before touching the block. */
	if (HDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
		ext3_error(inode->i_sb, "ext3_xattr_delete_inode",
			"inode %ld: bad block %d", inode->i_ino,
			EXT3_I(inode)->i_file_acl);
		goto cleanup;
	}
	if (ext3_journal_get_write_access(handle, bh) != 0)
		goto cleanup;
	lock_buffer(bh);
	if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
		/* Last reference: free the block itself. */
		ext3_xattr_cache_remove(bh);
		ext3_free_blocks(handle, inode, EXT3_I(inode)->i_file_acl, 1);

		/* ext3_forget() calls bforget() for us, but we release
		   bh below, so we need to duplicate the handle before. */
		get_bh(bh);
		ext3_forget(handle, 1, inode, bh, EXT3_I(inode)->i_file_acl);
	} else {
		/* Block is shared with other inodes: drop our reference. */
		HDR(bh)->h_refcount = cpu_to_le32(
			le32_to_cpu(HDR(bh)->h_refcount) - 1);
		ext3_journal_dirty_metadata(handle, bh);
		if (IS_SYNC(inode))
			handle->h_sync = 1;
		DQUOT_FREE_BLOCK(inode, 1);
	}
	ea_bdebug(bh, "refcount now=%d", le32_to_cpu(HDR(bh)->h_refcount));
	unlock_buffer(bh);
	EXT3_I(inode)->i_file_acl = 0;

cleanup:
	brelse(bh);
	up_write(&EXT3_I(inode)->xattr_sem);
}
957 |
|
958 |
/*
 * ext3_xattr_put_super()
 *
 * This is called when a file system is unmounted.
 */
void
ext3_xattr_put_super(struct super_block *sb)
{
#ifdef CONFIG_EXT3_FS_XATTR_SHARING
	/* Drop the cached EA-block entries belonging to this device;
	   entries for other mounted devices stay in the shared mbcache. */
	mb_cache_shrink(ext3_xattr_cache, sb->s_dev);
#endif
}
970 |
|
971 |
#ifdef CONFIG_EXT3_FS_XATTR_SHARING |
972 |
|
973 |
/* |
974 |
* ext3_xattr_cache_insert() |
975 |
* |
976 |
* Create a new entry in the extended attribute cache, and insert |
977 |
* it unless such an entry is already in the cache. |
978 |
* |
979 |
* Returns 0, or a negative error number on failure. |
980 |
*/ |
981 |
static int |
982 |
ext3_xattr_cache_insert(struct buffer_head *bh) |
983 |
{ |
984 |
__u32 hash = le32_to_cpu(HDR(bh)->h_hash); |
985 |
struct mb_cache_entry *ce; |
986 |
int error; |
987 |
|
988 |
ce = mb_cache_entry_alloc(ext3_xattr_cache); |
989 |
if (!ce) |
990 |
return -ENOMEM; |
991 |
error = mb_cache_entry_insert(ce, bh->b_dev, bh->b_blocknr, &hash); |
992 |
if (error) { |
993 |
mb_cache_entry_free(ce); |
994 |
if (error == -EBUSY) { |
995 |
ea_bdebug(bh, "already in cache (%d cache entries)", |
996 |
atomic_read(&ext3_xattr_cache->c_entry_count)); |
997 |
error = 0; |
998 |
} |
999 |
} else { |
1000 |
ea_bdebug(bh, "inserting [%x] (%d cache entries)", (int)hash, |
1001 |
atomic_read(&ext3_xattr_cache->c_entry_count)); |
1002 |
mb_cache_entry_release(ce); |
1003 |
} |
1004 |
return error; |
1005 |
} |
1006 |
|
1007 |
/* |
1008 |
* ext3_xattr_cmp() |
1009 |
* |
1010 |
* Compare two extended attribute blocks for equality. |
1011 |
* |
1012 |
* Returns 0 if the blocks are equal, 1 if they differ, and |
1013 |
* a negative error number on errors. |
1014 |
*/ |
1015 |
static int |
1016 |
ext3_xattr_cmp(struct ext3_xattr_header *header1, |
1017 |
struct ext3_xattr_header *header2) |
1018 |
{ |
1019 |
struct ext3_xattr_entry *entry1, *entry2; |
1020 |
|
1021 |
entry1 = ENTRY(header1+1); |
1022 |
entry2 = ENTRY(header2+1); |
1023 |
while (!IS_LAST_ENTRY(entry1)) { |
1024 |
if (IS_LAST_ENTRY(entry2)) |
1025 |
return 1; |
1026 |
if (entry1->e_hash != entry2->e_hash || |
1027 |
entry1->e_name_len != entry2->e_name_len || |
1028 |
entry1->e_value_size != entry2->e_value_size || |
1029 |
memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len)) |
1030 |
return 1; |
1031 |
if (entry1->e_value_block != 0 || entry2->e_value_block != 0) |
1032 |
return -EIO; |
1033 |
if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs), |
1034 |
(char *)header2 + le16_to_cpu(entry2->e_value_offs), |
1035 |
le32_to_cpu(entry1->e_value_size))) |
1036 |
return 1; |
1037 |
|
1038 |
entry1 = EXT3_XATTR_NEXT(entry1); |
1039 |
entry2 = EXT3_XATTR_NEXT(entry2); |
1040 |
} |
1041 |
if (!IS_LAST_ENTRY(entry2)) |
1042 |
return 1; |
1043 |
return 0; |
1044 |
} |
1045 |
|
1046 |
/* |
1047 |
* ext3_xattr_cache_find() |
1048 |
* |
1049 |
* Find an identical extended attribute block. |
1050 |
* |
1051 |
* Returns a pointer to the block found, or NULL if such a block was |
1052 |
* not found or an error occurred. |
1053 |
*/ |
1054 |
static struct buffer_head * |
1055 |
ext3_xattr_cache_find(handle_t *handle, struct inode *inode, |
1056 |
struct ext3_xattr_header *header) |
1057 |
{ |
1058 |
__u32 hash = le32_to_cpu(header->h_hash); |
1059 |
struct mb_cache_entry *ce; |
1060 |
|
1061 |
if (!header->h_hash) |
1062 |
return NULL; /* never share */ |
1063 |
ea_idebug(inode, "looking for cached blocks [%x]", (int)hash); |
1064 |
ce = mb_cache_entry_find_first(ext3_xattr_cache, 0, inode->i_dev, hash); |
1065 |
while (ce) { |
1066 |
struct buffer_head *bh = sb_bread(inode->i_sb, ce->e_block); |
1067 |
|
1068 |
if (!bh) { |
1069 |
ext3_error(inode->i_sb, "ext3_xattr_cache_find", |
1070 |
"inode %ld: block %ld read error", |
1071 |
inode->i_ino, ce->e_block); |
1072 |
} else { |
1073 |
/* ext3_journal_get_write_access() requires an unlocked |
1074 |
bh, which complicates things here. */ |
1075 |
if (ext3_journal_get_write_access(handle, bh) != 0) |
1076 |
return NULL; |
1077 |
lock_buffer(bh); |
1078 |
if (le32_to_cpu(HDR(bh)->h_refcount) > |
1079 |
EXT3_XATTR_REFCOUNT_MAX) { |
1080 |
ea_idebug(inode, "block %ld refcount %d>%d", |
1081 |
ce->e_block, |
1082 |
le32_to_cpu(HDR(bh)->h_refcount), |
1083 |
EXT3_XATTR_REFCOUNT_MAX); |
1084 |
} else if (!ext3_xattr_cmp(header, HDR(bh))) { |
1085 |
ea_bdebug(bh, "b_count=%d", |
1086 |
atomic_read(&(bh->b_count))); |
1087 |
mb_cache_entry_release(ce); |
1088 |
/* buffer will be unlocked by caller */ |
1089 |
return bh; |
1090 |
} |
1091 |
unlock_buffer(bh); |
1092 |
journal_release_buffer(handle, bh); |
1093 |
brelse(bh); |
1094 |
} |
1095 |
ce = mb_cache_entry_find_next(ce, 0, inode->i_dev, hash); |
1096 |
} |
1097 |
return NULL; |
1098 |
} |
1099 |
|
1100 |
/* |
1101 |
* ext3_xattr_cache_remove() |
1102 |
* |
1103 |
* Remove the cache entry of a block from the cache. Called when a |
1104 |
* block becomes invalid. |
1105 |
*/ |
1106 |
static void |
1107 |
ext3_xattr_cache_remove(struct buffer_head *bh) |
1108 |
{ |
1109 |
struct mb_cache_entry *ce; |
1110 |
|
1111 |
ce = mb_cache_entry_get(ext3_xattr_cache, bh->b_dev, bh->b_blocknr); |
1112 |
if (ce) { |
1113 |
ea_bdebug(bh, "removing (%d cache entries remaining)", |
1114 |
atomic_read(&ext3_xattr_cache->c_entry_count)-1); |
1115 |
mb_cache_entry_free(ce); |
1116 |
} else |
1117 |
ea_bdebug(bh, "no cache entry"); |
1118 |
} |
1119 |
|
1120 |
#define NAME_HASH_SHIFT 5 |
1121 |
#define VALUE_HASH_SHIFT 16 |
1122 |
|
1123 |
/* |
1124 |
* ext3_xattr_hash_entry() |
1125 |
* |
1126 |
* Compute the hash of an extended attribute. |
1127 |
*/ |
1128 |
static inline void ext3_xattr_hash_entry(struct ext3_xattr_header *header, |
1129 |
struct ext3_xattr_entry *entry) |
1130 |
{ |
1131 |
__u32 hash = 0; |
1132 |
char *name = entry->e_name; |
1133 |
int n; |
1134 |
|
1135 |
for (n=0; n < entry->e_name_len; n++) { |
1136 |
hash = (hash << NAME_HASH_SHIFT) ^ |
1137 |
(hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^ |
1138 |
*name++; |
1139 |
} |
1140 |
|
1141 |
if (entry->e_value_block == 0 && entry->e_value_size != 0) { |
1142 |
__u32 *value = (__u32 *)((char *)header + |
1143 |
le16_to_cpu(entry->e_value_offs)); |
1144 |
for (n = (le32_to_cpu(entry->e_value_size) + |
1145 |
EXT3_XATTR_ROUND) >> EXT3_XATTR_PAD_BITS; n; n--) { |
1146 |
hash = (hash << VALUE_HASH_SHIFT) ^ |
1147 |
(hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^ |
1148 |
le32_to_cpu(*value++); |
1149 |
} |
1150 |
} |
1151 |
entry->e_hash = cpu_to_le32(hash); |
1152 |
} |
1153 |
|
1154 |
#undef NAME_HASH_SHIFT |
1155 |
#undef VALUE_HASH_SHIFT |
1156 |
|
1157 |
#define BLOCK_HASH_SHIFT 16 |
1158 |
|
1159 |
/* |
1160 |
* ext3_xattr_rehash() |
1161 |
* |
1162 |
* Re-compute the extended attribute hash value after an entry has changed. |
1163 |
*/ |
1164 |
static void ext3_xattr_rehash(struct ext3_xattr_header *header, |
1165 |
struct ext3_xattr_entry *entry) |
1166 |
{ |
1167 |
struct ext3_xattr_entry *here; |
1168 |
__u32 hash = 0; |
1169 |
|
1170 |
ext3_xattr_hash_entry(header, entry); |
1171 |
here = ENTRY(header+1); |
1172 |
while (!IS_LAST_ENTRY(here)) { |
1173 |
if (!here->e_hash) { |
1174 |
/* Block is not shared if an entry's hash value == 0 */ |
1175 |
hash = 0; |
1176 |
break; |
1177 |
} |
1178 |
hash = (hash << BLOCK_HASH_SHIFT) ^ |
1179 |
(hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^ |
1180 |
le32_to_cpu(here->e_hash); |
1181 |
here = EXT3_XATTR_NEXT(here); |
1182 |
} |
1183 |
header->h_hash = cpu_to_le32(hash); |
1184 |
} |
1185 |
|
1186 |
#undef BLOCK_HASH_SHIFT |
1187 |
|
1188 |
int __init |
1189 |
init_ext3_xattr(void) |
1190 |
{ |
1191 |
ext3_xattr_cache = mb_cache_create("ext3_xattr", NULL, |
1192 |
sizeof(struct mb_cache_entry) + |
1193 |
sizeof(struct mb_cache_entry_index), 1, 61); |
1194 |
if (!ext3_xattr_cache) |
1195 |
return -ENOMEM; |
1196 |
|
1197 |
return 0; |
1198 |
} |
1199 |
|
1200 |
void |
1201 |
exit_ext3_xattr(void) |
1202 |
{ |
1203 |
if (ext3_xattr_cache) |
1204 |
mb_cache_destroy(ext3_xattr_cache); |
1205 |
ext3_xattr_cache = NULL; |
1206 |
} |
1207 |
|
1208 |
#else /* CONFIG_EXT3_FS_XATTR_SHARING */ |
1209 |
|
1210 |
int __init
init_ext3_xattr(void)
{
	/* EA-block sharing is not configured; no cache to set up. */
	return 0;
}
1215 |
|
1216 |
void
exit_ext3_xattr(void)
{
	/* EA-block sharing is not configured; no cache to tear down. */
}
1220 |
|
1221 |
#endif /* CONFIG_EXT3_FS_XATTR_SHARING */ |