Line 0
Link Here
|
|
|
1 |
/* |
2 |
* grsecurity/gracl.c |
3 |
* Copyright Brad Spengler 2001, 2002, 2003 |
4 |
* |
5 |
*/ |
6 |
|
7 |
#include <linux/kernel.h> |
8 |
#include <linux/sched.h> |
9 |
#include <linux/mm.h> |
10 |
#include <linux/file.h> |
11 |
#include <linux/fs.h> |
12 |
#include <linux/proc_fs.h> |
13 |
#include <linux/smp_lock.h> |
14 |
#include <linux/slab.h> |
15 |
#include <linux/vmalloc.h> |
16 |
#include <linux/types.h> |
17 |
#include <linux/capability.h> |
18 |
#include <linux/sysctl.h> |
19 |
#include <linux/gracl.h> |
20 |
#include <linux/gralloc.h> |
21 |
#include <linux/grsecurity.h> |
22 |
#include <linux/grinternal.h> |
23 |
|
24 |
#include <asm/uaccess.h> |
25 |
#include <asm/errno.h> |
26 |
#include <asm/mman.h> |
27 |
|
28 |
/* Role hash table and linked list of all loaded roles. */
static struct acl_role_db acl_role_set;
static struct acl_role_label *role_list_head;
/* Hash tables over object/subject pathnames: by name, and by inode/device. */
static struct name_db name_set;
static struct name_db inodev_set;

/* Role used when no user/group/special role matches. */
static struct acl_role_label *default_role;

/* Identifier of the currently active special role, if any. */
static u16 acl_sp_role_value;

/* Serializes userspace interaction with the ACL device. */
static DECLARE_MUTEX(gr_dev_sem);
rwlock_t gr_inode_lock = RW_LOCK_UNLOCKED;

/* Per-CPU scratch pages used for pathname resolution (see gr_to_filename*). */
extern char *gr_shared_page[4][NR_CPUS];
struct gr_arg *gr_usermode;

/* Global state flags for the ACL system; GR_READY means a policy is loaded. */
static unsigned long gr_status = GR_STATUS_INIT;

extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
extern void gr_clear_learn_entries(void);

#ifdef CONFIG_GRKERNSEC_RESLOG
extern __inline__ void gr_log_resource(const struct task_struct *task,
				       const int res,
				       const unsigned long wanted);
#endif

/* Salt and checksum for authenticating policy loads from userspace. */
unsigned char *gr_system_salt;
unsigned char *gr_system_sum;

/* Passwords for special roles, copied in from userspace at policy load. */
static struct sprole_pw **acl_special_roles = NULL;
static __u16 num_sprole_pws = 0;

static struct acl_role_label *kernel_role = NULL;

/* The following are used to keep a place held in the hash table when we move
   entries around. They can be replaced during insert. */
static struct acl_subject_label *deleted_subject;
static struct acl_object_label *deleted_object;
static struct name_entry *deleted_inodev;

/* for keeping track of the last and final allocated subjects, since
   nested subject parsing is tricky
 */
static struct acl_subject_label *s_last = NULL;
static struct acl_subject_label *s_final = NULL;

/* Failed-authentication throttling state. */
static unsigned int gr_auth_attempts = 0;
static unsigned long gr_auth_expires = 0UL;

extern int gr_init_uidset(void);
extern void gr_free_uidset(void);
extern void gr_remove_uid(uid_t uid);
extern int gr_find_uid(uid_t uid);
82 |
|
83 |
/*
 * gr_acl_is_enabled - report whether an ACL policy is loaded and active.
 * Returns the GR_READY bit of gr_status (nonzero when enabled).
 */
__inline__ int
gr_acl_is_enabled(void)
{
	return (gr_status & GR_READY);
}
88 |
|
89 |
__inline__ int |
90 |
gr_acl_tpe_check(void) |
91 |
{ |
92 |
if (unlikely(!(gr_status & GR_READY))) |
93 |
return 0; |
94 |
if (current->role->roletype & GR_ROLE_TPE) |
95 |
return 1; |
96 |
else |
97 |
return 0; |
98 |
} |
99 |
|
100 |
/*
 * gr_handle_rawio - decide whether raw access to a block device must be
 * denied.  Returns 1 (deny) when @inode is a block device, the caller
 * lacks CAP_SYS_RAWIO, and either the ACL system is active or (with
 * chroot capability restrictions configured) the caller is chrooted.
 * Returns 0 to allow.
 */
int
gr_handle_rawio(const struct inode *inode)
{
	if (inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO) &&
	    ((gr_status & GR_READY)
#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
	     || (grsec_enable_chroot_caps && proc_is_chrooted(current))
#endif
	    ))
		return 1;
	return 0;
}
112 |
|
113 |
|
114 |
/*
 * gr_streq - fast equality test for two strings of known length.
 * Returns 1 if equal, 0 otherwise.  Compares a word at a time, then the
 * remaining bytes.  The length mismatch is hinted "likely" because this
 * is used by hash-probe loops where most probes miss.
 * NOTE(review): the word-wise loads assume unaligned access is tolerated
 * by the architecture - verify on strict-alignment targets.
 */
static __inline__ int
gr_streq(const char *a, const char *b, const __u16 lena, const __u16 lenb)
{
	int i;
	unsigned long *l1;
	unsigned long *l2;
	unsigned char *c1;
	unsigned char *c2;
	int num_longs;

	/* different lengths can never be equal */
	if (likely(lena != lenb))
		return 0;

	l1 = (unsigned long *)a;
	l2 = (unsigned long *)b;

	num_longs = lena / sizeof(unsigned long);

	/* compare full machine words first */
	for (i = num_longs; i--; l1++, l2++) {
		if (unlikely(*l1 != *l2))
			return 0;
	}

	c1 = (unsigned char *) l1;
	c2 = (unsigned char *) l2;

	/* then the trailing bytes that did not fill a word */
	i = lena - (num_longs * sizeof(unsigned long));

	for (; i--; c1++, c2++) {
		if (unlikely(*c1 != *c2))
			return 0;
	}

	return 1;
}
149 |
|
150 |
/*
 * d_real_path - resolve a dentry/vfsmount pair into an absolute pathname
 * rooted at the process reaper's (init's) filesystem root, so a chroot
 * cannot disguise the true path.  @buf/@buflen supply the scratch buffer;
 * returns __d_path's result (a pointer into @buf).
 */
static __inline__ char *
d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
	    char *buf, int buflen)
{
	char *res;
	struct dentry *our_dentry;
	struct vfsmount *our_mount;
	struct vfsmount *rootmnt;
	struct dentry *root;

	/* cast away const for the __d_path call below */
	our_dentry = (struct dentry *) dentry;
	our_mount = (struct vfsmount *) vfsmnt;

	/* take references on init's root dentry/mount under its fs lock */
	read_lock(&child_reaper->fs->lock);
	rootmnt = mntget(child_reaper->fs->rootmnt);
	root = dget(child_reaper->fs->root);
	read_unlock(&child_reaper->fs->lock);

	/* __d_path walks the dcache and requires dcache_lock */
	spin_lock(&dcache_lock);
	res = __d_path(our_dentry, our_mount, root, rootmnt, buf, buflen);
	spin_unlock(&dcache_lock);

	/* drop the references taken above */
	dput(root);
	mntput(rootmnt);
	return res;
}
175 |
|
176 |
char * |
177 |
gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt) |
178 |
{ |
179 |
return d_real_path(dentry, mnt, gr_shared_page[0][smp_processor_id()], |
180 |
PAGE_SIZE); |
181 |
} |
182 |
|
183 |
char * |
184 |
gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt) |
185 |
{ |
186 |
return d_real_path(dentry, mnt, gr_shared_page[1][smp_processor_id()], |
187 |
PAGE_SIZE); |
188 |
} |
189 |
|
190 |
char * |
191 |
gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt) |
192 |
{ |
193 |
return d_real_path(dentry, mnt, gr_shared_page[2][smp_processor_id()], |
194 |
PAGE_SIZE); |
195 |
} |
196 |
|
197 |
char * |
198 |
gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt) |
199 |
{ |
200 |
return d_real_path(dentry, mnt, gr_shared_page[3][smp_processor_id()], |
201 |
PAGE_SIZE); |
202 |
} |
203 |
|
204 |
__inline__ __u32 |
205 |
to_gr_audit(const __u32 reqmode) |
206 |
{ |
207 |
__u32 retmode = 0; |
208 |
|
209 |
retmode |= (reqmode & GR_READ) ? GR_AUDIT_READ : 0; |
210 |
retmode |= (reqmode & GR_WRITE) ? GR_AUDIT_WRITE | GR_AUDIT_APPEND : 0; |
211 |
retmode |= (reqmode & GR_APPEND) ? GR_AUDIT_APPEND : 0; |
212 |
retmode |= (reqmode & GR_EXEC) ? GR_AUDIT_EXEC : 0; |
213 |
retmode |= (reqmode & GR_INHERIT) ? GR_AUDIT_INHERIT : 0; |
214 |
retmode |= (reqmode & GR_FIND) ? GR_AUDIT_FIND : 0; |
215 |
retmode |= (reqmode & GR_SETID) ? GR_AUDIT_SETID : 0; |
216 |
retmode |= (reqmode & GR_CREATE) ? GR_AUDIT_CREATE : 0; |
217 |
retmode |= (reqmode & GR_DELETE) ? GR_AUDIT_DELETE : 0; |
218 |
|
219 |
return retmode; |
220 |
} |
221 |
|
222 |
/*
 * lookup_acl_role_label - find the role to apply to @task, given its
 * @uid and @gid.
 *
 * The role table is open-addressed; collisions are resolved by probing
 * with an escalating power-of-two stride (1 << i, with i cycling mod 32).
 * A user role is tried first; if none matches (or the matching role has
 * an allowed-IP list that does not cover task->curr_ip) the gid is tried;
 * failing that, the default role is returned.
 */
__inline__ struct acl_role_label *
lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
		      const gid_t gid)
{
	unsigned long index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
	struct acl_role_label *match;
	struct role_allowed_ip *ipp;
	__u8 i = 0;

	match = acl_role_set.r_hash[index];

	/* probe for a user role with this uid */
	while (match
	       && (match->uidgid != uid || !(match->roletype & GR_ROLE_USER))) {
		index = (index + (1 << i)) % acl_role_set.r_size;
		match = acl_role_set.r_hash[index];
		i = (i + 1) % 32;
	}

	if (!match || match->uidgid != uid || !(match->roletype & GR_ROLE_USER)) {
		/* no user role: fall back to a group role for @gid */
	      try_group:
		index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
		match = acl_role_set.r_hash[index];
		i = 0;

		while (match
		       && (match->uidgid != gid
			   || !(match->roletype & GR_ROLE_GROUP))) {
			index = (index + (1 << i)) % acl_role_set.r_size;
			match = acl_role_set.r_hash[index];
			i = (i + 1) % 32;
		}

		if (!match || match->uidgid != gid
		    || !(match->roletype & GR_ROLE_GROUP))
			match = default_role;
		else if (likely(!match->allowed_ips)) {
			/* group role with no IP restriction: use it */
			return match;
		} else {
			/* group role restricted by source IP */
			for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
				if (likely
				    ((task->curr_ip & ipp->netmask) ==
				     (ipp->addr & ipp->netmask)))
					return match;
			}
			match = default_role;
		}
	} else if (likely(!match->allowed_ips)) {
		/* user role with no IP restriction: use it */
		return match;
	} else {
		/* user role restricted by source IP; on mismatch retry as group */
		for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
			if (likely
			    ((task->curr_ip & ipp->netmask) ==
			     (ipp->addr & ipp->netmask)))
				return match;
		}
		goto try_group;
	}

	return match;
}
282 |
|
283 |
/*
 * lookup_acl_subj_label - find the subject label for the file identified
 * by @ino/@dev within @role's subject hash table.  Entries marked
 * GR_DELETED and the deleted_subject placeholder are skipped.  Returns
 * the matching label, or NULL if none exists.
 */
__inline__ struct acl_subject_label *
lookup_acl_subj_label(const ino_t ino, const kdev_t dev,
		      const struct acl_role_label *role)
{
	unsigned long subj_size = role->subj_hash_size;
	struct acl_subject_label **s_hash = role->subj_hash;
	unsigned long index = fhash(ino, dev, subj_size);
	struct acl_subject_label *match;
	__u8 i = 0;

	match = s_hash[index];

	/* open-address probe with escalating power-of-two stride */
	while (match && (match->inode != ino || match->device != dev ||
	       (match->mode & GR_DELETED))) {
		index = (index + (1 << i)) % subj_size;
		match = s_hash[index];
		i = (i + 1) % 32;
	}

	/* re-verify: the probe may have stopped on an empty slot */
	if (unlikely(match && (match != deleted_subject) &&
		     (match->inode == ino) && (match->device == dev) &&
		     !(match->mode & GR_DELETED)))
		return match;
	else
		return NULL;
}
309 |
|
310 |
/*
 * lookup_acl_obj_label - find the (live) object label for @ino/@dev in
 * @subj's object hash table.  GR_DELETED entries and the deleted_object
 * placeholder are skipped.  Returns the label or NULL.
 */
static __inline__ struct acl_object_label *
lookup_acl_obj_label(const ino_t ino, const kdev_t dev,
		     const struct acl_subject_label *subj)
{
	unsigned long obj_size = subj->obj_hash_size;
	struct acl_object_label **o_hash = subj->obj_hash;
	unsigned long index = fhash(ino, dev, obj_size);
	struct acl_object_label *match;
	__u8 i = 0;

	match = o_hash[index];

	/* open-address probe with escalating power-of-two stride */
	while (match && (match->inode != ino || match->device != dev ||
	       (match->mode & GR_DELETED))) {
		index = (index + (1 << i)) % obj_size;
		match = o_hash[index];
		i = (i + 1) % 32;
	}

	/* re-verify: the probe may have stopped on an empty slot */
	if (unlikely(match && (match != deleted_object) &&
		     (match->inode == ino) && (match->device == dev) &&
		     !(match->mode & GR_DELETED)))
		return match;
	else
		return NULL;
}
336 |
|
337 |
/*
 * lookup_acl_obj_label_create - lookup variant used on the creation path.
 * First searches for a label for @ino/@dev that is marked GR_DELETED
 * (i.e. a label left behind for a path whose file was removed, which a
 * newly created file should reuse); if none is found, falls back to a
 * normal live-entry lookup.  Returns the label or NULL.
 */
static __inline__ struct acl_object_label *
lookup_acl_obj_label_create(const ino_t ino, const kdev_t dev,
			    const struct acl_subject_label *subj)
{
	unsigned long obj_size = subj->obj_hash_size;
	struct acl_object_label **o_hash = subj->obj_hash;
	unsigned long index = fhash(ino, dev, obj_size);
	struct acl_object_label *match;
	__u8 i = 0;

	match = o_hash[index];

	/* pass 1: probe for a GR_DELETED entry for this inode/device */
	while (match && (match->inode != ino || match->device != dev ||
	       !(match->mode & GR_DELETED))) {
		index = (index + (1 << i)) % obj_size;
		match = o_hash[index];
		i = (i + 1) % 32;
	}

	if (unlikely(match && (match != deleted_object) &&
		     (match->inode == ino) && (match->device == dev) &&
		     (match->mode & GR_DELETED)))
		return match;

	/* pass 2: restart the probe looking for a live entry */
	i = 0;
	index = fhash(ino, dev, obj_size);
	match = o_hash[index];

	while (match && (match->inode != ino || match->device != dev ||
	       (match->mode & GR_DELETED))) {
		index = (index + (1 << i)) % obj_size;
		match = o_hash[index];
		i = (i + 1) % 32;
	}

	if (unlikely(match && (match != deleted_object) &&
		     (match->inode == ino) && (match->device == dev) &&
		     !(match->mode & GR_DELETED)))
		return match;
	else
		return NULL;
}
379 |
|
380 |
/*
 * lookup_name_entry - find the name_entry whose pathname equals @name in
 * the name hash table.  Returns the entry, or NULL if absent.
 */
static __inline__ struct name_entry *
lookup_name_entry(const char *name)
{
	__u16 len = strlen(name);
	unsigned long index = nhash(name, len, name_set.n_size);
	struct name_entry *match;
	__u8 i = 0;

	match = name_set.n_hash[index];

	/* open-address probe, comparing by length-checked string equality */
	while (match && !gr_streq(match->name, name, match->len, len)) {
		index = (index + (1 << i)) % name_set.n_size;
		match = name_set.n_hash[index];
		i = (i + 1) % 32;
	}

	if (unlikely(!match || !gr_streq(match->name, name, match->len, len)))
		return NULL;
	else
		return match;
}
401 |
|
402 |
/*
 * lookup_inodev_entry - find the name_entry for @ino/@dev in the
 * inode/device hash table (the inverse index of the name table).
 * The deleted_inodev placeholder never matches.  Returns NULL if absent.
 */
static __inline__ struct name_entry *
lookup_inodev_entry(const ino_t ino, const kdev_t dev)
{
	unsigned long index = fhash(ino, dev, inodev_set.n_size);
	struct name_entry *match;
	__u8 i = 0;

	match = inodev_set.n_hash[index];

	/* open-address probe with escalating power-of-two stride */
	while (match && (match->inode != ino || match->device != dev)) {
		index = (index + (1 << i)) % inodev_set.n_size;
		match = inodev_set.n_hash[index];
		i = (i + 1) % 32;
	}

	if (unlikely(match && (match != deleted_inodev) &&
		     (match->inode == ino) && (match->device == dev)))
		return match;
	else
		return NULL;
}
423 |
|
424 |
/*
 * insert_inodev_entry - insert @nentry into the inode/device hash table.
 * Probes until an empty slot or a deleted_inodev placeholder is found
 * (placeholders may be overwritten).  Assumes the table has a free slot;
 * tables are sized to stay at most half full (see init_variables).
 */
static void
insert_inodev_entry(struct name_entry *nentry)
{
	unsigned long index = fhash(nentry->inode, nentry->device,
				    inodev_set.n_size);
	struct name_entry **curr;
	__u8 i = 0;

	curr = &inodev_set.n_hash[index];

	while (*curr && *curr != deleted_inodev) {
		index = (index + (1 << i)) % inodev_set.n_size;
		curr = &inodev_set.n_hash[index];
		i = (i + 1) % 32;
	}

	*curr = nentry;

	return;
}
444 |
|
445 |
/*
 * insert_acl_role_label - insert @role into the role hash table, keyed
 * by its uid/gid and user/group role type.  Probes to the first empty
 * slot; assumes a free slot exists (tables are kept at most half full).
 */
static void
insert_acl_role_label(struct acl_role_label *role)
{
	unsigned long index =
	    rhash(role->uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
	struct acl_role_label **curr;
	__u8 i = 0;

	curr = &acl_role_set.r_hash[index];

	while (*curr) {
		index = (index + (1 << i)) % acl_role_set.r_size;
		curr = &acl_role_set.r_hash[index];
		i = (i + 1) % 32;
	}

	*curr = role;

	return;
}
465 |
|
466 |
/*
 * insert_name_entry - add @name (with its @inode/@device identity) to the
 * name hash table, and mirror it into the inode/device table.  If an
 * entry with the same name already exists, nothing is inserted.
 * Returns 1 on success (or duplicate), 0 on allocation failure.
 * Note: the table keeps a pointer to @name; the string is not copied.
 */
static int
insert_name_entry(char *name, const ino_t inode, const kdev_t device)
{
	struct name_entry **curr;
	__u8 i = 0;
	__u16 len = strlen(name);
	unsigned long index = nhash(name, len, name_set.n_size);

	curr = &name_set.n_hash[index];

	/* probe until an empty slot or an entry with an equal name */
	while (*curr && !gr_streq((*curr)->name, name, (*curr)->len, len)) {
		index = (index + (1 << i)) % name_set.n_size;
		curr = &name_set.n_hash[index];
		i = (i + 1) % 32;
	}

	if (!(*curr)) {
		struct name_entry *nentry =
		    acl_alloc(sizeof (struct name_entry));
		if (!nentry)
			return 0;
		nentry->name = name;
		nentry->inode = inode;
		nentry->device = device;
		nentry->len = len;
		*curr = nentry;
		/* insert us into the table searchable by inode/dev */
		insert_inodev_entry(nentry);
	}

	return 1;
}
498 |
|
499 |
static void |
500 |
insert_acl_obj_label(struct acl_object_label *obj, |
501 |
struct acl_subject_label *subj) |
502 |
{ |
503 |
unsigned long index = |
504 |
fhash(obj->inode, obj->device, subj->obj_hash_size); |
505 |
struct acl_object_label **curr; |
506 |
__u8 i = 0; |
507 |
|
508 |
curr = &subj->obj_hash[index]; |
509 |
|
510 |
while (*curr && *curr != deleted_object) { |
511 |
index = (index + (1 << i)) % subj->obj_hash_size; |
512 |
curr = &subj->obj_hash[index]; |
513 |
i = (i + 1) % 32; |
514 |
} |
515 |
|
516 |
*curr = obj; |
517 |
|
518 |
return; |
519 |
} |
520 |
|
521 |
static void |
522 |
insert_acl_subj_label(struct acl_subject_label *obj, |
523 |
struct acl_role_label *role) |
524 |
{ |
525 |
unsigned long subj_size = role->subj_hash_size; |
526 |
struct acl_subject_label **s_hash = role->subj_hash; |
527 |
unsigned long index = fhash(obj->inode, obj->device, subj_size); |
528 |
struct acl_subject_label **curr; |
529 |
__u8 i = 0; |
530 |
|
531 |
curr = &s_hash[index]; |
532 |
|
533 |
while (*curr && *curr != deleted_subject) { |
534 |
index = (index + (1 << i)) % subj_size; |
535 |
curr = &s_hash[index]; |
536 |
i = (i + 1) % 32; |
537 |
} |
538 |
|
539 |
*curr = obj; |
540 |
|
541 |
return; |
542 |
} |
543 |
|
544 |
static void ** |
545 |
create_table(__u32 * len) |
546 |
{ |
547 |
unsigned long table_sizes[] = { |
548 |
7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381, |
549 |
32749, 65521, 131071, 262139, 524287, 1048573, 2097143, |
550 |
4194301, 8388593, 16777213, 33554393, 67108859, 134217689, |
551 |
268435399, 536870909, 1073741789, 2147483647 |
552 |
}; |
553 |
void *newtable = NULL; |
554 |
unsigned int pwr = 0; |
555 |
|
556 |
while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) && |
557 |
table_sizes[pwr] <= (2 * (*len))) |
558 |
pwr++; |
559 |
|
560 |
if (table_sizes[pwr] <= (2 * (*len))) |
561 |
return newtable; |
562 |
|
563 |
if ((table_sizes[pwr] * sizeof (void *)) <= PAGE_SIZE) |
564 |
newtable = |
565 |
kmalloc(table_sizes[pwr] * sizeof (void *), GFP_KERNEL); |
566 |
else |
567 |
newtable = vmalloc(table_sizes[pwr] * sizeof (void *)); |
568 |
|
569 |
*len = table_sizes[pwr]; |
570 |
|
571 |
return newtable; |
572 |
} |
573 |
|
574 |
/*
 * init_variables - set up all global state needed to load a policy:
 * the uid set, the allocation stack, the placeholder "deleted" entries,
 * and the role/name/inodev hash tables (sized for at most 50% load).
 *
 * The sizes are entry counts supplied by the userspace policy loader.
 * Returns 0 on success, 1 on any failure (note: inverted vs. the usual
 * kernel 0/-errno convention; callers treat nonzero as error).
 * NOTE(review): the num_sprole_pws parameter shadows the file-scope
 * variable of the same name - confirm callers pass the same value that
 * copy_user_acl stores globally.
 */
static int
init_variables(const unsigned long acl_obj_size,
	       const unsigned long acl_subj_size,
	       const unsigned long acl_ip_size,
	       const unsigned long acl_role_size,
	       const unsigned long allowed_ip_size,
	       const unsigned long acl_trans_size,
	       const __u16 num_sprole_pws)
{
	unsigned long stacksize;

	acl_role_set.r_size = acl_role_size;
	/* names and inode/dev entries exist for both objects and subjects */
	name_set.n_size = (acl_obj_size + acl_subj_size);
	inodev_set.n_size = (acl_obj_size + acl_subj_size);

	if (!gr_init_uidset())
		return 1;

	/* set up the stack that holds allocation info */

	stacksize = (3 * acl_obj_size) + (2 * acl_role_size) +
	    (4 * acl_subj_size) + acl_ip_size + (2 * acl_trans_size) +
	    allowed_ip_size + (2 * num_sprole_pws) + 5;

	if (!acl_alloc_stack_init(stacksize))
		return 1;

	/* create our empty, fake deleted acls */
	deleted_subject =
	    (struct acl_subject_label *)
	    acl_alloc(sizeof (struct acl_subject_label));
	deleted_object =
	    (struct acl_object_label *)
	    acl_alloc(sizeof (struct acl_object_label));
	deleted_inodev =
	    (struct name_entry *) acl_alloc(sizeof (struct name_entry));

	if (!deleted_subject || !deleted_object || !deleted_inodev)
		return 1;

	memset(deleted_subject, 0, sizeof (struct acl_subject_label));
	memset(deleted_object, 0, sizeof (struct acl_object_label));
	memset(deleted_inodev, 0, sizeof (struct name_entry));

	/* We only want 50% full tables for now */

	acl_role_set.r_hash =
	    (struct acl_role_label **) create_table(&acl_role_set.r_size);
	name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size);
	inodev_set.n_hash =
	    (struct name_entry **) create_table(&inodev_set.n_size);

	if (!acl_role_set.r_hash || !name_set.n_hash || !inodev_set.n_hash)
		return 1;
	memset(acl_role_set.r_hash, 0,
	       sizeof (struct acl_role_label *) * acl_role_set.r_size);
	memset(name_set.n_hash, 0,
	       sizeof (struct name_entry *) * name_set.n_size);
	memset(inodev_set.n_hash, 0,
	       sizeof (struct name_entry *) * inodev_set.n_size);

	return 0;
}
637 |
|
638 |
/*
 * free_variables - tear down all policy state: detach every task from
 * its role/subject, free every per-subject object hash table and every
 * per-role subject hash table, release the allocation stack, free the
 * three global hash tables and the uid set, and reset the globals.
 * Tables at most a page in size were kmalloc'd, larger ones vmalloc'd
 * (mirroring create_table's choice).
 */
static void
free_variables(void)
{
	struct acl_subject_label *s;
	struct acl_role_label *r;
	struct task_struct *task;

	gr_clear_learn_entries();

	/* detach all tasks from the (about to be freed) labels */
	read_lock(&tasklist_lock);
	for_each_task(task) {
		task->acl_sp_role = 0;
		task->acl_role_id = 0;
		task->acl = NULL;
		task->role = NULL;
	}
	read_unlock(&tasklist_lock);

	/* free all object hash tables */

	if (role_list_head) {
		for (r = role_list_head; r; r = r->next) {
			if (!r->subj_hash)
				break;
			for (s = r->proc_subject; s; s = s->next) {
				if (!s->obj_hash)
					break;
				if ((s->obj_hash_size *
				     sizeof (struct acl_object_label *)) <=
				    PAGE_SIZE)
					kfree(s->obj_hash);
				else
					vfree(s->obj_hash);
			}
			if ((r->subj_hash_size *
			     sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
				kfree(r->subj_hash);
			else
				vfree(r->subj_hash);
		}
	}

	/* releases every acl_alloc'd label in one sweep */
	acl_free_all();

	if (acl_role_set.r_hash) {
		if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
		    PAGE_SIZE)
			kfree(acl_role_set.r_hash);
		else
			vfree(acl_role_set.r_hash);
	}
	if (name_set.n_hash) {
		if ((name_set.n_size * sizeof (struct name_entry *)) <=
		    PAGE_SIZE)
			kfree(name_set.n_hash);
		else
			vfree(name_set.n_hash);
	}

	if (inodev_set.n_hash) {
		if ((inodev_set.n_size * sizeof (struct name_entry *)) <=
		    PAGE_SIZE)
			kfree(inodev_set.n_hash);
		else
			vfree(inodev_set.n_hash);
	}

	gr_free_uidset();

	memset(&name_set, 0, sizeof (struct name_db));
	memset(&inodev_set, 0, sizeof (struct name_db));
	memset(&acl_role_set, 0, sizeof (struct acl_role_db));

	role_list_head = NULL;
	default_role = NULL;

	return;
}
716 |
|
717 |
static __u32 |
718 |
count_user_objs(struct acl_object_label *userp) |
719 |
{ |
720 |
struct acl_object_label o_tmp; |
721 |
__u32 num = 0; |
722 |
|
723 |
while (userp) { |
724 |
if (copy_from_user(&o_tmp, userp, |
725 |
sizeof (struct acl_object_label))) |
726 |
break; |
727 |
|
728 |
userp = o_tmp.prev; |
729 |
num++; |
730 |
} |
731 |
|
732 |
return num; |
733 |
} |
734 |
|
735 |
static struct acl_subject_label * |
736 |
do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role); |
737 |
|
738 |
/*
 * copy_user_objs - copy a userspace list of object labels into kernel
 * memory for @subj, walking the list via each entry's prev pointer.
 * Each object's filename is copied separately, the object is inserted
 * into @subj's object hash and the global name tables, and any nested
 * subject attached to the object is copied recursively (updating the
 * global s_final tracker).  Returns 0 or a negative errno.  Partially
 * copied data is reclaimed wholesale by the acl_alloc pool on failure.
 */
static int
copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
	       struct acl_role_label *role)
{
	struct acl_object_label *o_tmp;
	unsigned int len;
	char *tmp;

	while (userp) {
		if ((o_tmp = (struct acl_object_label *)
		     acl_alloc(sizeof (struct acl_object_label))) == NULL)
			return -ENOMEM;

		if (copy_from_user(o_tmp, userp,
				   sizeof (struct acl_object_label)))
			return -EFAULT;

		/* advance before o_tmp's pointers are rewritten below */
		userp = o_tmp->prev;

		len = strnlen_user(o_tmp->filename, PATH_MAX);

		/* len includes the NUL; 0 means the pointer faulted */
		if (!len || len >= PATH_MAX)
			return -EINVAL;

		if ((tmp = (char *) acl_alloc(len)) == NULL)
			return -ENOMEM;

		if (copy_from_user(tmp, o_tmp->filename, len))
			return -EFAULT;

		o_tmp->filename = tmp;

		insert_acl_obj_label(o_tmp, subj);
		if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
				       o_tmp->device))
			return -ENOMEM;

		if (o_tmp->nested) {
			/* nested subjects live on objects, not in the role's
			   subject hash; the copy updates s_last/s_final */
			o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
			if (IS_ERR(o_tmp->nested))
				return PTR_ERR(o_tmp->nested);

			s_final = o_tmp->nested;
		}
	}

	return 0;
}
786 |
|
787 |
static __u32 |
788 |
count_user_subjs(struct acl_subject_label *userp) |
789 |
{ |
790 |
struct acl_subject_label s_tmp; |
791 |
__u32 num = 0; |
792 |
|
793 |
while (userp) { |
794 |
if (copy_from_user(&s_tmp, userp, |
795 |
sizeof (struct acl_subject_label))) |
796 |
break; |
797 |
|
798 |
userp = s_tmp.prev; |
799 |
/* do not count nested subjects against this count, since |
800 |
they are not included in the hash table, but are |
801 |
attached to objects. We have already counted |
802 |
the subjects in userspace for the allocation |
803 |
stack |
804 |
*/ |
805 |
if (!s_tmp.parent_subject) |
806 |
num++; |
807 |
} |
808 |
|
809 |
return num; |
810 |
} |
811 |
|
812 |
/*
 * copy_user_allowedips - replace @rolep's userspace-resident allowed-IP
 * list with a kernel-space copy.  Walks the user list via each entry's
 * prev pointer while relinking the kernel copies into a forward chain
 * (first element becomes rolep->allowed_ips).  Returns 0 or -errno;
 * partial allocations are reclaimed by the acl_alloc pool.
 */
static int
copy_user_allowedips(struct acl_role_label *rolep)
{
	struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;

	/* rolep->allowed_ips still points into userspace at this point */
	ruserip = rolep->allowed_ips;

	while (ruserip) {
		rlast = rtmp;

		if ((rtmp = (struct role_allowed_ip *)
		     acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
			return -ENOMEM;

		if (copy_from_user(rtmp, ruserip,
				   sizeof (struct role_allowed_ip)))
			return -EFAULT;

		/* advance using the copied (userspace) prev pointer before
		   rewriting the link fields below */
		ruserip = rtmp->prev;

		if (!rlast) {
			rtmp->prev = NULL;
			rolep->allowed_ips = rtmp;
		} else {
			rlast->next = rtmp;
			rtmp->prev = rlast;
		}

		if (!ruserip)
			rtmp->next = NULL;
	}

	return 0;
}
846 |
|
847 |
/*
 * copy_user_transitions - replace @rolep's userspace-resident role
 * transition list with a kernel-space copy, including each transition's
 * rolename string.  Walks the user list via prev pointers while
 * relinking the copies into a forward chain rooted at
 * rolep->transitions.  Returns 0 or -errno; partial allocations are
 * reclaimed by the acl_alloc pool.
 */
static int
copy_user_transitions(struct acl_role_label *rolep)
{
	struct role_transition *rusertp, *rtmp = NULL, *rlast;
	unsigned int len;
	char *tmp;

	/* rolep->transitions still points into userspace at this point */
	rusertp = rolep->transitions;

	while (rusertp) {
		rlast = rtmp;

		if ((rtmp = (struct role_transition *)
		     acl_alloc(sizeof (struct role_transition))) == NULL)
			return -ENOMEM;

		if (copy_from_user(rtmp, rusertp,
				   sizeof (struct role_transition)))
			return -EFAULT;

		/* advance using the copied (userspace) prev pointer */
		rusertp = rtmp->prev;

		len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);

		/* len includes the NUL; 0 means the pointer faulted */
		if (!len || len >= GR_SPROLE_LEN)
			return -EINVAL;

		if ((tmp = (char *) acl_alloc(len)) == NULL)
			return -ENOMEM;

		if (copy_from_user(tmp, rtmp->rolename, len))
			return -EFAULT;

		rtmp->rolename = tmp;

		if (!rlast) {
			rtmp->prev = NULL;
			rolep->transitions = rtmp;
		} else {
			rlast->next = rtmp;
			rtmp->prev = rlast;
		}

		if (!rusertp)
			rtmp->next = NULL;
	}

	return 0;
}
896 |
|
897 |
/*
 * do_copy_user_subj - copy one subject label (and everything hanging off
 * it) from userspace into kernel memory for @role.
 *
 * Links the new subject onto the role's subject list via the file-scope
 * s_last cursor, copies the subject's filename, builds and fills its
 * object hash table (which may recursively copy nested subjects and
 * update s_final), copies its IP ACL array, and registers its pathname
 * in the global name tables.  Returns the kernel subject or an ERR_PTR;
 * on error, partially copied data is reclaimed by the acl_alloc pool.
 */
static struct acl_subject_label *
do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
{
	struct acl_subject_label *s_tmp = NULL;
	unsigned int len;
	char *tmp;
	__u32 num_objs;
	struct acl_ip_label **i_tmp, *i_utmp2;
	unsigned long i_num;
	int err;

	if ((s_tmp = (struct acl_subject_label *)
	     acl_alloc(sizeof (struct acl_subject_label))) == NULL)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(s_tmp, userp,
			   sizeof (struct acl_subject_label)))
		return ERR_PTR(-EFAULT);

	/* append to the role's subject list via the s_last cursor */
	if (!s_last) {
		s_tmp->prev = NULL;
		role->proc_subject = s_tmp;
	} else {
		s_last->next = s_tmp;
		s_tmp->prev = s_last;
	}

	s_last = s_tmp;

	len = strnlen_user(s_tmp->filename, PATH_MAX);

	/* len includes the NUL; 0 means the pointer faulted */
	if (!len || len >= PATH_MAX)
		return ERR_PTR(-EINVAL);

	if ((tmp = (char *) acl_alloc(len)) == NULL)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(tmp, s_tmp->filename, len))
		return ERR_PTR(-EFAULT);

	s_tmp->filename = tmp;

	/* the "/" subject is the role's catch-all root label */
	if (!strcmp(s_tmp->filename, "/"))
		role->root_label = s_tmp;

	/* set up object hash table */
	num_objs = count_user_objs(s_tmp->proc_object);

	s_tmp->obj_hash_size = num_objs;
	s_tmp->obj_hash =
	    (struct acl_object_label **)
	    create_table(&(s_tmp->obj_hash_size));

	if (!s_tmp->obj_hash)
		return ERR_PTR(-ENOMEM);

	memset(s_tmp->obj_hash, 0,
	       s_tmp->obj_hash_size *
	       sizeof (struct acl_object_label *));

	/* copy before adding in objects, since a nested
	   acl could be found and be the final subject
	   copied
	 */

	s_final = s_tmp;

	/* add in objects */
	err = copy_user_objs(s_tmp->proc_object, s_tmp, role);

	if (err)
		return ERR_PTR(err);

	/* add in ip acls */

	if (!s_tmp->ip_num) {
		s_tmp->ips = NULL;
		goto insert;
	}

	i_tmp =
	    (struct acl_ip_label **) acl_alloc(s_tmp->ip_num *
					       sizeof (struct
						       acl_ip_label *));

	if (!i_tmp)
		return ERR_PTR(-ENOMEM);

	for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
		*(i_tmp + i_num) =
		    (struct acl_ip_label *)
		    acl_alloc(sizeof (struct acl_ip_label));
		if (!*(i_tmp + i_num))
			return ERR_PTR(-ENOMEM);

		/* first fetch the userspace pointer, then the entry itself */
		if (copy_from_user
		    (&i_utmp2, s_tmp->ips + i_num,
		     sizeof (struct acl_ip_label *)))
			return ERR_PTR(-EFAULT);

		if (copy_from_user
		    (*(i_tmp + i_num), i_utmp2,
		     sizeof (struct acl_ip_label)))
			return ERR_PTR(-EFAULT);
	}

	s_tmp->ips = i_tmp;

      insert:
	if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
			       s_tmp->device))
		return ERR_PTR(-ENOMEM);

	return s_tmp;
}
1013 |
|
1014 |
/*
 * copy_user_subjs - copy every top-level subject in a userspace list
 * into @role, inserting each into the role's subject hash table.
 * Nested subjects (those with a parent_subject) are skipped here; they
 * are copied while parsing objects.  Terminates the kernel-side subject
 * list via the file-scope s_final cursor.  Returns 0 or -errno.
 */
static int
copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
{
	struct acl_subject_label s_pre;
	struct acl_subject_label * ret;
	int err;

	while (userp) {
		if (copy_from_user(&s_pre, userp,
				   sizeof (struct acl_subject_label)))
			return -EFAULT;

		/* do not add nested subjects here, add
		   while parsing objects
		 */

		if (s_pre.parent_subject) {
			userp = s_pre.prev;
			continue;
		}

		ret = do_copy_user_subj(userp, role);

		err = PTR_ERR(ret);
		if (IS_ERR(ret))
			return err;

		insert_acl_subj_label(ret, role);

		userp = s_pre.prev;
	}

	/* NOTE(review): if the list was empty or held only nested subjects,
	   s_final could still be NULL/stale here - verify the userspace
	   loader always supplies at least one top-level subject per role */
	s_final->next = NULL;

	return 0;
}
1050 |
|
1051 |
/*
 * copy_user_acl - pull the complete ACL policy from the userspace gr_arg:
 * first the special-role password entries, then the role table, and for
 * each role its allowed IPs, transitions and subject tree.
 *
 * Returns 0 on success or a negative errno.  On any failure all policy
 * memory allocated so far is torn down via free_variables() before the
 * error is returned.
 */
static int
copy_user_acl(struct gr_arg *arg)
{
	struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2, *r_last;
	struct sprole_pw *sptmp;
	unsigned long r_num;
	unsigned int len;
	char *tmp;
	int err = 0;
	__u16 i;
	__u32 num_subjs;

	/* we need a default and kernel role */
	if (arg->role_db.r_entries < 2)
		return -EINVAL;

	/* copy special role authentication info from userspace */

	num_sprole_pws = arg->num_sprole_pws;
	/* NOTE(review): num_sprole_pws comes from userspace; the
	 * multiplication below is not checked for overflow here --
	 * confirm acl_alloc() rejects oversized requests */
	acl_special_roles = (struct sprole_pw **) acl_alloc(num_sprole_pws * sizeof(struct sprole_pw *));

	if (!acl_special_roles) {
		err = -ENOMEM;
		goto cleanup;
	}

	for (i = 0; i < num_sprole_pws; i++) {
		sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
		if (!sptmp) {
			err = -ENOMEM;
			goto cleanup;
		}
		if (copy_from_user(sptmp, arg->sprole_pws + i,
				   sizeof (struct sprole_pw))) {
			err = -EFAULT;
			goto cleanup;
		}

		/* strnlen_user() counts the terminating NUL, so a valid
		 * name yields 0 < len < GR_SPROLE_LEN */
		len =
		    strnlen_user(sptmp->rolename, GR_SPROLE_LEN);

		if (!len || len >= GR_SPROLE_LEN) {
			err = -EINVAL;
			goto cleanup;
		}

		if ((tmp = (char *) acl_alloc(len)) == NULL) {
			err = -ENOMEM;
			goto cleanup;
		}

		if (copy_from_user(tmp, sptmp->rolename, len)) {
			err = -EFAULT;
			goto cleanup;
		}

#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
		printk(KERN_ALERT "Copying special role %s\n", tmp);
#endif
		/* replace the userspace pointer with the kernel copy */
		sptmp->rolename = tmp;
		acl_special_roles[i] = sptmp;
	}

	/* now copy the role table: r_table is an array of userspace
	 * pointers to acl_role_label structures */
	r_utmp = (struct acl_role_label **) arg->role_db.r_table;

	for (r_num = 0; r_num < arg->role_db.r_entries; r_num++) {
		r_last = r_tmp;

		r_tmp = acl_alloc(sizeof (struct acl_role_label));

		if (!r_tmp) {
			err = -ENOMEM;
			goto cleanup;
		}

		/* fetch the userspace pointer for this role ... */
		if (copy_from_user(&r_utmp2, r_utmp + r_num,
				   sizeof (struct acl_role_label *))) {
			err = -EFAULT;
			goto cleanup;
		}

		/* ... then the role structure it points at */
		if (copy_from_user(r_tmp, r_utmp2,
				   sizeof (struct acl_role_label))) {
			err = -EFAULT;
			goto cleanup;
		}

		/* link the role into the doubly-linked role_list_head list */
		if (!r_last) {
			r_tmp->prev = NULL;
			role_list_head = r_tmp;
		} else {
			r_last->next = r_tmp;
			r_tmp->prev = r_last;
		}

		if (r_num == (arg->role_db.r_entries - 1))
			r_tmp->next = NULL;

		len = strnlen_user(r_tmp->rolename, PATH_MAX);

		if (!len || len >= PATH_MAX) {
			err = -EINVAL;
			goto cleanup;
		}

		if ((tmp = (char *) acl_alloc(len)) == NULL) {
			err = -ENOMEM;
			goto cleanup;
		}
		if (copy_from_user(tmp, r_tmp->rolename, len)) {
			err = -EFAULT;
			goto cleanup;
		}
		r_tmp->rolename = tmp;

		/* remember the two mandatory roles checked for above */
		if (!strcmp(r_tmp->rolename, "default")
		    && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
			default_role = r_tmp;
		} else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
			kernel_role = r_tmp;
		}

		/* size the subject hash from the userspace subject list;
		 * create_table() may round subj_hash_size up */
		num_subjs = count_user_subjs(r_tmp->proc_subject);

		r_tmp->subj_hash_size = num_subjs;
		r_tmp->subj_hash =
		    (struct acl_subject_label **)
		    create_table(&(r_tmp->subj_hash_size));

		if (!r_tmp->subj_hash) {
			err = -ENOMEM;
			goto cleanup;
		}

		err = copy_user_allowedips(r_tmp);
		if (err)
			goto cleanup;

		err = copy_user_transitions(r_tmp);
		if (err)
			goto cleanup;

		memset(r_tmp->subj_hash, 0,
		       r_tmp->subj_hash_size *
		       sizeof (struct acl_subject_label *));

		/* reset the file-level subject-list cursor before copying
		 * this role's subjects */
		s_last = NULL;

		err = copy_user_subjs(r_tmp->proc_subject, r_tmp);

		if (err)
			goto cleanup;

		insert_acl_role_label(r_tmp);
	}

	goto return_err;
      cleanup:
	free_variables();
      return_err:
	return err;

}
1214 |
|
1215 |
static int |
1216 |
gracl_init(struct gr_arg *args) |
1217 |
{ |
1218 |
int error = 0; |
1219 |
|
1220 |
memcpy(gr_system_salt, args->salt, GR_SALT_LEN); |
1221 |
memcpy(gr_system_sum, args->sum, GR_SHA_LEN); |
1222 |
|
1223 |
if (init_variables(args->role_db.o_entries, args->role_db.s_entries, |
1224 |
args->role_db.i_entries, args->role_db.r_entries, |
1225 |
args->role_db.a_entries, args->role_db.t_entries, |
1226 |
args->num_sprole_pws)) { |
1227 |
security_alert_good(GR_INITF_ACL_MSG, GR_VERSION); |
1228 |
error = -ENOMEM; |
1229 |
free_variables(); |
1230 |
goto out; |
1231 |
} |
1232 |
|
1233 |
error = copy_user_acl(args); |
1234 |
if (error) |
1235 |
goto out; |
1236 |
|
1237 |
if ((error = gr_set_acls(0))) { |
1238 |
free_variables(); |
1239 |
goto out; |
1240 |
} |
1241 |
|
1242 |
gr_status |= GR_READY; |
1243 |
out: |
1244 |
return error; |
1245 |
} |
1246 |
|
1247 |
/*
 * chk_obj_label - find the object label governing l_dentry/l_mnt for the
 * given subject.  Walks from the dentry upward toward the namespace root
 * (taken from child_reaper's fs_struct), crossing mount points, and
 * returns the first matching object label; if nothing on the path
 * matches, falls back to the label of the root directory itself.
 * Called with no locks held; takes dcache_lock for the walk.
 */
static struct acl_object_label *
chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
	      const struct acl_subject_label *subj)
{
	/* cast away const: the walk mutates only the local cursors */
	struct dentry *dentry = (struct dentry *) l_dentry;
	struct vfsmount *mnt = (struct vfsmount *) l_mnt;
	struct dentry *root;
	struct vfsmount *rootmnt;
	struct acl_object_label *retval;

	/* pin the (init) namespace root so the walk has a stable anchor */
	read_lock(&child_reaper->fs->lock);
	rootmnt = mntget(child_reaper->fs->rootmnt);
	root = dget(child_reaper->fs->root);
	read_unlock(&child_reaper->fs->lock);
	spin_lock(&dcache_lock);

	for (;;) {
		if (unlikely(dentry == root && mnt == rootmnt))
			break;
		/* at the top of a mount: look up this point, then hop to
		 * the parent mount's mountpoint */
		if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
			if (mnt->mnt_parent == mnt)
				break;

			read_lock(&gr_inode_lock);
			retval =
			    lookup_acl_obj_label(dentry->d_inode->i_ino,
						 dentry->d_inode->i_dev, subj);
			read_unlock(&gr_inode_lock);
			if (unlikely(retval != NULL))
				goto out;

			dentry = mnt->mnt_mountpoint;
			mnt = mnt->mnt_parent;
			continue;
		}

		/* ordinary path component: most specific match wins, so
		 * return as soon as a label is found */
		read_lock(&gr_inode_lock);
		retval =
		    lookup_acl_obj_label(dentry->d_inode->i_ino,
					 dentry->d_inode->i_dev, subj);
		read_unlock(&gr_inode_lock);
		if (unlikely(retval != NULL))
			goto out;

		dentry = dentry->d_parent;
	}

	/* reached the root (or an unattached mount top): try it directly */
	read_lock(&gr_inode_lock);
	retval =
	    lookup_acl_obj_label(dentry->d_inode->i_ino, dentry->d_inode->i_dev,
				 subj);
	read_unlock(&gr_inode_lock);

	/* last resort: the namespace root's own label */
	if (unlikely(retval == NULL)) {
		read_lock(&gr_inode_lock);
		retval =
		    lookup_acl_obj_label(root->d_inode->i_ino,
					 root->d_inode->i_dev, subj);
		read_unlock(&gr_inode_lock);
	}
      out:
	spin_unlock(&dcache_lock);
	dput(root);
	mntput(rootmnt);

	return retval;
}
1314 |
|
1315 |
/*
 * chk_subj_label - find the subject label governing l_dentry/l_mnt under
 * the given role.  Identical path walk to chk_obj_label(): ascend from
 * the dentry toward the namespace root, crossing mounts, returning the
 * first matching subject label and falling back to the root directory's
 * label when nothing more specific exists.
 */
static struct acl_subject_label *
chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
	       const struct acl_role_label *role)
{
	/* cast away const: the walk mutates only the local cursors */
	struct dentry *dentry = (struct dentry *) l_dentry;
	struct vfsmount *mnt = (struct vfsmount *) l_mnt;
	struct dentry *root;
	struct vfsmount *rootmnt;
	struct acl_subject_label *retval;

	/* pin the namespace root as the walk's termination anchor */
	read_lock(&child_reaper->fs->lock);
	rootmnt = mntget(child_reaper->fs->rootmnt);
	root = dget(child_reaper->fs->root);
	read_unlock(&child_reaper->fs->lock);
	spin_lock(&dcache_lock);

	for (;;) {
		if (unlikely(dentry == root && mnt == rootmnt))
			break;
		/* at a mount top: check it, then hop to the parent mount */
		if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
			if (mnt->mnt_parent == mnt)
				break;

			read_lock(&gr_inode_lock);
			retval =
			    lookup_acl_subj_label(dentry->d_inode->i_ino,
						  dentry->d_inode->i_dev, role);
			read_unlock(&gr_inode_lock);
			if (unlikely(retval != NULL))
				goto out;

			dentry = mnt->mnt_mountpoint;
			mnt = mnt->mnt_parent;
			continue;
		}

		/* most specific (deepest) subject label wins */
		read_lock(&gr_inode_lock);
		retval =
		    lookup_acl_subj_label(dentry->d_inode->i_ino,
					  dentry->d_inode->i_dev, role);
		read_unlock(&gr_inode_lock);
		if (unlikely(retval != NULL))
			goto out;

		dentry = dentry->d_parent;
	}

	read_lock(&gr_inode_lock);
	retval =
	    lookup_acl_subj_label(dentry->d_inode->i_ino,
				  dentry->d_inode->i_dev, role);
	read_unlock(&gr_inode_lock);

	/* fall back to the namespace root's subject label */
	if (unlikely(retval == NULL)) {
		read_lock(&gr_inode_lock);
		retval =
		    lookup_acl_subj_label(root->d_inode->i_ino,
					  root->d_inode->i_dev, role);
		read_unlock(&gr_inode_lock);
	}
      out:
	spin_unlock(&dcache_lock);
	dput(root);
	mntput(rootmnt);

	return retval;
}
1382 |
|
1383 |
/*
 * gr_log_learn - emit one learning-mode audit record: the role, the
 * credentials, the acting task's executable (or its subject filename when
 * no exec_file is attached), the accessed pathname and the access mode,
 * plus the task's recorded source IP.  Consumed by userspace to generate
 * policy.
 */
static __inline__ void
gr_log_learn(const struct acl_role_label *role, const uid_t uid, const gid_t gid,
	     const struct task_struct *task, const char *pathname,
	     const __u32 mode)
{
	security_learn(GR_LEARN_AUDIT_MSG, role->rolename, role->roletype,
		       uid, gid, task->exec_file ? gr_to_filename1(task->exec_file->f_dentry,
		       task->exec_file->f_vfsmnt) : task->acl->filename, task->acl->filename,
		       1, 1, pathname, (unsigned long) mode, NIPQUAD(task->curr_ip));

	return;
}
1395 |
|
1396 |
__u32 |
1397 |
gr_check_link(const struct dentry * new_dentry, |
1398 |
const struct dentry * parent_dentry, |
1399 |
const struct vfsmount * parent_mnt, |
1400 |
const struct dentry * old_dentry, const struct vfsmount * old_mnt) |
1401 |
{ |
1402 |
struct acl_object_label *obj; |
1403 |
__u32 oldmode, newmode; |
1404 |
|
1405 |
if (unlikely(!(gr_status & GR_READY))) |
1406 |
return (GR_WRITE | GR_CREATE); |
1407 |
|
1408 |
obj = chk_obj_label(old_dentry, old_mnt, current->acl); |
1409 |
oldmode = obj->mode; |
1410 |
|
1411 |
if (current->acl->mode & GR_LEARN) |
1412 |
oldmode |= (GR_WRITE | GR_CREATE); |
1413 |
newmode = |
1414 |
gr_check_create(new_dentry, parent_dentry, parent_mnt, |
1415 |
oldmode | GR_CREATE | GR_AUDIT_CREATE | |
1416 |
GR_AUDIT_WRITE | GR_SUPPRESS); |
1417 |
|
1418 |
if ((newmode & oldmode) == oldmode) |
1419 |
return newmode; |
1420 |
else if (current->acl->mode & GR_LEARN) { |
1421 |
gr_log_learn(current->role, current->uid, current->gid, |
1422 |
current, gr_to_filename(old_dentry, old_mnt), oldmode); |
1423 |
return (GR_WRITE | GR_CREATE); |
1424 |
} else if (newmode & GR_SUPPRESS) |
1425 |
return GR_SUPPRESS; |
1426 |
else |
1427 |
return 0; |
1428 |
} |
1429 |
|
1430 |
__u32 |
1431 |
gr_search_file(const struct dentry * dentry, const __u32 mode, |
1432 |
const struct vfsmount * mnt) |
1433 |
{ |
1434 |
__u32 retval = mode; |
1435 |
struct acl_subject_label *curracl; |
1436 |
struct acl_object_label *currobj; |
1437 |
|
1438 |
if (unlikely(!(gr_status & GR_READY))) |
1439 |
return (mode & ~GR_AUDITS); |
1440 |
|
1441 |
curracl = current->acl; |
1442 |
|
1443 |
currobj = chk_obj_label(dentry, mnt, curracl); |
1444 |
retval = currobj->mode & mode; |
1445 |
|
1446 |
if (unlikely |
1447 |
((curracl->mode & GR_LEARN) && (mode != GR_PTRACERD) |
1448 |
&& (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) { |
1449 |
__u32 new_mode = mode; |
1450 |
|
1451 |
new_mode &= ~(GR_AUDITS | GR_SUPPRESS); |
1452 |
|
1453 |
retval = new_mode; |
1454 |
|
1455 |
if (!(mode & GR_NOLEARN)) |
1456 |
gr_log_learn(current->role, current->uid, current->gid, |
1457 |
current, gr_to_filename(dentry, mnt), new_mode); |
1458 |
} |
1459 |
|
1460 |
return retval; |
1461 |
} |
1462 |
|
1463 |
__u32 |
1464 |
gr_check_create(const struct dentry * new_dentry, const struct dentry * parent, |
1465 |
const struct vfsmount * mnt, const __u32 mode) |
1466 |
{ |
1467 |
struct name_entry *match; |
1468 |
struct acl_object_label *matchpo; |
1469 |
struct acl_subject_label *curracl; |
1470 |
__u32 retval; |
1471 |
|
1472 |
if (unlikely(!(gr_status & GR_READY))) |
1473 |
return (mode & ~GR_AUDITS); |
1474 |
|
1475 |
match = lookup_name_entry(gr_to_filename(new_dentry, mnt)); |
1476 |
|
1477 |
if (!match) |
1478 |
goto check_parent; |
1479 |
|
1480 |
curracl = current->acl; |
1481 |
|
1482 |
read_lock(&gr_inode_lock); |
1483 |
matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl); |
1484 |
read_unlock(&gr_inode_lock); |
1485 |
|
1486 |
if (matchpo) { |
1487 |
if ((matchpo->mode & mode) != |
1488 |
(mode & ~(GR_AUDITS | GR_SUPPRESS)) |
1489 |
&& curracl->mode & GR_LEARN) { |
1490 |
__u32 new_mode = mode; |
1491 |
|
1492 |
new_mode &= ~(GR_AUDITS | GR_SUPPRESS); |
1493 |
|
1494 |
gr_log_learn(current->role, current->uid, current->gid, |
1495 |
current, gr_to_filename(new_dentry, mnt), new_mode); |
1496 |
|
1497 |
return new_mode; |
1498 |
} |
1499 |
return (matchpo->mode & mode); |
1500 |
} |
1501 |
|
1502 |
check_parent: |
1503 |
curracl = current->acl; |
1504 |
|
1505 |
matchpo = chk_obj_label(parent, mnt, curracl); |
1506 |
retval = matchpo->mode & mode; |
1507 |
|
1508 |
if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))) |
1509 |
&& (curracl->mode & GR_LEARN)) { |
1510 |
__u32 new_mode = mode; |
1511 |
|
1512 |
new_mode &= ~(GR_AUDITS | GR_SUPPRESS); |
1513 |
|
1514 |
gr_log_learn(current->role, current->uid, current->gid, |
1515 |
current, gr_to_filename(new_dentry, mnt), new_mode); |
1516 |
return new_mode; |
1517 |
} |
1518 |
|
1519 |
return retval; |
1520 |
} |
1521 |
|
1522 |
int |
1523 |
gr_check_hidden_task(const struct task_struct *task) |
1524 |
{ |
1525 |
if (unlikely(!(gr_status & GR_READY))) |
1526 |
return 0; |
1527 |
|
1528 |
if (!(task->acl->mode & GR_FIND) && !(current->acl->mode & GR_VIEW)) |
1529 |
return 1; |
1530 |
|
1531 |
return 0; |
1532 |
} |
1533 |
|
1534 |
int |
1535 |
gr_check_protected_task(const struct task_struct *task) |
1536 |
{ |
1537 |
if (unlikely(!(gr_status & GR_READY) || !task)) |
1538 |
return 0; |
1539 |
|
1540 |
if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL)) |
1541 |
return 1; |
1542 |
|
1543 |
return 0; |
1544 |
} |
1545 |
|
1546 |
__inline__ void |
1547 |
gr_copy_label(struct task_struct *tsk) |
1548 |
{ |
1549 |
tsk->used_accept = 0; |
1550 |
tsk->used_connect = 0; |
1551 |
tsk->acl_sp_role = 0; |
1552 |
tsk->acl_role_id = current->acl_role_id; |
1553 |
tsk->acl = current->acl; |
1554 |
tsk->role = current->role; |
1555 |
tsk->curr_ip = current->curr_ip; |
1556 |
if (current->exec_file) |
1557 |
get_file(current->exec_file); |
1558 |
tsk->exec_file = current->exec_file; |
1559 |
tsk->is_writable = current->is_writable; |
1560 |
if (unlikely(current->used_accept)) |
1561 |
current->curr_ip = 0; |
1562 |
|
1563 |
return; |
1564 |
} |
1565 |
|
1566 |
static __inline__ void |
1567 |
gr_set_proc_res(void) |
1568 |
{ |
1569 |
struct acl_subject_label *proc; |
1570 |
unsigned short i; |
1571 |
|
1572 |
proc = current->acl; |
1573 |
|
1574 |
if (proc->mode & GR_LEARN) |
1575 |
return; |
1576 |
|
1577 |
for (i = 0; i < RLIM_NLIMITS; i++) { |
1578 |
if (!(proc->resmask & (1 << i))) |
1579 |
continue; |
1580 |
|
1581 |
current->rlim[i].rlim_cur = proc->res[i].rlim_cur; |
1582 |
current->rlim[i].rlim_max = proc->res[i].rlim_max; |
1583 |
} |
1584 |
|
1585 |
return; |
1586 |
} |
1587 |
|
1588 |
void |
1589 |
gr_set_pax_flags(struct task_struct *task) |
1590 |
{ |
1591 |
struct acl_subject_label *proc; |
1592 |
|
1593 |
if (unlikely(!(gr_status & GR_READY))) |
1594 |
return; |
1595 |
|
1596 |
proc = task->acl; |
1597 |
|
1598 |
if (proc->mode & GR_PAXPAGE) |
1599 |
task->flags &= ~PF_PAX_PAGEEXEC; |
1600 |
if (proc->mode & GR_PAXSEGM) |
1601 |
task->flags &= ~PF_PAX_SEGMEXEC; |
1602 |
if (proc->mode & GR_PAXGCC) |
1603 |
task->flags |= PF_PAX_EMUTRAMP; |
1604 |
if (proc->mode & GR_PAXMPROTECT) |
1605 |
task->flags &= ~PF_PAX_MPROTECT; |
1606 |
if (proc->mode & GR_PAXRANDMMAP) |
1607 |
task->flags &= ~PF_PAX_RANDMMAP; |
1608 |
if (proc->mode & GR_PAXRANDEXEC) |
1609 |
task->flags |= PF_PAX_RANDEXEC; |
1610 |
|
1611 |
return; |
1612 |
} |
1613 |
|
1614 |
static __inline__ void |
1615 |
do_set_role_label(struct task_struct *task, const uid_t uid, const gid_t gid) |
1616 |
{ |
1617 |
task->role = lookup_acl_role_label(task, uid, gid); |
1618 |
|
1619 |
return; |
1620 |
} |
1621 |
|
1622 |
/*
 * gr_set_role_label - (re)bind a task's role and subject, typically on a
 * credential change.  Kernel threads (no exec_file) get the kernel role;
 * tasks not currently in a special role are re-matched by uid/gid.  The
 * subject is then resolved from the task's executable under the chosen
 * role, and is_writable is set when either the default role's or the
 * task role's root object grants write to that executable.
 *
 * NOTE(review): the second parameter is named gid but declared uid_t
 * (do_set_role_label takes gid_t) -- confirm the prototype elsewhere
 * before changing.
 */
void
gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
{
	struct acl_object_label *obj;
	struct file *filp;

	if (unlikely(!(gr_status & GR_READY)))
		return;

	filp = task->exec_file;

	/* kernel process, we'll give them the kernel role */
	if (unlikely(!filp)) {
		task->role = kernel_role;
		task->acl = kernel_role->root_label;
		return;
	} else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
		/* special roles persist across credential changes */
		do_set_role_label(task, uid, gid);

	/* resolve the subject for the executable under the (new) role */
	task->acl =
	    chk_subj_label(filp->f_dentry, filp->f_vfsmnt, task->role);

	task->is_writable = 0;

	/* ignore additional mmap checks for processes that are writable
	   by the default ACL */
	obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label);
	if (unlikely(obj->mode & GR_WRITE))
		task->is_writable = 1;
	obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, task->role->root_label);
	if (unlikely(obj->mode & GR_WRITE))
		task->is_writable = 1;

#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
	printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
#endif

	/* apply the new subject's resource limits */
	gr_set_proc_res();

	return;
}
1663 |
|
1664 |
/*
 * gr_set_proc_label - switch the current task's subject at exec time.
 * The new binary's subject is looked up under the current role; the old
 * subject's object label for the binary decides whether the old subject
 * is inherited (GR_INHERIT) instead of switching.  A nested subject
 * attached to the object takes precedence over the looked-up subject.
 * Also recomputes is_writable and applies the subject's resource limits.
 */
void
gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt)
{
	struct acl_subject_label *newacl;
	struct acl_object_label *obj;
	__u32 retmode;

	if (unlikely(!(gr_status & GR_READY)))
		return;

	newacl = chk_subj_label(dentry, mnt, current->role);

	/* how does the CURRENT subject label the new binary? */
	obj = chk_obj_label(dentry, mnt, current->acl);
	retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);

	if ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT)) {
		/* switch subjects; a nested subject on the object wins */
		if (obj->nested)
			current->acl = obj->nested;
		else
			current->acl = newacl;
	} else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
		/* keeping the old subject; optionally audit the inherit */
		security_audit(GR_INHERIT_ACL_MSG, current->acl->filename,
			       gr_to_filename(dentry, mnt), DEFAULTSECARGS);

	current->is_writable = 0;

	/* ignore additional mmap checks for processes that are writable
	   by the default ACL */
	obj = chk_obj_label(dentry, mnt, default_role->root_label);
	if (unlikely(obj->mode & GR_WRITE))
		current->is_writable = 1;
	obj = chk_obj_label(dentry, mnt, current->role->root_label);
	if (unlikely(obj->mode & GR_WRITE))
		current->is_writable = 1;

	gr_set_proc_res();

#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
	printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", current->comm, current->pid, current->role->rolename, current->acl->filename);
#endif
	return;
}
1706 |
|
1707 |
static __inline__ void |
1708 |
do_handle_delete(const ino_t ino, const kdev_t dev) |
1709 |
{ |
1710 |
struct acl_object_label *matchpo; |
1711 |
struct acl_subject_label *matchps; |
1712 |
struct acl_subject_label *i; |
1713 |
struct acl_role_label *role; |
1714 |
|
1715 |
for (role = role_list_head; role; role = role->next) { |
1716 |
for (i = role->proc_subject; i; i = i->next) { |
1717 |
if (unlikely(i->parent_subject && |
1718 |
(i->inode == ino) && |
1719 |
(i->device == dev))) |
1720 |
i->mode |= GR_DELETED; |
1721 |
if (unlikely((matchpo = |
1722 |
lookup_acl_obj_label(ino, dev, i)) != NULL)) |
1723 |
matchpo->mode |= GR_DELETED; |
1724 |
} |
1725 |
|
1726 |
if (unlikely((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)) |
1727 |
matchps->mode |= GR_DELETED; |
1728 |
} |
1729 |
|
1730 |
return; |
1731 |
} |
1732 |
|
1733 |
void |
1734 |
gr_handle_delete(const ino_t ino, const kdev_t dev) |
1735 |
{ |
1736 |
if (unlikely(!(gr_status & GR_READY))) |
1737 |
return; |
1738 |
|
1739 |
write_lock(&gr_inode_lock); |
1740 |
if (unlikely((unsigned long)lookup_inodev_entry(ino, dev))) |
1741 |
do_handle_delete(ino, dev); |
1742 |
write_unlock(&gr_inode_lock); |
1743 |
|
1744 |
return; |
1745 |
} |
1746 |
|
1747 |
/*
 * update_acl_obj_label - rebind a GR_DELETED object label in subj's
 * object hash from oldinode:olddevice to newinode:newdevice.  Probes the
 * open-addressed hash with the same step sequence as insertion (step
 * 1<<i, i cycling mod 32), re-inserts the record under its new key, and
 * leaves the "deleted_object" tombstone in the old slot so other probe
 * chains stay intact.  Caller must hold gr_inode_lock for writing.
 */
static __inline__ void
update_acl_obj_label(const ino_t oldinode, const kdev_t olddevice,
		     const ino_t newinode, const kdev_t newdevice,
		     struct acl_subject_label *subj)
{
	unsigned long index = fhash(oldinode, olddevice, subj->obj_hash_size);
	struct acl_object_label **match;
	struct acl_object_label *tmp;
	__u8 i = 0;

	match = &subj->obj_hash[index];

	/* probe until an empty slot or a deleted entry with the old key */
	while (*match && ((*match)->inode != oldinode ||
			  (*match)->device != olddevice ||
			  !((*match)->mode & GR_DELETED))) {
		index = (index + (1 << i)) % subj->obj_hash_size;
		match = &subj->obj_hash[index];
		i = (i + 1) % 32;
	}

	if (*match && ((*match) != deleted_object)
	    && ((*match)->inode == oldinode)
	    && ((*match)->device == olddevice)
	    && ((*match)->mode & GR_DELETED)) {
		tmp = *match;
		/* re-key the record and clear its deleted flag */
		tmp->inode = newinode;
		tmp->device = newdevice;
		tmp->mode &= ~GR_DELETED;

		/* tombstone the old slot, then re-hash under the new key */
		*match = deleted_object;

		insert_acl_obj_label(tmp, subj);
	}

	return;
}
1783 |
|
1784 |
/*
 * update_acl_subj_label - rebind a GR_DELETED subject label in role's
 * subject hash from oldinode:olddevice to newinode:newdevice.  Same
 * open-addressing probe and tombstone scheme as update_acl_obj_label,
 * using the "deleted_subject" placeholder.  Caller must hold
 * gr_inode_lock for writing.
 */
static __inline__ void
update_acl_subj_label(const ino_t oldinode, const kdev_t olddevice,
		      const ino_t newinode, const kdev_t newdevice,
		      struct acl_role_label *role)
{
	struct acl_subject_label **s_hash = role->subj_hash;
	unsigned long subj_size = role->subj_hash_size;
	unsigned long index = fhash(oldinode, olddevice, subj_size);
	struct acl_subject_label **match;
	struct acl_subject_label *tmp;
	__u8 i = 0;

	match = &s_hash[index];

	/* probe until an empty slot or a deleted entry with the old key */
	while (*match && ((*match)->inode != oldinode ||
			  (*match)->device != olddevice ||
			  !((*match)->mode & GR_DELETED))) {
		index = (index + (1 << i)) % subj_size;
		i = (i + 1) % 32;
		match = &s_hash[index];
	}

	if (*match && (*match != deleted_subject)
	    && ((*match)->inode == oldinode)
	    && ((*match)->device == olddevice)
	    && ((*match)->mode & GR_DELETED)) {
		tmp = *match;

		/* re-key the record and clear its deleted flag */
		tmp->inode = newinode;
		tmp->device = newdevice;
		tmp->mode &= ~GR_DELETED;

		/* tombstone the old slot, then re-hash under the new key */
		*match = deleted_subject;

		insert_acl_subj_label(tmp, role);
	}

	return;
}
1823 |
|
1824 |
/*
 * update_inodev_entry - rebind the inode/device name-entry in the global
 * inodev_set hash from oldinode:olddevice to newinode:newdevice.  Same
 * probe/tombstone scheme as the object/subject updaters, with the
 * "deleted_inodev" placeholder; no GR_DELETED flag exists on these
 * entries, so matching is on the key alone.  Caller must hold
 * gr_inode_lock for writing.
 */
static __inline__ void
update_inodev_entry(const ino_t oldinode, const kdev_t olddevice,
		    const ino_t newinode, const kdev_t newdevice)
{
	unsigned long index = fhash(oldinode, olddevice, inodev_set.n_size);
	struct name_entry **match;
	struct name_entry *tmp;
	__u8 i = 0;

	match = &inodev_set.n_hash[index];

	/* probe until an empty slot or an entry with the old key */
	while (*match
	       && ((*match)->inode != oldinode
		   || (*match)->device != olddevice)) {
		index = (index + (1 << i)) % inodev_set.n_size;
		i = (i + 1) % 32;
		match = &inodev_set.n_hash[index];
	}

	if (*match && (*match != deleted_inodev)
	    && ((*match)->inode == oldinode)
	    && ((*match)->device == olddevice)) {
		tmp = *match;

		/* re-key, tombstone the old slot, re-hash under new key */
		tmp->inode = newinode;
		tmp->device = newdevice;

		*match = deleted_inodev;

		insert_inodev_entry(tmp);
	}

	return;
}
1858 |
|
1859 |
static __inline__ void |
1860 |
do_handle_create(const struct name_entry *matchn, const struct dentry *dentry, |
1861 |
const struct vfsmount *mnt) |
1862 |
{ |
1863 |
struct acl_subject_label *i; |
1864 |
struct acl_role_label *role; |
1865 |
|
1866 |
for (role = role_list_head; role; role = role->next) { |
1867 |
update_acl_subj_label(matchn->inode, matchn->device, |
1868 |
dentry->d_inode->i_ino, |
1869 |
dentry->d_inode->i_dev, role); |
1870 |
|
1871 |
for (i = role->proc_subject; i; i = i->next) { |
1872 |
if (unlikely(i->parent_subject && |
1873 |
(i->inode == dentry->d_inode->i_ino) && |
1874 |
(i->device == dentry->d_inode->i_dev))) { |
1875 |
i->inode = dentry->d_inode->i_ino; |
1876 |
i->device = dentry->d_inode->i_dev; |
1877 |
} |
1878 |
update_acl_obj_label(matchn->inode, matchn->device, |
1879 |
dentry->d_inode->i_ino, |
1880 |
dentry->d_inode->i_dev, i); |
1881 |
} |
1882 |
} |
1883 |
|
1884 |
update_inodev_entry(matchn->inode, matchn->device, |
1885 |
dentry->d_inode->i_ino, dentry->d_inode->i_dev); |
1886 |
|
1887 |
return; |
1888 |
} |
1889 |
|
1890 |
void |
1891 |
gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt) |
1892 |
{ |
1893 |
struct name_entry *matchn; |
1894 |
|
1895 |
if (unlikely(!(gr_status & GR_READY))) |
1896 |
return; |
1897 |
|
1898 |
matchn = lookup_name_entry(gr_to_filename(dentry, mnt)); |
1899 |
|
1900 |
if (unlikely((unsigned long)matchn)) { |
1901 |
write_lock(&gr_inode_lock); |
1902 |
do_handle_create(matchn, dentry, mnt); |
1903 |
write_unlock(&gr_inode_lock); |
1904 |
} |
1905 |
|
1906 |
return; |
1907 |
} |
1908 |
|
1909 |
/*
 * gr_handle_rename - perform vfs_rename() and keep the loaded policy in
 * sync: the inode displaced at the new name (if replaced) and the old
 * name's inode are marked deleted when the policy names them, and if the
 * policy names the destination path, its records are rebound to the
 * renamed inode.  Returns the vfs_rename() result.
 *
 * Note: the destination name entry is looked up BEFORE the rename so it
 * reflects the pre-rename policy state.
 */
int
gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
		 struct dentry *old_dentry,
		 struct dentry *new_dentry,
		 struct vfsmount *mnt, const __u8 replace)
{
	struct name_entry *matchn;
	int error = 0;

	matchn = lookup_name_entry(gr_to_filename(new_dentry, mnt));

	lock_kernel();
	error = vfs_rename(old_dir, old_dentry, new_dir, new_dentry);
	unlock_kernel();

	if (unlikely(error))
		return error;

	/* we wouldn't have to check d_inode if it weren't for
	   NFS silly-renaming
	 */

	write_lock(&gr_inode_lock);
	if (unlikely(replace && new_dentry->d_inode)) {
		/* NOTE(review): the link-count guard here reads
		 * old_dentry->d_inode->i_nlink although it is the
		 * NEW dentry's inode being deleted -- confirm intended */
		if (unlikely(lookup_inodev_entry(new_dentry->d_inode->i_ino,
						 new_dentry->d_inode->i_dev) &&
			     (old_dentry->d_inode->i_nlink <= 1)))
			do_handle_delete(new_dentry->d_inode->i_ino,
					 new_dentry->d_inode->i_dev);
	}

	/* the old name disappears; mark its records deleted when this
	 * was the last link */
	if (unlikely(lookup_inodev_entry(old_dentry->d_inode->i_ino,
					 old_dentry->d_inode->i_dev) &&
		     (old_dentry->d_inode->i_nlink <= 1)))
		do_handle_delete(old_dentry->d_inode->i_ino,
				 old_dentry->d_inode->i_dev);

	/* rebind the destination pathname's records to the moved inode */
	if (unlikely((unsigned long)matchn))
		do_handle_create(matchn, old_dentry, mnt);
	write_unlock(&gr_inode_lock);

	return error;
}
1952 |
|
1953 |
static int |
1954 |
lookup_special_role_auth(const char *rolename, unsigned char **salt, |
1955 |
unsigned char **sum) |
1956 |
{ |
1957 |
struct acl_role_label *r; |
1958 |
struct role_transition *trans; |
1959 |
__u16 i; |
1960 |
int found = 0; |
1961 |
|
1962 |
/* check transition table */ |
1963 |
|
1964 |
for (trans = current->role->transitions; trans; trans = trans->next) { |
1965 |
if (!strcmp(rolename, trans->rolename)) { |
1966 |
found = 1; |
1967 |
break; |
1968 |
} |
1969 |
} |
1970 |
|
1971 |
if (!found) |
1972 |
return 0; |
1973 |
|
1974 |
/* handle special roles that do not require authentication */ |
1975 |
|
1976 |
for (r = role_list_head; r; r = r->next) { |
1977 |
if (!strcmp(rolename, r->rolename) |
1978 |
&& (r->roletype & GR_ROLE_NOPW)) { |
1979 |
*salt = NULL; |
1980 |
*sum = NULL; |
1981 |
return 1; |
1982 |
} |
1983 |
} |
1984 |
|
1985 |
for (i = 0; i < num_sprole_pws; i++) { |
1986 |
if (!strcmp(rolename, acl_special_roles[i]->rolename)) { |
1987 |
*salt = acl_special_roles[i]->salt; |
1988 |
*sum = acl_special_roles[i]->sum; |
1989 |
return 1; |
1990 |
} |
1991 |
} |
1992 |
|
1993 |
return 0; |
1994 |
} |
1995 |
|
1996 |
static void |
1997 |
assign_special_role(char *rolename) |
1998 |
{ |
1999 |
struct acl_object_label *obj; |
2000 |
struct acl_role_label *r; |
2001 |
struct acl_role_label *assigned = NULL; |
2002 |
struct task_struct *tsk; |
2003 |
struct file *filp; |
2004 |
|
2005 |
for (r = role_list_head; r; r = r->next) |
2006 |
if (!strcmp(rolename, r->rolename) && |
2007 |
(r->roletype & GR_ROLE_SPECIAL)) |
2008 |
assigned = r; |
2009 |
|
2010 |
if (!assigned) |
2011 |
return; |
2012 |
|
2013 |
tsk = current->p_pptr; |
2014 |
filp = tsk->exec_file; |
2015 |
|
2016 |
if (tsk && filp) { |
2017 |
tsk->is_writable = 0; |
2018 |
|
2019 |
acl_sp_role_value = (acl_sp_role_value % 65535) + 1; |
2020 |
tsk->acl_sp_role = 1; |
2021 |
tsk->acl_role_id = acl_sp_role_value; |
2022 |
tsk->role = assigned; |
2023 |
tsk->acl = |
2024 |
chk_subj_label(filp->f_dentry, filp->f_vfsmnt, tsk->role); |
2025 |
|
2026 |
/* ignore additional mmap checks for processes that are writable |
2027 |
by the default ACL */ |
2028 |
obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label); |
2029 |
if (unlikely(obj->mode & GR_WRITE)) |
2030 |
tsk->is_writable = 1; |
2031 |
obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, tsk->role->root_label); |
2032 |
if (unlikely(obj->mode & GR_WRITE)) |
2033 |
tsk->is_writable = 1; |
2034 |
|
2035 |
#ifdef CONFIG_GRKERNSEC_ACL_DEBUG |
2036 |
printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid); |
2037 |
#endif |
2038 |
} |
2039 |
|
2040 |
return; |
2041 |
} |
2042 |
|
2043 |
ssize_t |
2044 |
write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos) |
2045 |
{ |
2046 |
struct gr_arg *arg; |
2047 |
unsigned char *sprole_salt; |
2048 |
unsigned char *sprole_sum; |
2049 |
int error = sizeof (struct gr_arg); |
2050 |
int error2 = 0; |
2051 |
|
2052 |
down(&gr_dev_sem); |
2053 |
|
2054 |
arg = (struct gr_arg *) buf; |
2055 |
|
2056 |
if (count != sizeof (struct gr_arg)) { |
2057 |
security_alert_good(GR_DEV_ACL_MSG, count, |
2058 |
(int) sizeof (struct gr_arg)); |
2059 |
error = -EINVAL; |
2060 |
goto out; |
2061 |
} |
2062 |
|
2063 |
if ((gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES) |
2064 |
&& time_before_eq(gr_auth_expires, jiffies)) { |
2065 |
gr_auth_expires = 0; |
2066 |
gr_auth_attempts = 0; |
2067 |
} |
2068 |
|
2069 |
if (copy_from_user(gr_usermode, arg, sizeof (struct gr_arg))) { |
2070 |
error = -EFAULT; |
2071 |
goto out; |
2072 |
} |
2073 |
|
2074 |
if (gr_usermode->mode != SPROLE && time_after(gr_auth_expires, jiffies)) { |
2075 |
error = -EBUSY; |
2076 |
goto out; |
2077 |
} |
2078 |
|
2079 |
/* if non-root trying to do anything other than use a special role, |
2080 |
do not attempt authentication, do not count towards authentication |
2081 |
locking |
2082 |
*/ |
2083 |
|
2084 |
if (gr_usermode->mode != SPROLE && current->uid) { |
2085 |
error = -EPERM; |
2086 |
goto out; |
2087 |
} |
2088 |
|
2089 |
/* ensure pw and special role name are null terminated */ |
2090 |
|
2091 |
gr_usermode->pw[GR_PW_LEN - 1] = '\0'; |
2092 |
gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0'; |
2093 |
|
2094 |
/* Okay. |
2095 |
* We have our enough of the argument structure..(we have yet |
2096 |
* to copy_from_user the tables themselves) . Copy the tables |
2097 |
* only if we need them, i.e. for loading operations. */ |
2098 |
|
2099 |
switch (gr_usermode->mode) { |
2100 |
case STATUS: |
2101 |
if (gr_status & GR_READY) |
2102 |
error = 1; |
2103 |
else |
2104 |
error = 2; |
2105 |
goto out; |
2106 |
case SHUTDOWN: |
2107 |
if ((gr_status & GR_READY) |
2108 |
&& !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { |
2109 |
gr_status &= ~GR_READY; |
2110 |
security_alert_good(GR_SHUTS_ACL_MSG, DEFAULTSECARGS); |
2111 |
free_variables(); |
2112 |
memset(gr_usermode, 0, sizeof (struct gr_arg)); |
2113 |
memset(gr_system_salt, 0, GR_SALT_LEN); |
2114 |
memset(gr_system_sum, 0, GR_SHA_LEN); |
2115 |
} else if (gr_status & GR_READY) { |
2116 |
security_alert(GR_SHUTF_ACL_MSG, DEFAULTSECARGS); |
2117 |
error = -EPERM; |
2118 |
} else { |
2119 |
security_alert_good(GR_SHUTI_ACL_MSG, DEFAULTSECARGS); |
2120 |
error = -EAGAIN; |
2121 |
} |
2122 |
break; |
2123 |
case ENABLE: |
2124 |
if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode))) |
2125 |
security_alert_good(GR_ENABLE_ACL_MSG, GR_VERSION); |
2126 |
else { |
2127 |
if (gr_status & GR_READY) |
2128 |
error = -EAGAIN; |
2129 |
else |
2130 |
error = error2; |
2131 |
security_alert(GR_ENABLEF_ACL_MSG, GR_VERSION, |
2132 |
DEFAULTSECARGS); |
2133 |
} |
2134 |
break; |
2135 |
case RELOAD: |
2136 |
if (!(gr_status & GR_READY)) { |
2137 |
security_alert_good(GR_RELOADI_ACL_MSG); |
2138 |
error = -EAGAIN; |
2139 |
} else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { |
2140 |
lock_kernel(); |
2141 |
gr_status &= ~GR_READY; |
2142 |
free_variables(); |
2143 |
if (!(error2 = gracl_init(gr_usermode))) { |
2144 |
unlock_kernel(); |
2145 |
security_alert_good(GR_RELOAD_ACL_MSG, |
2146 |
GR_VERSION); |
2147 |
} else { |
2148 |
unlock_kernel(); |
2149 |
error = error2; |
2150 |
security_alert(GR_RELOADF_ACL_MSG, GR_VERSION, |
2151 |
DEFAULTSECARGS); |
2152 |
} |
2153 |
} else { |
2154 |
security_alert(GR_RELOADF_ACL_MSG, GR_VERSION, |
2155 |
DEFAULTSECARGS); |
2156 |
error = -EPERM; |
2157 |
} |
2158 |
break; |
2159 |
case SEGVMOD: |
2160 |
if (unlikely(!(gr_status & GR_READY))) { |
2161 |
security_alert_good(GR_SEGVMODI_ACL_MSG, |
2162 |
DEFAULTSECARGS); |
2163 |
error = -EAGAIN; |
2164 |
break; |
2165 |
} |
2166 |
|
2167 |
if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { |
2168 |
security_alert_good(GR_SEGVMODS_ACL_MSG, |
2169 |
DEFAULTSECARGS); |
2170 |
if (gr_usermode->segv_device && gr_usermode->segv_inode) { |
2171 |
struct acl_subject_label *segvacl; |
2172 |
segvacl = |
2173 |
lookup_acl_subj_label(gr_usermode->segv_inode, |
2174 |
gr_usermode->segv_device, |
2175 |
current->role); |
2176 |
if (segvacl) { |
2177 |
segvacl->crashes = 0; |
2178 |
segvacl->expires = 0; |
2179 |
} |
2180 |
} else if (gr_find_uid(gr_usermode->segv_uid) >= 0) { |
2181 |
gr_remove_uid(gr_usermode->segv_uid); |
2182 |
} |
2183 |
} else { |
2184 |
security_alert(GR_SEGVMODF_ACL_MSG, DEFAULTSECARGS); |
2185 |
error = -EPERM; |
2186 |
} |
2187 |
break; |
2188 |
case SPROLE: |
2189 |
if (unlikely(!(gr_status & GR_READY))) { |
2190 |
security_alert_good(GR_SPROLEI_ACL_MSG, DEFAULTSECARGS); |
2191 |
error = -EAGAIN; |
2192 |
break; |
2193 |
} |
2194 |
|
2195 |
if ((current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES) |
2196 |
&& time_before_eq(current->role->expires, jiffies)) { |
2197 |
current->role->expires = 0; |
2198 |
current->role->auth_attempts = 0; |
2199 |
} |
2200 |
|
2201 |
if (time_after(current->role->expires, jiffies)) { |
2202 |
error = -EBUSY; |
2203 |
goto out; |
2204 |
} |
2205 |
|
2206 |
if (lookup_special_role_auth |
2207 |
(gr_usermode->sp_role, &sprole_salt, &sprole_sum) |
2208 |
&& ((!sprole_salt && !sprole_sum) |
2209 |
|| !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) { |
2210 |
assign_special_role(gr_usermode->sp_role); |
2211 |
security_alert_good(GR_SPROLES_ACL_MSG, |
2212 |
(current->p_pptr) ? current-> |
2213 |
p_pptr->role->rolename : "", |
2214 |
acl_sp_role_value, DEFAULTSECARGS); |
2215 |
} else { |
2216 |
security_alert(GR_SPROLEF_ACL_MSG, gr_usermode->sp_role, |
2217 |
DEFAULTSECARGS); |
2218 |
error = -EPERM; |
2219 |
current->role->auth_attempts++; |
2220 |
if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES) { |
2221 |
current->role->expires = |
2222 |
jiffies + CONFIG_GRKERNSEC_ACL_TIMEOUT * HZ; |
2223 |
security_alert(GR_MAXROLEPW_ACL_MSG, |
2224 |
CONFIG_GRKERNSEC_ACL_MAXTRIES, |
2225 |
gr_usermode->sp_role, DEFAULTSECARGS); |
2226 |
} |
2227 |
|
2228 |
goto out; |
2229 |
} |
2230 |
break; |
2231 |
case UNSPROLE: |
2232 |
if (unlikely(!(gr_status & GR_READY))) { |
2233 |
security_alert_good(GR_UNSPROLEI_ACL_MSG, DEFAULTSECARGS); |
2234 |
error = -EAGAIN; |
2235 |
break; |
2236 |
} |
2237 |
|
2238 |
if ((current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES) |
2239 |
&& time_before_eq(current->role->expires, jiffies)) { |
2240 |
current->role->expires = 0; |
2241 |
current->role->auth_attempts = 0; |
2242 |
} |
2243 |
|
2244 |
if (time_after(current->role->expires, jiffies)) { |
2245 |
error = -EBUSY; |
2246 |
goto out; |
2247 |
} |
2248 |
|
2249 |
if ((current->role->roletype & GR_ROLE_SPECIAL) && |
2250 |
lookup_special_role_auth |
2251 |
(current->role->rolename, &sprole_salt, &sprole_sum) |
2252 |
&& ((!sprole_salt && !sprole_sum) |
2253 |
|| !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) { |
2254 |
security_alert_good(GR_UNSPROLES_ACL_MSG, |
2255 |
(current->p_pptr) ? current-> |
2256 |
p_pptr->role->rolename : "", |
2257 |
(current->p_pptr) ? current-> |
2258 |
p_pptr->acl_role_id : 0, DEFAULTSECARGS); |
2259 |
gr_set_acls(1); |
2260 |
if (current->p_pptr) |
2261 |
current->p_pptr->acl_sp_role = 0; |
2262 |
} else { |
2263 |
security_alert(GR_UNSPROLEF_ACL_MSG, gr_usermode->sp_role, |
2264 |
DEFAULTSECARGS); |
2265 |
error = -EPERM; |
2266 |
current->role->auth_attempts++; |
2267 |
if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES) { |
2268 |
current->role->expires = |
2269 |
jiffies + CONFIG_GRKERNSEC_ACL_TIMEOUT * HZ; |
2270 |
security_alert(GR_MAXROLEPW_ACL_MSG, |
2271 |
CONFIG_GRKERNSEC_ACL_MAXTRIES, |
2272 |
current->role->rolename, DEFAULTSECARGS); |
2273 |
} |
2274 |
|
2275 |
goto out; |
2276 |
} |
2277 |
break; |
2278 |
default: |
2279 |
security_alert(GR_INVMODE_ACL_MSG, gr_usermode->mode, |
2280 |
DEFAULTSECARGS); |
2281 |
error = -EINVAL; |
2282 |
break; |
2283 |
} |
2284 |
|
2285 |
if (error != -EPERM) |
2286 |
goto out; |
2287 |
|
2288 |
gr_auth_attempts++; |
2289 |
|
2290 |
if (gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES) { |
2291 |
security_alert(GR_MAXPW_ACL_MSG, CONFIG_GRKERNSEC_ACL_MAXTRIES); |
2292 |
gr_auth_expires = jiffies + CONFIG_GRKERNSEC_ACL_TIMEOUT * HZ; |
2293 |
} |
2294 |
|
2295 |
out: |
2296 |
up(&gr_dev_sem); |
2297 |
return error; |
2298 |
} |
2299 |
|
2300 |
/*
 * Re-derive the role and subject label of every task on the system.
 *
 * @type == 0: full relabel of all tasks (policy load/reload).
 * @type != 0: called when a special role is dropped; only tasks that
 * inherited the caller's role and acl_role_id are relabeled.
 *
 * Returns 0 on success.  Returns 1 (with the tasklist lock already
 * dropped) if some task's executable has no subject label under the
 * current policy; the failure is logged via GR_DEFACL_MSG.
 */
int
gr_set_acls(const int type)
{
	struct acl_object_label *obj;
	struct task_struct *task;
	struct file *filp;
	unsigned short i;

	read_lock(&tasklist_lock);
	for_each_task(task) {
		/* check to see if we're called from the exit handler,
		   if so, only replace ACLs that have inherited the admin
		   ACL */

		if (type && (task->role != current->role ||
			     task->acl_role_id != current->acl_role_id))
			continue;

		task->acl_role_id = 0;

		if ((filp = task->exec_file)) {
			/* user process: pick its role from uid/gid, then
			   its subject from the executable's path */
			do_set_role_label(task, task->uid, task->gid);

			task->acl =
			    chk_subj_label(filp->f_dentry, filp->f_vfsmnt,
					   task->role);
			if (task->acl) {
				struct acl_subject_label *curr;
				curr = task->acl;

				task->is_writable = 0;
				/* ignore additional mmap checks for processes that are writable
				   by the default ACL */
				obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label);
				if (unlikely(obj->mode & GR_WRITE))
					task->is_writable = 1;
				obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, task->role->root_label);
				if (unlikely(obj->mode & GR_WRITE))
					task->is_writable = 1;

#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
				printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
#endif
				/* apply the subject's rlimit overrides
				   unless it is in learn mode (learn mode
				   grows limits on demand instead) */
				if (!(curr->mode & GR_LEARN))
					for (i = 0; i < RLIM_NLIMITS; i++) {
						if (!(curr->resmask & (1 << i)))
							continue;

						task->rlim[i].rlim_cur =
						    curr->res[i].rlim_cur;
						task->rlim[i].rlim_max =
						    curr->res[i].rlim_max;
					}
			} else {
				/* no subject for this binary in the new
				   policy: abort the relabel and report */
				read_unlock(&tasklist_lock);
				security_alert_good(GR_DEFACL_MSG, task->comm,
						    task->pid);
				return 1;
			}
		} else {
			// it's a kernel process
			task->role = kernel_role;
			task->acl = kernel_role->root_label;
#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
			/* NOTE(review): this clears GR_FIND on the shared
			   kernel root label once per kernel thread; the
			   repeated stores are redundant but harmless */
			task->acl->mode &= ~GR_FIND;
#endif
		}
	}
	read_unlock(&tasklist_lock);
	return 0;
}
2371 |
|
2372 |
/*
 * Called when a task hits (or requests) a resource limit.
 *
 * Outside of learn mode the event is only logged (when
 * CONFIG_GRKERNSEC_RESLOG is enabled).  When the task's subject is in
 * learn mode and tracks resource @res in its resmask, the subject's
 * stored soft limit is raised to @wanted plus a per-resource headroom
 * bump, the hard limit is raised likewise if exceeded, and the new
 * values are emitted to the learn log.
 */
void
gr_learn_resource(const struct task_struct *task,
		  const int res, const unsigned long wanted)
{
	struct acl_subject_label *acl;

	/* in learn mode, skip the ordinary resource-overrun log -- the
	   event will be recorded through the learn log instead */
	if (unlikely((gr_status & GR_READY) &&
		     task->acl && (task->acl->mode & GR_LEARN)))
		goto skip_reslog;

#ifdef CONFIG_GRKERNSEC_RESLOG
	gr_log_resource(task, res, wanted);
#endif
      skip_reslog:

	if (unlikely(!(gr_status & GR_READY) || !wanted))
		return;

	acl = task->acl;

	/* only act for learn-mode subjects that track this resource */
	if (likely(!acl || !(acl->mode & GR_LEARN) ||
		   !(acl->resmask & (1 << (unsigned short) res))))
		return;

	if (wanted >= acl->res[res].rlim_cur) {
		unsigned long res_add;

		/* grow to the requested value plus per-resource headroom
		   so the learned policy doesn't sit exactly at the edge */
		res_add = wanted;
		switch (res) {
		case RLIMIT_CPU:
			res_add += GR_RLIM_CPU_BUMP;
			break;
		case RLIMIT_FSIZE:
			res_add += GR_RLIM_FSIZE_BUMP;
			break;
		case RLIMIT_DATA:
			res_add += GR_RLIM_DATA_BUMP;
			break;
		case RLIMIT_STACK:
			res_add += GR_RLIM_STACK_BUMP;
			break;
		case RLIMIT_CORE:
			res_add += GR_RLIM_CORE_BUMP;
			break;
		case RLIMIT_RSS:
			res_add += GR_RLIM_RSS_BUMP;
			break;
		case RLIMIT_NPROC:
			res_add += GR_RLIM_NPROC_BUMP;
			break;
		case RLIMIT_NOFILE:
			res_add += GR_RLIM_NOFILE_BUMP;
			break;
		case RLIMIT_MEMLOCK:
			res_add += GR_RLIM_MEMLOCK_BUMP;
			break;
		case RLIMIT_AS:
			res_add += GR_RLIM_AS_BUMP;
			break;
		case RLIMIT_LOCKS:
			res_add += GR_RLIM_LOCKS_BUMP;
			break;
		}

		acl->res[res].rlim_cur = res_add;

		if (wanted > acl->res[res].rlim_max)
			acl->res[res].rlim_max = res_add;

		security_learn(GR_LEARN_AUDIT_MSG, current->role->rolename,
			       current->role->roletype, acl->filename,
			       acl->res[res].rlim_cur, acl->res[res].rlim_max,
			       "", (unsigned long) res);
	}

	return;
}
2449 |
|
2450 |
#ifdef CONFIG_SYSCTL |
2451 |
extern struct proc_dir_entry *proc_sys_root; |
2452 |
|
2453 |
/*
 * Mediate access to a sysctl entry.
 *
 * Rebuilds the "/proc/sys/..." pathname for @table by walking the proc
 * directory entries back to proc_sys_root, looks up the matching ACL
 * object for the current subject and checks the requested access
 * (@oldval != NULL means read, @newval != NULL means write).
 *
 * Returns the granted mode bits (non-zero) on success, 0 to deny.
 * Returns 1 unconditionally while the ACL system is not active.
 */
__u32
gr_handle_sysctl(const struct ctl_table *table, const void *oldval,
		 const void *newval)
{
	struct proc_dir_entry *tmp;
	struct nameidata nd;
	const char *proc_sys = "/proc/sys";
	/* per-CPU scratch page used to assemble the pathname */
	char *path = gr_shared_page[0][smp_processor_id()];
	struct acl_object_label *obj;
	unsigned short len = 0, pos = 0, depth = 0, i;
	__u32 err = 0;
	__u32 mode = 0;

	if (unlikely(!(gr_status & GR_READY)))
		return 1;

	if (oldval)
		mode |= GR_READ;
	if (newval)
		mode |= GR_WRITE;

	/* convert the requested sysctl entry into a pathname */

	for (tmp = table->de; tmp != proc_sys_root; tmp = tmp->parent) {
		len += strlen(tmp->name);
		len++;
		depth++;
	}

	if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE)
		return 0;	// deny

	memset(path, 0, PAGE_SIZE);

	memcpy(path, proc_sys, strlen(proc_sys));

	pos += strlen(proc_sys);

	/* the proc tree only links child to parent, so for each path
	   component we rescan from the leaf to find the entry at that
	   depth (O(depth^2), depth is small) */
	for (; depth > 0; depth--) {
		path[pos] = '/';
		pos++;
		for (i = 1, tmp = table->de; tmp != proc_sys_root;
		     tmp = tmp->parent) {
			if (depth == i) {
				memcpy(path + pos, tmp->name,
				       strlen(tmp->name));
				pos += strlen(tmp->name);
			}
			i++;
		}
	}

	if (path_init(path, LOOKUP_FOLLOW, &nd))
		err = path_walk(path, &nd);

	/* NOTE(review): on lookup failure err holds path_walk's negative
	   errno truncated to __u32 and is returned as-is; a caller that
	   merely tests the result for non-zero would treat this as a
	   grant -- confirm callers check the mode bits explicitly */
	if (err)
		goto out;

	obj = chk_obj_label(nd.dentry, nd.mnt, current->acl);
	err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);

	/* learn mode: grant the access and record what would have been
	   required so the policy can be extended */
	if (unlikely((current->acl->mode & GR_LEARN) && ((err & mode) != mode))) {
		__u32 new_mode = mode;

		new_mode &= ~(GR_AUDITS | GR_SUPPRESS);

		err = new_mode;
		gr_log_learn(current->role, current->uid, current->gid,
			     current, path, new_mode);
	} else if ((err & mode) != mode && !(err & GR_SUPPRESS)) {
		security_alert(GR_SYSCTL_ACL_MSG, "denied", path,
			       (mode & GR_READ) ? " reading" : "",
			       (mode & GR_WRITE) ? " writing" : "",
			       DEFAULTSECARGS);
		err = 0;
	} else if ((err & mode) != mode) {
		/* denied with GR_SUPPRESS set: refuse silently */
		err = 0;
	} else if (((err & mode) == mode) && (err & GR_AUDITS)) {
		security_audit(GR_SYSCTL_ACL_MSG, "successful",
			       path, (mode & GR_READ) ? " reading" : "",
			       (mode & GR_WRITE) ? " writing" : "",
			       DEFAULTSECARGS);
	}

	path_release(&nd);

      out:
	return err;
}
2542 |
#endif |
2543 |
|
2544 |
/*
 * Mediate a ptrace request on @task.
 *
 * Returns 1 to deny the request, 0 to allow it.  If the target's
 * executable carries the GR_PTRACERD ("read-only ptrace") object flag,
 * inspection is allowed but every register/memory-modifying request is
 * refused.  Otherwise, tracing across subject boundaries is denied and
 * logged unless the tracer has override or admin (GR_ROLE_GOD)
 * privilege.
 */
int
gr_handle_ptrace(struct task_struct *task, const long request)
{
	struct file *filp;
	__u32 retmode;

	if (unlikely(!(gr_status & GR_READY)))
		return 0;

	filp = task->exec_file;

	/* kernel threads have no exec_file: nothing to mediate */
	if (unlikely(!filp))
		return 0;

	retmode = gr_search_file(filp->f_dentry, GR_PTRACERD, filp->f_vfsmnt);

	if (retmode & GR_PTRACERD) {
		switch (request) {
		/* every request that writes registers or memory */
		case PTRACE_POKETEXT:
		case PTRACE_POKEDATA:
		case PTRACE_POKEUSR:
#if !defined(CONFIG_PPC32) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA)
		case PTRACE_SETREGS:
		case PTRACE_SETFPREGS:
#endif
#ifdef CONFIG_X86
		case PTRACE_SETFPXREGS:
#endif
#ifdef CONFIG_ALTIVEC
		case PTRACE_SETVRREGS:
#endif
			return 1;
		default:
			return 0;
		}
	} else if (!(current->acl->mode & GR_OVERRIDE) &&
		   !(current->role->roletype & GR_ROLE_GOD)
		   && (current->acl != task->acl
		       || (current->acl != current->role->root_label
			   && current->pid != task->pid))) {
		/* tracer runs under a different subject and lacks
		   override/admin privilege: deny and log */
		security_alert(GR_PTRACE_ACL_MSG,
			       gr_to_filename(filp->f_dentry, filp->f_vfsmnt),
			       task->comm, task->pid, DEFAULTSECARGS);
		return 1;
	}

	return 0;
}
2592 |
|
2593 |
int |
2594 |
gr_handle_ptrace_exec(const struct dentry *dentry, const struct vfsmount *mnt) |
2595 |
{ |
2596 |
__u32 retmode; |
2597 |
struct acl_subject_label *subj; |
2598 |
|
2599 |
if (unlikely(!(gr_status & GR_READY))) |
2600 |
return 0; |
2601 |
|
2602 |
if (unlikely |
2603 |
((current->ptrace & PT_PTRACED) |
2604 |
&& !(current->acl->mode & GR_OVERRIDE))) |
2605 |
retmode = gr_search_file(dentry, GR_PTRACERD, mnt); |
2606 |
else |
2607 |
return 0; |
2608 |
|
2609 |
subj = chk_subj_label(dentry, mnt, current->role); |
2610 |
|
2611 |
if (!(retmode & GR_PTRACERD) && |
2612 |
!(current->role->roletype & GR_ROLE_GOD) && |
2613 |
(current->acl != subj)) { |
2614 |
security_alert(GR_PTRACE_EXEC_ACL_MSG, |
2615 |
gr_to_filename(dentry, mnt), DEFAULTSECARGS); |
2616 |
return 1; |
2617 |
} |
2618 |
|
2619 |
return 0; |
2620 |
} |
2621 |
|
2622 |
int |
2623 |
gr_handle_mmap(const struct file *filp, const unsigned long prot) |
2624 |
{ |
2625 |
struct acl_object_label *obj, *obj2; |
2626 |
|
2627 |
if (unlikely(!(gr_status & GR_READY) || |
2628 |
(current->acl->mode & GR_OVERRIDE) || !filp || |
2629 |
!(prot & PROT_EXEC))) |
2630 |
return 0; |
2631 |
|
2632 |
if (unlikely(current->is_writable)) |
2633 |
return 0; |
2634 |
|
2635 |
obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label); |
2636 |
obj2 = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, |
2637 |
current->role->root_label); |
2638 |
if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) { |
2639 |
security_alert(GR_WRITLIB_ACL_MSG, |
2640 |
gr_to_filename(filp->f_dentry, filp->f_vfsmnt), |
2641 |
DEFAULTSECARGS); |
2642 |
return 1; |
2643 |
} |
2644 |
|
2645 |
return 0; |
2646 |
} |
2647 |
|
2648 |
int |
2649 |
gr_acl_handle_mmap(const struct file *file, const unsigned long prot) |
2650 |
{ |
2651 |
__u32 mode; |
2652 |
|
2653 |
if (unlikely(!file || !(prot & PROT_EXEC))) |
2654 |
return 1; |
2655 |
|
2656 |
mode = |
2657 |
gr_search_file(file->f_dentry, |
2658 |
GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS, |
2659 |
file->f_vfsmnt); |
2660 |
|
2661 |
if (unlikely(!gr_tpe_allow(file) || (!(mode & GR_EXEC) && !(mode & GR_SUPPRESS)))) { |
2662 |
security_alert(GR_MMAP_ACL_MSG, "denied", |
2663 |
gr_to_filename(file->f_dentry, file->f_vfsmnt), |
2664 |
DEFAULTSECARGS); |
2665 |
return 0; |
2666 |
} else if (unlikely(!gr_tpe_allow(file) || !(mode & GR_EXEC))) { |
2667 |
return 0; |
2668 |
} else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) { |
2669 |
security_audit(GR_MMAP_ACL_MSG, "successful", |
2670 |
gr_to_filename(file->f_dentry, file->f_vfsmnt), |
2671 |
DEFAULTSECARGS); |
2672 |
return 1; |
2673 |
} |
2674 |
|
2675 |
return 1; |
2676 |
} |
2677 |
|
2678 |
int |
2679 |
gr_acl_handle_mprotect(const struct file *file, const unsigned long prot) |
2680 |
{ |
2681 |
__u32 mode; |
2682 |
|
2683 |
if (unlikely(!file || !(prot & PROT_EXEC))) |
2684 |
return 1; |
2685 |
|
2686 |
mode = |
2687 |
gr_search_file(file->f_dentry, |
2688 |
GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS, |
2689 |
file->f_vfsmnt); |
2690 |
|
2691 |
if (unlikely(!gr_tpe_allow(file) || (!(mode & GR_EXEC) && !(mode & GR_SUPPRESS)))) { |
2692 |
security_alert(GR_MPROTECT_ACL_MSG, "denied", |
2693 |
gr_to_filename(file->f_dentry, file->f_vfsmnt), |
2694 |
DEFAULTSECARGS); |
2695 |
return 0; |
2696 |
} else if (unlikely(!gr_tpe_allow(file) || !(mode & GR_EXEC))) { |
2697 |
return 0; |
2698 |
} else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) { |
2699 |
security_audit(GR_MPROTECT_ACL_MSG, "successful", |
2700 |
gr_to_filename(file->f_dentry, file->f_vfsmnt), |
2701 |
DEFAULTSECARGS); |
2702 |
return 1; |
2703 |
} |
2704 |
|
2705 |
return 1; |
2706 |
} |
2707 |
|
2708 |
/*
 * Emit a process-accounting audit record when a task whose subject has
 * GR_PROCACCT exits.  Logs wall-clock runtime, consumed CPU time, the
 * task's and its parent's ids, ttys, remote IPs and the exit status.
 */
void
gr_acl_handle_psacct(struct task_struct *task, const long code)
{
	unsigned long runtime;
	unsigned long cputime;
	unsigned int wday, cday;
	__u8 whr, chr;
	__u8 wmin, cmin;
	__u8 wsec, csec;
	char cur_tty[64] = { 0 };
	char parent_tty[64] = { 0 };

	if (unlikely(!(gr_status & GR_READY) || !task->acl ||
		     !(task->acl->mode & GR_PROCACCT)))
		return;

	/* wall-clock time since the task started, split into d/h/m/s */
	runtime = (jiffies - task->start_time) / HZ;
	wday = runtime / (3600 * 24);
	runtime -= wday * (3600 * 24);
	whr = runtime / 3600;
	runtime -= whr * 3600;
	wmin = runtime / 60;
	runtime -= wmin * 60;
	wsec = runtime;

	/* user + system CPU time, split the same way */
	cputime = (task->times.tms_utime + task->times.tms_stime) / HZ;
	cday = cputime / (3600 * 24);
	cputime -= cday * (3600 * 24);
	chr = cputime / 3600;
	cputime -= chr * 3600;
	cmin = cputime / 60;
	cputime -= cmin * 60;
	csec = cputime;

	security_audit(GR_ACL_PROCACCT_MSG, gr_task_fullpath(task), task->comm,
		       task->pid, NIPQUAD(task->curr_ip), tty_name(task->tty,
								   cur_tty),
		       task->uid, task->euid, task->gid, task->egid, wday, whr,
		       wmin, wsec, cday, chr, cmin, csec,
		       (task->
			flags & PF_SIGNALED) ? "killed by signal" : "exited",
		       code, gr_parent_task_fullpath(task),
		       task->p_pptr->comm, task->p_pptr->pid,
		       NIPQUAD(task->p_pptr->curr_ip),
		       tty_name(task->p_pptr->tty, parent_tty),
		       task->p_pptr->uid, task->p_pptr->euid, task->p_pptr->gid,
		       task->p_pptr->egid);

	return;
}
2758 |
|
2759 |
void gr_set_kernel_label(struct task_struct *task) |
2760 |
{ |
2761 |
if (gr_status & GR_READY) { |
2762 |
task->role = kernel_role; |
2763 |
task->acl = kernel_role->root_label; |
2764 |
} |
2765 |
return; |
2766 |
} |