Diffstat (limited to 'security/security.c')
-rw-r--r--	security/security.c	747
1 file changed, 158 insertions, 589 deletions
diff --git a/security/security.c b/security/security.c
index 596d41818577..31a688650601 100644
--- a/security/security.c
+++ b/security/security.c
@@ -32,24 +32,7 @@
 #include <net/flow.h>
 #include <net/sock.h>
-#define SECURITY_HOOK_ACTIVE_KEY(HOOK, IDX) security_hook_active_##HOOK##_##IDX
-
-/*
- * Identifier for the LSM static calls.
- * HOOK is an LSM hook as defined in linux/lsm_hookdefs.h
- * IDX is the index of the static call. 0 <= NUM < MAX_LSM_COUNT
- */
-#define LSM_STATIC_CALL(HOOK, IDX) lsm_static_call_##HOOK##_##IDX
-
-/*
- * Call the macro M for each LSM hook MAX_LSM_COUNT times.
- */
-#define LSM_LOOP_UNROLL(M, ...)	\
-do {	\
-	UNROLL(MAX_LSM_COUNT, M, __VA_ARGS__)	\
-} while (0)
-
-#define LSM_DEFINE_UNROLL(M, ...) UNROLL(MAX_LSM_COUNT, M, __VA_ARGS__)
+#include "lsm.h"
 /*
  * These are descriptions of the reasons that can be passed to the
@@ -90,23 +73,34 @@ const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX + 1] = {
 	[LOCKDOWN_CONFIDENTIALITY_MAX] = "confidentiality",
 };
-static BLOCKING_NOTIFIER_HEAD(blocking_lsm_notifier_chain);
+bool lsm_debug __ro_after_init;
-static struct kmem_cache *lsm_file_cache;
-static struct kmem_cache *lsm_inode_cache;
+unsigned int lsm_active_cnt __ro_after_init;
+const struct lsm_id *lsm_idlist[MAX_LSM_COUNT];
-char *lsm_names;
-static struct lsm_blob_sizes blob_sizes __ro_after_init;
+struct lsm_blob_sizes blob_sizes;
-/* Boot-time LSM user choice */
-static __initdata const char *chosen_lsm_order;
-static __initdata const char *chosen_major_lsm;
+struct kmem_cache *lsm_file_cache;
+struct kmem_cache *lsm_inode_cache;
-static __initconst const char *const builtin_lsm_order = CONFIG_LSM;
+#define SECURITY_HOOK_ACTIVE_KEY(HOOK, IDX) security_hook_active_##HOOK##_##IDX
-/* Ordered list of LSMs to initialize. */
-static __initdata struct lsm_info *ordered_lsms[MAX_LSM_COUNT + 1];
-static __initdata struct lsm_info *exclusive;
+/*
+ * Identifier for the LSM static calls.
+ * HOOK is an LSM hook as defined in linux/lsm_hookdefs.h
+ * IDX is the index of the static call. 0 <= NUM < MAX_LSM_COUNT
+ */
+#define LSM_STATIC_CALL(HOOK, IDX) lsm_static_call_##HOOK##_##IDX
+
+/*
+ * Call the macro M for each LSM hook MAX_LSM_COUNT times.
+ */
+#define LSM_LOOP_UNROLL(M, ...)	\
+do {	\
+	UNROLL(MAX_LSM_COUNT, M, __VA_ARGS__)	\
+} while (0)
+
+#define LSM_DEFINE_UNROLL(M, ...) UNROLL(MAX_LSM_COUNT, M, __VA_ARGS__)
 #ifdef CONFIG_HAVE_STATIC_CALL
 #define LSM_HOOK_TRAMP(NAME, NUM) \
@@ -157,512 +151,26 @@ struct lsm_static_calls_table
 #undef INIT_LSM_STATIC_CALL
 };
-static __initdata bool debug;
-#define init_debug(...)	\
-	do {	\
-		if (debug)	\
-			pr_info(__VA_ARGS__);	\
-	} while (0)
-
-static bool __init is_enabled(struct lsm_info *lsm)
-{
-	if (!lsm->enabled)
-		return false;
-
-	return *lsm->enabled;
-}
-
-/* Mark an LSM's enabled flag. */
-static int lsm_enabled_true __initdata = 1;
-static int lsm_enabled_false __initdata = 0;
-static void __init set_enabled(struct lsm_info *lsm, bool enabled)
-{
-	/*
-	 * When an LSM hasn't configured an enable variable, we can use
-	 * a hard-coded location for storing the default enabled state.
-	 */
-	if (!lsm->enabled) {
-		if (enabled)
-			lsm->enabled = &lsm_enabled_true;
-		else
-			lsm->enabled = &lsm_enabled_false;
-	} else if (lsm->enabled == &lsm_enabled_true) {
-		if (!enabled)
-			lsm->enabled = &lsm_enabled_false;
-	} else if (lsm->enabled == &lsm_enabled_false) {
-		if (enabled)
-			lsm->enabled = &lsm_enabled_true;
-	} else {
-		*lsm->enabled = enabled;
-	}
-}
-
-/* Is an LSM already listed in the ordered LSMs list? */
-static bool __init exists_ordered_lsm(struct lsm_info *lsm)
-{
-	struct lsm_info **check;
-
-	for (check = ordered_lsms; *check; check++)
-		if (*check == lsm)
-			return true;
-
-	return false;
-}
-
-/* Append an LSM to the list of ordered LSMs to initialize. */
-static int last_lsm __initdata;
-static void __init append_ordered_lsm(struct lsm_info *lsm, const char *from)
-{
-	/* Ignore duplicate selections. */
-	if (exists_ordered_lsm(lsm))
-		return;
-
-	if (WARN(last_lsm == MAX_LSM_COUNT, "%s: out of LSM static calls!?\n", from))
-		return;
-
-	/* Enable this LSM, if it is not already set. */
-	if (!lsm->enabled)
-		lsm->enabled = &lsm_enabled_true;
-	ordered_lsms[last_lsm++] = lsm;
-
-	init_debug("%s ordered: %s (%s)\n", from, lsm->name,
-		   is_enabled(lsm) ? "enabled" : "disabled");
-}
-
-/* Is an LSM allowed to be initialized? */
-static bool __init lsm_allowed(struct lsm_info *lsm)
-{
-	/* Skip if the LSM is disabled. */
-	if (!is_enabled(lsm))
-		return false;
-
-	/* Not allowed if another exclusive LSM already initialized. */
-	if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && exclusive) {
-		init_debug("exclusive disabled: %s\n", lsm->name);
-		return false;
-	}
-
-	return true;
-}
-
-static void __init lsm_set_blob_size(int *need, int *lbs)
-{
-	int offset;
-
-	if (*need <= 0)
-		return;
-
-	offset = ALIGN(*lbs, sizeof(void *));
-	*lbs = offset + *need;
-	*need = offset;
-}
-
-static void __init lsm_set_blob_sizes(struct lsm_blob_sizes *needed)
-{
-	if (!needed)
-		return;
-
-	lsm_set_blob_size(&needed->lbs_cred, &blob_sizes.lbs_cred);
-	lsm_set_blob_size(&needed->lbs_file, &blob_sizes.lbs_file);
-	lsm_set_blob_size(&needed->lbs_ib, &blob_sizes.lbs_ib);
-	/*
-	 * The inode blob gets an rcu_head in addition to
-	 * what the modules might need.
-	 */
-	if (needed->lbs_inode && blob_sizes.lbs_inode == 0)
-		blob_sizes.lbs_inode = sizeof(struct rcu_head);
-	lsm_set_blob_size(&needed->lbs_inode, &blob_sizes.lbs_inode);
-	lsm_set_blob_size(&needed->lbs_ipc, &blob_sizes.lbs_ipc);
-	lsm_set_blob_size(&needed->lbs_key, &blob_sizes.lbs_key);
-	lsm_set_blob_size(&needed->lbs_msg_msg, &blob_sizes.lbs_msg_msg);
-	lsm_set_blob_size(&needed->lbs_perf_event, &blob_sizes.lbs_perf_event);
-	lsm_set_blob_size(&needed->lbs_sock, &blob_sizes.lbs_sock);
-	lsm_set_blob_size(&needed->lbs_superblock, &blob_sizes.lbs_superblock);
-	lsm_set_blob_size(&needed->lbs_task, &blob_sizes.lbs_task);
-	lsm_set_blob_size(&needed->lbs_tun_dev, &blob_sizes.lbs_tun_dev);
-	lsm_set_blob_size(&needed->lbs_xattr_count,
-			  &blob_sizes.lbs_xattr_count);
-	lsm_set_blob_size(&needed->lbs_bdev, &blob_sizes.lbs_bdev);
-}
-
-/* Prepare LSM for initialization. */
-static void __init prepare_lsm(struct lsm_info *lsm)
-{
-	int enabled = lsm_allowed(lsm);
-
-	/* Record enablement (to handle any following exclusive LSMs). */
-	set_enabled(lsm, enabled);
-
-	/* If enabled, do pre-initialization work. */
-	if (enabled) {
-		if ((lsm->flags & LSM_FLAG_EXCLUSIVE) && !exclusive) {
-			exclusive = lsm;
-			init_debug("exclusive chosen: %s\n", lsm->name);
-		}
-
-		lsm_set_blob_sizes(lsm->blobs);
-	}
-}
-
-/* Initialize a given LSM, if it is enabled. */
-static void __init initialize_lsm(struct lsm_info *lsm)
-{
-	if (is_enabled(lsm)) {
-		int ret;
-
-		init_debug("initializing %s\n", lsm->name);
-		ret = lsm->init();
-		WARN(ret, "%s failed to initialize: %d\n", lsm->name, ret);
-	}
-}
-
-/*
- * Current index to use while initializing the lsm id list.
- */
-u32 lsm_active_cnt __ro_after_init;
-const struct lsm_id *lsm_idlist[MAX_LSM_COUNT];
-
-/* Populate ordered LSMs list from comma-separated LSM name list. */
-static void __init ordered_lsm_parse(const char *order, const char *origin)
-{
-	struct lsm_info *lsm;
-	char *sep, *name, *next;
-
-	/* LSM_ORDER_FIRST is always first. */
-	for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
-		if (lsm->order == LSM_ORDER_FIRST)
-			append_ordered_lsm(lsm, " first");
-	}
-
-	/* Process "security=", if given. */
-	if (chosen_major_lsm) {
-		struct lsm_info *major;
-
-		/*
-		 * To match the original "security=" behavior, this
-		 * explicitly does NOT fallback to another Legacy Major
-		 * if the selected one was separately disabled: disable
-		 * all non-matching Legacy Major LSMs.
-		 */
-		for (major = __start_lsm_info; major < __end_lsm_info;
-		     major++) {
-			if ((major->flags & LSM_FLAG_LEGACY_MAJOR) &&
-			    strcmp(major->name, chosen_major_lsm) != 0) {
-				set_enabled(major, false);
-				init_debug("security=%s disabled: %s (only one legacy major LSM)\n",
-					   chosen_major_lsm, major->name);
-			}
-		}
-	}
-
-	sep = kstrdup(order, GFP_KERNEL);
-	next = sep;
-	/* Walk the list, looking for matching LSMs. */
-	while ((name = strsep(&next, ",")) != NULL) {
-		bool found = false;
-
-		for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
-			if (strcmp(lsm->name, name) == 0) {
-				if (lsm->order == LSM_ORDER_MUTABLE)
-					append_ordered_lsm(lsm, origin);
-				found = true;
-			}
-		}
-
-		if (!found)
-			init_debug("%s ignored: %s (not built into kernel)\n",
-				   origin, name);
-	}
-
-	/* Process "security=", if given. */
-	if (chosen_major_lsm) {
-		for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
-			if (exists_ordered_lsm(lsm))
-				continue;
-			if (strcmp(lsm->name, chosen_major_lsm) == 0)
-				append_ordered_lsm(lsm, "security=");
-		}
-	}
-
-	/* LSM_ORDER_LAST is always last. */
-	for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
-		if (lsm->order == LSM_ORDER_LAST)
-			append_ordered_lsm(lsm, " last");
-	}
-
-	/* Disable all LSMs not in the ordered list. */
-	for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++) {
-		if (exists_ordered_lsm(lsm))
-			continue;
-		set_enabled(lsm, false);
-		init_debug("%s skipped: %s (not in requested order)\n",
-			   origin, lsm->name);
-	}
-
-	kfree(sep);
-}
-
-static void __init lsm_static_call_init(struct security_hook_list *hl)
-{
-	struct lsm_static_call *scall = hl->scalls;
-	int i;
-
-	for (i = 0; i < MAX_LSM_COUNT; i++) {
-		/* Update the first static call that is not used yet */
-		if (!scall->hl) {
-			__static_call_update(scall->key, scall->trampoline,
-					     hl->hook.lsm_func_addr);
-			scall->hl = hl;
-			static_branch_enable(scall->active);
-			return;
-		}
-		scall++;
-	}
-	panic("%s - Ran out of static slots.\n", __func__);
-}
-
-static void __init lsm_early_cred(struct cred *cred);
-static void __init lsm_early_task(struct task_struct *task);
-
-static int lsm_append(const char *new, char **result);
-
-static void __init report_lsm_order(void)
-{
-	struct lsm_info **lsm, *early;
-	int first = 0;
-
-	pr_info("initializing lsm=");
-
-	/* Report each enabled LSM name, comma separated. */
-	for (early = __start_early_lsm_info;
-	     early < __end_early_lsm_info; early++)
-		if (is_enabled(early))
-			pr_cont("%s%s", first++ == 0 ? "" : ",", early->name);
-	for (lsm = ordered_lsms; *lsm; lsm++)
-		if (is_enabled(*lsm))
-			pr_cont("%s%s", first++ == 0 ? "" : ",", (*lsm)->name);
-
-	pr_cont("\n");
-}
-
-static void __init ordered_lsm_init(void)
-{
-	struct lsm_info **lsm;
-
-	if (chosen_lsm_order) {
-		if (chosen_major_lsm) {
-			pr_warn("security=%s is ignored because it is superseded by lsm=%s\n",
-				chosen_major_lsm, chosen_lsm_order);
-			chosen_major_lsm = NULL;
-		}
-		ordered_lsm_parse(chosen_lsm_order, "cmdline");
-	} else
-		ordered_lsm_parse(builtin_lsm_order, "builtin");
-
-	for (lsm = ordered_lsms; *lsm; lsm++)
-		prepare_lsm(*lsm);
-
-	report_lsm_order();
-
-	init_debug("cred blob size = %d\n", blob_sizes.lbs_cred);
-	init_debug("file blob size = %d\n", blob_sizes.lbs_file);
-	init_debug("ib blob size = %d\n", blob_sizes.lbs_ib);
-	init_debug("inode blob size = %d\n", blob_sizes.lbs_inode);
-	init_debug("ipc blob size = %d\n", blob_sizes.lbs_ipc);
-#ifdef CONFIG_KEYS
-	init_debug("key blob size = %d\n", blob_sizes.lbs_key);
-#endif /* CONFIG_KEYS */
-	init_debug("msg_msg blob size = %d\n", blob_sizes.lbs_msg_msg);
-	init_debug("sock blob size = %d\n", blob_sizes.lbs_sock);
-	init_debug("superblock blob size = %d\n", blob_sizes.lbs_superblock);
-	init_debug("perf event blob size = %d\n", blob_sizes.lbs_perf_event);
-	init_debug("task blob size = %d\n", blob_sizes.lbs_task);
-	init_debug("tun device blob size = %d\n", blob_sizes.lbs_tun_dev);
-	init_debug("xattr slots = %d\n", blob_sizes.lbs_xattr_count);
-	init_debug("bdev blob size = %d\n", blob_sizes.lbs_bdev);
-
-	/*
-	 * Create any kmem_caches needed for blobs
-	 */
-	if (blob_sizes.lbs_file)
-		lsm_file_cache = kmem_cache_create("lsm_file_cache",
-						   blob_sizes.lbs_file, 0,
-						   SLAB_PANIC, NULL);
-	if (blob_sizes.lbs_inode)
-		lsm_inode_cache = kmem_cache_create("lsm_inode_cache",
-						    blob_sizes.lbs_inode, 0,
-						    SLAB_PANIC, NULL);
-
-	lsm_early_cred((struct cred *) current->cred);
-	lsm_early_task(current);
-	for (lsm = ordered_lsms; *lsm; lsm++)
-		initialize_lsm(*lsm);
-}
-
-int __init early_security_init(void)
-{
-	struct lsm_info *lsm;
-
-	for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) {
-		if (!lsm->enabled)
-			lsm->enabled = &lsm_enabled_true;
-		prepare_lsm(lsm);
-		initialize_lsm(lsm);
-	}
-
-	return 0;
-}
-
 /**
- * security_init - initializes the security framework
+ * lsm_file_alloc - allocate a composite file blob
+ * @file: the file that needs a blob
  *
- * This should be called early in the kernel initialization sequence.
- */
-int __init security_init(void)
-{
-	struct lsm_info *lsm;
-
-	init_debug("legacy security=%s\n", chosen_major_lsm ? : " *unspecified*");
-	init_debug(" CONFIG_LSM=%s\n", builtin_lsm_order);
-	init_debug("boot arg lsm=%s\n", chosen_lsm_order ? : " *unspecified*");
-
-	/*
-	 * Append the names of the early LSM modules now that kmalloc() is
-	 * available
-	 */
-	for (lsm = __start_early_lsm_info; lsm < __end_early_lsm_info; lsm++) {
-		init_debug(" early started: %s (%s)\n", lsm->name,
-			   is_enabled(lsm) ? "enabled" : "disabled");
-		if (lsm->enabled)
-			lsm_append(lsm->name, &lsm_names);
-	}
-
-	/* Load LSMs in specified order. */
-	ordered_lsm_init();
-
-	return 0;
-}
-
-/* Save user chosen LSM */
-static int __init choose_major_lsm(char *str)
-{
-	chosen_major_lsm = str;
-	return 1;
-}
-__setup("security=", choose_major_lsm);
-
-/* Explicitly choose LSM initialization order. */
-static int __init choose_lsm_order(char *str)
-{
-	chosen_lsm_order = str;
-	return 1;
-}
-__setup("lsm=", choose_lsm_order);
-
-/* Enable LSM order debugging. */
-static int __init enable_debug(char *str)
-{
-	debug = true;
-	return 1;
-}
-__setup("lsm.debug", enable_debug);
-
-static bool match_last_lsm(const char *list, const char *lsm)
-{
-	const char *last;
-
-	if (WARN_ON(!list || !lsm))
-		return false;
-	last = strrchr(list, ',');
-	if (last)
-		/* Pass the comma, strcmp() will check for '\0' */
-		last++;
-	else
-		last = list;
-	return !strcmp(last, lsm);
-}
-
-static int lsm_append(const char *new, char **result)
-{
-	char *cp;
-
-	if (*result == NULL) {
-		*result = kstrdup(new, GFP_KERNEL);
-		if (*result == NULL)
-			return -ENOMEM;
-	} else {
-		/* Check if it is the last registered name */
-		if (match_last_lsm(*result, new))
-			return 0;
-		cp = kasprintf(GFP_KERNEL, "%s,%s", *result, new);
-		if (cp == NULL)
-			return -ENOMEM;
-		kfree(*result);
-		*result = cp;
-	}
-	return 0;
-}
-
-/**
- * security_add_hooks - Add a modules hooks to the hook lists.
- * @hooks: the hooks to add
- * @count: the number of hooks to add
- * @lsmid: the identification information for the security module
+ * Allocate the file blob for all the modules
  *
- * Each LSM has to register its hooks with the infrastructure.
+ * Returns 0, or -ENOMEM if memory can't be allocated.
  */
-void __init security_add_hooks(struct security_hook_list *hooks, int count,
-			       const struct lsm_id *lsmid)
+static int lsm_file_alloc(struct file *file)
 {
-	int i;
-
-	/*
-	 * A security module may call security_add_hooks() more
-	 * than once during initialization, and LSM initialization
-	 * is serialized. Landlock is one such case.
-	 * Look at the previous entry, if there is one, for duplication.
-	 */
-	if (lsm_active_cnt == 0 || lsm_idlist[lsm_active_cnt - 1] != lsmid) {
-		if (lsm_active_cnt >= MAX_LSM_COUNT)
-			panic("%s Too many LSMs registered.\n", __func__);
-		lsm_idlist[lsm_active_cnt++] = lsmid;
-	}
-
-	for (i = 0; i < count; i++) {
-		hooks[i].lsmid = lsmid;
-		lsm_static_call_init(&hooks[i]);
-	}
-
-	/*
-	 * Don't try to append during early_security_init(), we'll come back
-	 * and fix this up afterwards.
-	 */
-	if (slab_is_available()) {
-		if (lsm_append(lsmid->name, &lsm_names) < 0)
-			panic("%s - Cannot get early memory.\n", __func__);
+	if (!lsm_file_cache) {
+		file->f_security = NULL;
+		return 0;
 	}
-}
-
-int call_blocking_lsm_notifier(enum lsm_event event, void *data)
-{
-	return blocking_notifier_call_chain(&blocking_lsm_notifier_chain,
-					    event, data);
-}
-EXPORT_SYMBOL(call_blocking_lsm_notifier);
-
-int register_blocking_lsm_notifier(struct notifier_block *nb)
-{
-	return blocking_notifier_chain_register(&blocking_lsm_notifier_chain,
-						nb);
-}
-EXPORT_SYMBOL(register_blocking_lsm_notifier);
-int unregister_blocking_lsm_notifier(struct notifier_block *nb)
-{
-	return blocking_notifier_chain_unregister(&blocking_lsm_notifier_chain,
-						  nb);
+	file->f_security = kmem_cache_zalloc(lsm_file_cache, GFP_KERNEL);
+	if (file->f_security == NULL)
+		return -ENOMEM;
+	return 0;
 }
-EXPORT_SYMBOL(unregister_blocking_lsm_notifier);
 /**
  * lsm_blob_alloc - allocate a composite blob
@@ -696,47 +204,12 @@ static int lsm_blob_alloc(void **dest, size_t size, gfp_t gfp)
  *
  * Returns 0, or -ENOMEM if memory can't be allocated.
  */
-static int lsm_cred_alloc(struct cred *cred, gfp_t gfp)
+int lsm_cred_alloc(struct cred *cred, gfp_t gfp)
 {
 	return lsm_blob_alloc(&cred->security, blob_sizes.lbs_cred, gfp);
 }
 /**
- * lsm_early_cred - during initialization allocate a composite cred blob
- * @cred: the cred that needs a blob
- *
- * Allocate the cred blob for all the modules
- */
-static void __init lsm_early_cred(struct cred *cred)
-{
-	int rc = lsm_cred_alloc(cred, GFP_KERNEL);
-
-	if (rc)
-		panic("%s: Early cred alloc failed.\n", __func__);
-}
-
-/**
- * lsm_file_alloc - allocate a composite file blob
- * @file: the file that needs a blob
- *
- * Allocate the file blob for all the modules
- *
- * Returns 0, or -ENOMEM if memory can't be allocated.
- */
-static int lsm_file_alloc(struct file *file)
-{
-	if (!lsm_file_cache) {
-		file->f_security = NULL;
-		return 0;
-	}
-
-	file->f_security = kmem_cache_zalloc(lsm_file_cache, GFP_KERNEL);
-	if (file->f_security == NULL)
-		return -ENOMEM;
-	return 0;
-}
-
-/**
 * lsm_inode_alloc - allocate a composite inode blob
 * @inode: the inode that needs a blob
 * @gfp: allocation flags
@@ -766,7 +239,7 @@ static int lsm_inode_alloc(struct inode *inode, gfp_t gfp)
  *
 * Returns 0, or -ENOMEM if memory can't be allocated.
 */
-static int lsm_task_alloc(struct task_struct *task)
+int lsm_task_alloc(struct task_struct *task)
 {
 	return lsm_blob_alloc(&task->security, blob_sizes.lbs_task, GFP_KERNEL);
 }
@@ -823,31 +296,50 @@ static int lsm_msg_msg_alloc(struct msg_msg *mp)
 */
 static int lsm_bdev_alloc(struct block_device *bdev)
 {
-	if (blob_sizes.lbs_bdev == 0) {
-		bdev->bd_security = NULL;
-		return 0;
-	}
-
-	bdev->bd_security = kzalloc(blob_sizes.lbs_bdev, GFP_KERNEL);
-	if (!bdev->bd_security)
-		return -ENOMEM;
+	return lsm_blob_alloc(&bdev->bd_security, blob_sizes.lbs_bdev,
+			      GFP_KERNEL);
+}
-	return 0;
+#ifdef CONFIG_BPF_SYSCALL
+/**
+ * lsm_bpf_map_alloc - allocate a composite bpf_map blob
+ * @map: the bpf_map that needs a blob
+ *
+ * Allocate the bpf_map blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_bpf_map_alloc(struct bpf_map *map)
+{
+	return lsm_blob_alloc(&map->security, blob_sizes.lbs_bpf_map, GFP_KERNEL);
 }
 /**
- * lsm_early_task - during initialization allocate a composite task blob
- * @task: the task that needs a blob
+ * lsm_bpf_prog_alloc - allocate a composite bpf_prog blob
+ * @prog: the bpf_prog that needs a blob
  *
- * Allocate the task blob for all the modules
+ * Allocate the bpf_prog blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
  */
-static void __init lsm_early_task(struct task_struct *task)
+static int lsm_bpf_prog_alloc(struct bpf_prog *prog)
 {
-	int rc = lsm_task_alloc(task);
+	return lsm_blob_alloc(&prog->aux->security, blob_sizes.lbs_bpf_prog, GFP_KERNEL);
+}
-	if (rc)
-		panic("%s: Early task alloc failed.\n", __func__);
+/**
+ * lsm_bpf_token_alloc - allocate a composite bpf_token blob
+ * @token: the bpf_token that needs a blob
+ *
+ * Allocate the bpf_token blob for all the modules
+ *
+ * Returns 0, or -ENOMEM if memory can't be allocated.
+ */
+static int lsm_bpf_token_alloc(struct bpf_token *token)
+{
+	return lsm_blob_alloc(&token->security, blob_sizes.lbs_bpf_token, GFP_KERNEL);
 }
+#endif /* CONFIG_BPF_SYSCALL */
 /**
  * lsm_superblock_alloc - allocate a composite superblock blob
@@ -1775,7 +1267,7 @@ EXPORT_SYMBOL(security_dentry_init_security);
  * Return: Returns 0 on success, error on failure.
  */
 int security_dentry_create_files_as(struct dentry *dentry, int mode,
-				    struct qstr *name,
+				    const struct qstr *name,
 				    const struct cred *old, struct cred *new)
 {
 	return call_int_hook(dentry_create_files_as, dentry, mode,
@@ -2181,7 +1673,7 @@ int security_inode_symlink(struct inode *dir, struct dentry *dentry,
 }
 /**
- * security_inode_mkdir() - Check if creation a new director is allowed
+ * security_inode_mkdir() - Check if creating a new directory is allowed
 * @dir: parent directory
 * @dentry: new directory
 * @mode: new directory mode
@@ -2623,6 +2115,36 @@ void security_inode_post_removexattr(struct dentry *dentry, const char *name)
 }
 /**
+ * security_inode_file_setattr() - check if setting fsxattr is allowed
+ * @dentry: file to set filesystem extended attributes on
+ * @fa: extended attributes to set on the inode
+ *
+ * Called when file_setattr() syscall or FS_IOC_FSSETXATTR ioctl() is called on
+ * inode
+ *
+ * Return: Returns 0 if permission is granted.
+ */
+int security_inode_file_setattr(struct dentry *dentry, struct file_kattr *fa)
+{
+	return call_int_hook(inode_file_setattr, dentry, fa);
+}
+
+/**
+ * security_inode_file_getattr() - check if retrieving fsxattr is allowed
+ * @dentry: file to retrieve filesystem extended attributes from
+ * @fa: extended attributes to get
+ *
+ * Called when file_getattr() syscall or FS_IOC_FSGETXATTR ioctl() is called on
+ * inode
+ *
+ * Return: Returns 0 if permission is granted.
+ */
+int security_inode_file_getattr(struct dentry *dentry, struct file_kattr *fa)
+{
+	return call_int_hook(inode_file_getattr, dentry, fa);
+}
+
+/**
 * security_inode_need_killpriv() - Check if security_inode_killpriv() required
 * @dentry: associated dentry
 *
@@ -3155,7 +2677,7 @@ int security_file_truncate(struct file *file)
 *
 * Return: Returns a zero on success, negative values on failure.
 */
-int security_task_alloc(struct task_struct *task, unsigned long clone_flags)
+int security_task_alloc(struct task_struct *task, u64 clone_flags)
 {
 	int rc = lsm_task_alloc(task);
@@ -4312,17 +3834,31 @@ EXPORT_SYMBOL(security_secid_to_secctx);
 * security_lsmprop_to_secctx() - Convert a lsm_prop to a secctx
 * @prop: lsm specific information
 * @cp: the LSM context
+ * @lsmid: which security module to report
 *
 * Convert a @prop entry to security context. If @cp is NULL the
 * length of the result will be returned. This does mean that the
 * length could change between calls to check the length and the
 * next call which actually allocates and returns the @cp.
 *
+ * @lsmid identifies which LSM should supply the context.
+ * A value of LSM_ID_UNDEF indicates that the first LSM suppling
+ * the hook should be used. This is used in cases where the
+ * ID of the supplying LSM is unambiguous.
+ *
 * Return: Return length of data on success, error on failure.
 */
-int security_lsmprop_to_secctx(struct lsm_prop *prop, struct lsm_context *cp)
+int security_lsmprop_to_secctx(struct lsm_prop *prop, struct lsm_context *cp,
+			       int lsmid)
 {
-	return call_int_hook(lsmprop_to_secctx, prop, cp);
+	struct lsm_static_call *scall;
+
+	lsm_for_each_hook(scall, lsmprop_to_secctx) {
+		if (lsmid != LSM_ID_UNDEF && lsmid != scall->hl->lsmid->id)
+			continue;
+		return scall->hl->hook.lsmprop_to_secctx(prop, cp);
+	}
+	return LSM_RET_DEFAULT(lsmprop_to_secctx);
 }
 EXPORT_SYMBOL(security_lsmprop_to_secctx);
@@ -5684,7 +5220,16 @@ int security_bpf_prog(struct bpf_prog *prog)
 int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
 			    struct bpf_token *token, bool kernel)
 {
-	return call_int_hook(bpf_map_create, map, attr, token, kernel);
+	int rc;
+
+	rc = lsm_bpf_map_alloc(map);
+	if (unlikely(rc))
+		return rc;
+
+	rc = call_int_hook(bpf_map_create, map, attr, token, kernel);
+	if (unlikely(rc))
+		security_bpf_map_free(map);
+	return rc;
 }
 /**
@@ -5703,7 +5248,16 @@ int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
 int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
 			   struct bpf_token *token, bool kernel)
 {
-	return call_int_hook(bpf_prog_load, prog, attr, token, kernel);
+	int rc;
+
+	rc = lsm_bpf_prog_alloc(prog);
+	if (unlikely(rc))
+		return rc;
+
+	rc = call_int_hook(bpf_prog_load, prog, attr, token, kernel);
+	if (unlikely(rc))
+		security_bpf_prog_free(prog);
+	return rc;
 }
 /**
@@ -5720,7 +5274,16 @@ int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
 int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
 			      const struct path *path)
 {
-	return call_int_hook(bpf_token_create, token, attr, path);
+	int rc;
+
+	rc = lsm_bpf_token_alloc(token);
+	if (unlikely(rc))
+		return rc;
+
+	rc = call_int_hook(bpf_token_create, token, attr, path);
+	if (unlikely(rc))
+		security_bpf_token_free(token);
+	return rc;
 }
 /**
@@ -5764,6 +5327,8 @@ int security_bpf_token_capable(const struct bpf_token *token, int cap)
 void security_bpf_map_free(struct bpf_map *map)
 {
 	call_void_hook(bpf_map_free, map);
+	kfree(map->security);
+	map->security = NULL;
 }
 /**
@@ -5775,6 +5340,8 @@ void security_bpf_map_free(struct bpf_map *map)
 void security_bpf_prog_free(struct bpf_prog *prog)
 {
 	call_void_hook(bpf_prog_free, prog);
+	kfree(prog->aux->security);
+	prog->aux->security = NULL;
 }
 /**
@@ -5786,6 +5353,8 @@ void security_bpf_prog_free(struct bpf_prog *prog)
 void security_bpf_token_free(struct bpf_token *token)
 {
 	call_void_hook(bpf_token_free, token);
+	kfree(token->security);
+	token->security = NULL;
 }
 #endif /* CONFIG_BPF_SYSCALL */
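For readers following the new BPF object blobs above, here is a minimal, hypothetical sketch (not part of the patch) of how an individual LSM might consume the per-map blob that lsm_bpf_map_alloc() now reserves. The module name, struct, helper and hook below are invented for illustration, and it assumes the lbs_bpf_map member that accompanies this change in struct lsm_blob_sizes.

/* Hypothetical LSM fragment: names are illustrative, not from the patch. */
#include <linux/bpf.h>
#include <linux/lsm_hooks.h>

struct example_bpf_map_sec {
	u32 state;	/* whatever per-map data this LSM tracks */
};

/*
 * Reserve room in the shared bpf_map blob.  At init time the framework
 * rewrites lbs_bpf_map into this LSM's offset inside the composite blob
 * that lsm_bpf_map_alloc() allocates.
 */
static struct lsm_blob_sizes example_blob_sizes __ro_after_init = {
	.lbs_bpf_map = sizeof(struct example_bpf_map_sec),
};

/* map->security points at the composite blob; add our offset to find our slice. */
static inline struct example_bpf_map_sec *example_bpf_map(const struct bpf_map *map)
{
	return map->security + example_blob_sizes.lbs_bpf_map;
}

static int example_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
				  struct bpf_token *token, bool kernel)
{
	/* The blob is zeroed on allocation; initialize our slice explicitly. */
	example_bpf_map(map)->state = 0;
	return 0;
}

Registration would go through the module's usual DEFINE_LSM() entry (with .blobs pointing at example_blob_sizes) and an LSM_HOOK_INIT(bpf_map_create, example_bpf_map_create) passed to security_add_hooks(), the same way existing modules wire up the other blob types.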