/*
 * Linux Kernel Dump Test Module for testing kernel crash conditions:
 * it induces system failures at predefined crash points and under
 * predefined operational conditions in order to evaluate the reliability
 * of kernel sanity checking and of crash dumps obtained using different
 * dumping solutions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Author: Ankita Garg <[email protected]>
 *
 * Adapted from the Linux Kernel Dump Test Tool by
 * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
 *
 * Debugfs support added by Simon Kagstrom <[email protected]>
 *
 * See Documentation/fault-injection/provoke-crashes.txt for instructions.
 */
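
/*
 * Example usage -- a minimal sketch, assuming debugfs is mounted at
 * /sys/kernel/debug and the module is built as lkdtm.ko; the document
 * referenced above remains the authoritative guide:
 *
 *   # List the available crash types:
 *   cat /sys/kernel/debug/provoke-crash/DIRECT
 *
 *   # Trigger a crash type immediately through the DIRECT entry:
 *   echo EXCEPTION > /sys/kernel/debug/provoke-crash/DIRECT
 *
 *   # Or arm a kprobe-based crash point at load time:
 *   insmod lkdtm.ko cpoint_name=INT_HARDWARE_ENTRY cpoint_type=PANIC \
 *          cpoint_count=10
 */
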
Kees Cook426f3a52016-06-03 11:16:32 -070033#define pr_fmt(fmt) "lkdtm: " fmt
Ankita Garg8bb31b92006-10-02 02:17:36 -070034
35#include <linux/kernel.h>
Randy Dunlap5d861d92006-11-02 22:07:06 -080036#include <linux/fs.h>
Ankita Garg8bb31b92006-10-02 02:17:36 -070037#include <linux/module.h>
Randy Dunlap5d861d92006-11-02 22:07:06 -080038#include <linux/buffer_head.h>
Ankita Garg8bb31b92006-10-02 02:17:36 -070039#include <linux/kprobes.h>
Randy Dunlap5d861d92006-11-02 22:07:06 -080040#include <linux/list.h>
Ankita Garg8bb31b92006-10-02 02:17:36 -070041#include <linux/init.h>
Ankita Garg8bb31b92006-10-02 02:17:36 -070042#include <linux/interrupt.h>
Randy Dunlap5d861d92006-11-02 22:07:06 -080043#include <linux/hrtimer.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090044#include <linux/slab.h>
Ankita Garg8bb31b92006-10-02 02:17:36 -070045#include <scsi/scsi_cmnd.h>
Simon Kagstrom0347af42010-03-05 13:42:49 -080046#include <linux/debugfs.h>
Kees Cookcc33c5372013-07-08 10:01:33 -070047#include <linux/vmalloc.h>
Kees Cook9ae113c2013-10-24 09:25:57 -070048#include <linux/mman.h>
Kees Cook1bc9fac2014-02-14 15:58:50 -080049#include <asm/cacheflush.h>
Ankita Garg8bb31b92006-10-02 02:17:36 -070050
51#ifdef CONFIG_IDE
52#include <linux/ide.h>
53#endif
54
Kees Cook9a49a522016-02-22 14:09:29 -080055#include "lkdtm.h"
56
/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set.  Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
62#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
63#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
64#else
65#define REC_STACK_SIZE (THREAD_SIZE / 8)
66#endif
67#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
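/*
 * Worked example (illustrative only; the real values depend on the build):
 * with CONFIG_FRAME_WARN=1024 and a 16 KiB THREAD_SIZE, REC_STACK_SIZE is
 * 512 bytes and REC_NUM_DEFAULT is (16384 / 512) * 2 = 64, so the default
 * recursion depth consumes roughly twice the available kernel stack.
 */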
68
Ankita Garg8bb31b92006-10-02 02:17:36 -070069#define DEFAULT_COUNT 10
Kees Cookcc33c5372013-07-08 10:01:33 -070070#define EXEC_SIZE 64
Ankita Garg8bb31b92006-10-02 02:17:36 -070071
72enum cname {
Namhyung Kim93e2f582010-10-26 14:22:40 -070073 CN_INVALID,
74 CN_INT_HARDWARE_ENTRY,
75 CN_INT_HW_IRQ_EN,
76 CN_INT_TASKLET_ENTRY,
77 CN_FS_DEVRW,
78 CN_MEM_SWAPOUT,
79 CN_TIMERADD,
80 CN_SCSI_DISPATCH_CMD,
81 CN_IDE_CORE_CP,
82 CN_DIRECT,
Ankita Garg8bb31b92006-10-02 02:17:36 -070083};
84
85enum ctype {
Namhyung Kim93e2f582010-10-26 14:22:40 -070086 CT_NONE,
87 CT_PANIC,
88 CT_BUG,
Kees Cook65892722013-07-08 10:01:31 -070089 CT_WARNING,
Namhyung Kim93e2f582010-10-26 14:22:40 -070090 CT_EXCEPTION,
91 CT_LOOP,
92 CT_OVERFLOW,
93 CT_CORRUPT_STACK,
94 CT_UNALIGNED_LOAD_STORE_WRITE,
95 CT_OVERWRITE_ALLOCATION,
96 CT_WRITE_AFTER_FREE,
Laura Abbottbc0b8cc2016-02-25 16:36:42 -080097 CT_READ_AFTER_FREE,
Laura Abbott920d4512016-02-25 16:36:44 -080098 CT_WRITE_BUDDY_AFTER_FREE,
99 CT_READ_BUDDY_AFTER_FREE,
Namhyung Kim93e2f582010-10-26 14:22:40 -0700100 CT_SOFTLOCKUP,
101 CT_HARDLOCKUP,
Kees Cook274a5852013-07-08 10:01:32 -0700102 CT_SPINLOCKUP,
Namhyung Kim93e2f582010-10-26 14:22:40 -0700103 CT_HUNG_TASK,
Kees Cookcc33c5372013-07-08 10:01:33 -0700104 CT_EXEC_DATA,
105 CT_EXEC_STACK,
106 CT_EXEC_KMALLOC,
107 CT_EXEC_VMALLOC,
Kees Cook9a49a522016-02-22 14:09:29 -0800108 CT_EXEC_RODATA,
Kees Cook9ae113c2013-10-24 09:25:57 -0700109 CT_EXEC_USERSPACE,
110 CT_ACCESS_USERSPACE,
111 CT_WRITE_RO,
Kees Cook7cca0712016-02-17 14:41:16 -0800112 CT_WRITE_RO_AFTER_INIT,
Kees Cookdc2b9e92014-02-09 13:48:48 -0800113 CT_WRITE_KERN,
Kees Cookb5484522016-06-07 14:27:02 -0700114 CT_ATOMIC_UNDERFLOW,
115 CT_ATOMIC_OVERFLOW,
Kees Cookaa981a62016-06-03 12:06:52 -0700116 CT_USERCOPY_HEAP_SIZE_TO,
117 CT_USERCOPY_HEAP_SIZE_FROM,
118 CT_USERCOPY_HEAP_FLAG_TO,
119 CT_USERCOPY_HEAP_FLAG_FROM,
120 CT_USERCOPY_STACK_FRAME_TO,
121 CT_USERCOPY_STACK_FRAME_FROM,
122 CT_USERCOPY_STACK_BEYOND,
Kees Cook6c352142016-06-23 22:01:26 -0700123 CT_USERCOPY_KERNEL,
Ankita Garg8bb31b92006-10-02 02:17:36 -0700124};
125
126static char* cp_name[] = {
127 "INT_HARDWARE_ENTRY",
128 "INT_HW_IRQ_EN",
129 "INT_TASKLET_ENTRY",
130 "FS_DEVRW",
131 "MEM_SWAPOUT",
132 "TIMERADD",
133 "SCSI_DISPATCH_CMD",
Simon Kagstrom0347af42010-03-05 13:42:49 -0800134 "IDE_CORE_CP",
135 "DIRECT",
Ankita Garg8bb31b92006-10-02 02:17:36 -0700136};
137
138static char* cp_type[] = {
139 "PANIC",
140 "BUG",
Kees Cook65892722013-07-08 10:01:31 -0700141 "WARNING",
Ankita Garg8bb31b92006-10-02 02:17:36 -0700142 "EXCEPTION",
143 "LOOP",
Simon Kagstrom0347af42010-03-05 13:42:49 -0800144 "OVERFLOW",
145 "CORRUPT_STACK",
146 "UNALIGNED_LOAD_STORE_WRITE",
147 "OVERWRITE_ALLOCATION",
148 "WRITE_AFTER_FREE",
Laura Abbottbc0b8cc2016-02-25 16:36:42 -0800149 "READ_AFTER_FREE",
Laura Abbott920d4512016-02-25 16:36:44 -0800150 "WRITE_BUDDY_AFTER_FREE",
151 "READ_BUDDY_AFTER_FREE",
Frederic Weisbeckera48223f2010-05-26 14:44:29 -0700152 "SOFTLOCKUP",
153 "HARDLOCKUP",
Kees Cook274a5852013-07-08 10:01:32 -0700154 "SPINLOCKUP",
Frederic Weisbeckera48223f2010-05-26 14:44:29 -0700155 "HUNG_TASK",
Kees Cookcc33c5372013-07-08 10:01:33 -0700156 "EXEC_DATA",
157 "EXEC_STACK",
158 "EXEC_KMALLOC",
159 "EXEC_VMALLOC",
Kees Cook9a49a522016-02-22 14:09:29 -0800160 "EXEC_RODATA",
Kees Cook9ae113c2013-10-24 09:25:57 -0700161 "EXEC_USERSPACE",
162 "ACCESS_USERSPACE",
163 "WRITE_RO",
Kees Cook7cca0712016-02-17 14:41:16 -0800164 "WRITE_RO_AFTER_INIT",
Kees Cookdc2b9e92014-02-09 13:48:48 -0800165 "WRITE_KERN",
Kees Cookb5484522016-06-07 14:27:02 -0700166 "ATOMIC_UNDERFLOW",
167 "ATOMIC_OVERFLOW",
Kees Cookaa981a62016-06-03 12:06:52 -0700168 "USERCOPY_HEAP_SIZE_TO",
169 "USERCOPY_HEAP_SIZE_FROM",
170 "USERCOPY_HEAP_FLAG_TO",
171 "USERCOPY_HEAP_FLAG_FROM",
172 "USERCOPY_STACK_FRAME_TO",
173 "USERCOPY_STACK_FRAME_FROM",
174 "USERCOPY_STACK_BEYOND",
Kees Cook6c352142016-06-23 22:01:26 -0700175 "USERCOPY_KERNEL",
Ankita Garg8bb31b92006-10-02 02:17:36 -0700176};
177
178static struct jprobe lkdtm;
179
180static int lkdtm_parse_commandline(void);
181static void lkdtm_handler(void);
182
Al Viroec1c6202007-02-09 16:05:17 +0000183static char* cpoint_name;
184static char* cpoint_type;
Ankita Garg8bb31b92006-10-02 02:17:36 -0700185static int cpoint_count = DEFAULT_COUNT;
186static int recur_count = REC_NUM_DEFAULT;
Kees Cookaa981a62016-06-03 12:06:52 -0700187static int alloc_size = 1024;
188static size_t cache_size;
Ankita Garg8bb31b92006-10-02 02:17:36 -0700189
Namhyung Kim93e2f582010-10-26 14:22:40 -0700190static enum cname cpoint = CN_INVALID;
191static enum ctype cptype = CT_NONE;
Ankita Garg8bb31b92006-10-02 02:17:36 -0700192static int count = DEFAULT_COUNT;
Josh Huntaa2c96d2011-06-27 16:18:08 -0700193static DEFINE_SPINLOCK(count_lock);
Kees Cook274a5852013-07-08 10:01:32 -0700194static DEFINE_SPINLOCK(lock_me_up);
Ankita Garg8bb31b92006-10-02 02:17:36 -0700195
Kees Cookcc33c5372013-07-08 10:01:33 -0700196static u8 data_area[EXEC_SIZE];
Kees Cookaa981a62016-06-03 12:06:52 -0700197static struct kmem_cache *bad_cache;
Kees Cookcc33c5372013-07-08 10:01:33 -0700198
Kees Cookaa981a62016-06-03 12:06:52 -0700199static const unsigned char test_text[] = "This is a test.\n";
Kees Cook9ae113c2013-10-24 09:25:57 -0700200static const unsigned long rodata = 0xAA55AA55;
Kees Cook7cca0712016-02-17 14:41:16 -0800201static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
Kees Cook9ae113c2013-10-24 09:25:57 -0700202
Ankita Garg8bb31b92006-10-02 02:17:36 -0700203module_param(recur_count, int, 0644);
Kees Cook7d196ac2013-10-24 09:25:39 -0700204MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
Rusty Russelldca41302010-08-11 23:04:21 -0600205module_param(cpoint_name, charp, 0444);
Randy Dunlap5d861d92006-11-02 22:07:06 -0800206MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
Rusty Russelldca41302010-08-11 23:04:21 -0600207module_param(cpoint_type, charp, 0444);
Randy Dunlap5d861d92006-11-02 22:07:06 -0800208MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
209 "hitting the crash point");
210module_param(cpoint_count, int, 0644);
211MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
212 "crash point is to be hit to trigger action");
Kees Cookaa981a62016-06-03 12:06:52 -0700213module_param(alloc_size, int, 0644);
214MODULE_PARM_DESC(alloc_size, " Size of allocation for user copy tests "\
215 "(from 1 to PAGE_SIZE)");
Ankita Garg8bb31b92006-10-02 02:17:36 -0700216
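/*
 * The 0644 parameters above can also be adjusted after the module is
 * loaded, e.g. (a sketch, assuming sysfs is mounted at /sys):
 *
 *   echo 2048 > /sys/module/lkdtm/parameters/alloc_size
 *   echo 5    > /sys/module/lkdtm/parameters/cpoint_count
 */
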
Adrian Bunk21181162008-02-06 01:36:50 -0800217static unsigned int jp_do_irq(unsigned int irq)
Ankita Garg8bb31b92006-10-02 02:17:36 -0700218{
219 lkdtm_handler();
220 jprobe_return();
221 return 0;
222}
223
Adrian Bunk21181162008-02-06 01:36:50 -0800224static irqreturn_t jp_handle_irq_event(unsigned int irq,
225 struct irqaction *action)
Ankita Garg8bb31b92006-10-02 02:17:36 -0700226{
227 lkdtm_handler();
228 jprobe_return();
229 return 0;
230}
231
Adrian Bunk21181162008-02-06 01:36:50 -0800232static void jp_tasklet_action(struct softirq_action *a)
Ankita Garg8bb31b92006-10-02 02:17:36 -0700233{
234 lkdtm_handler();
235 jprobe_return();
236}
237
Adrian Bunk21181162008-02-06 01:36:50 -0800238static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
Ankita Garg8bb31b92006-10-02 02:17:36 -0700239{
240 lkdtm_handler();
241 jprobe_return();
242}
243
244struct scan_control;
245
Adrian Bunk21181162008-02-06 01:36:50 -0800246static unsigned long jp_shrink_inactive_list(unsigned long max_scan,
247 struct zone *zone,
248 struct scan_control *sc)
Ankita Garg8bb31b92006-10-02 02:17:36 -0700249{
250 lkdtm_handler();
251 jprobe_return();
252 return 0;
253}
254
Adrian Bunk21181162008-02-06 01:36:50 -0800255static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim,
256 const enum hrtimer_mode mode)
Ankita Garg8bb31b92006-10-02 02:17:36 -0700257{
258 lkdtm_handler();
259 jprobe_return();
260 return 0;
261}
262
Adrian Bunk21181162008-02-06 01:36:50 -0800263static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
Ankita Garg8bb31b92006-10-02 02:17:36 -0700264{
265 lkdtm_handler();
266 jprobe_return();
267 return 0;
268}
269
270#ifdef CONFIG_IDE
Rashika Kheria44629432013-12-13 12:29:42 +0530271static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
Ankita Garg8bb31b92006-10-02 02:17:36 -0700272 struct block_device *bdev, unsigned int cmd,
273 unsigned long arg)
274{
275 lkdtm_handler();
276 jprobe_return();
277 return 0;
278}
279#endif
280
/* Return the crash type number or CT_NONE if the name is invalid */
282static enum ctype parse_cp_type(const char *what, size_t count)
283{
284 int i;
285
286 for (i = 0; i < ARRAY_SIZE(cp_type); i++) {
287 if (!strcmp(what, cp_type[i]))
288 return i + 1;
289 }
290
Namhyung Kim93e2f582010-10-26 14:22:40 -0700291 return CT_NONE;
Simon Kagstrom0347af42010-03-05 13:42:49 -0800292}
293
294static const char *cp_type_to_str(enum ctype type)
295{
Namhyung Kim93e2f582010-10-26 14:22:40 -0700296 if (type == CT_NONE || type < 0 || type > ARRAY_SIZE(cp_type))
Simon Kagstrom0347af42010-03-05 13:42:49 -0800297 return "None";
298
299 return cp_type[type - 1];
300}
301
302static const char *cp_name_to_str(enum cname name)
303{
Namhyung Kim93e2f582010-10-26 14:22:40 -0700304 if (name == CN_INVALID || name < 0 || name > ARRAY_SIZE(cp_name))
Simon Kagstrom0347af42010-03-05 13:42:49 -0800305 return "INVALID";
306
307 return cp_name[name - 1];
308}
309
310
Ankita Garg8bb31b92006-10-02 02:17:36 -0700311static int lkdtm_parse_commandline(void)
312{
313 int i;
Josh Huntaa2c96d2011-06-27 16:18:08 -0700314 unsigned long flags;
Ankita Garg8bb31b92006-10-02 02:17:36 -0700315
Simon Kagstrom0347af42010-03-05 13:42:49 -0800316 if (cpoint_count < 1 || recur_count < 1)
Ankita Garg8bb31b92006-10-02 02:17:36 -0700317 return -EINVAL;
318
Josh Huntaa2c96d2011-06-27 16:18:08 -0700319 spin_lock_irqsave(&count_lock, flags);
Ankita Garg8bb31b92006-10-02 02:17:36 -0700320 count = cpoint_count;
Josh Huntaa2c96d2011-06-27 16:18:08 -0700321 spin_unlock_irqrestore(&count_lock, flags);
Ankita Garg8bb31b92006-10-02 02:17:36 -0700322
Simon Kagstrom0347af42010-03-05 13:42:49 -0800323 /* No special parameters */
324 if (!cpoint_type && !cpoint_name)
325 return 0;
326
	/* cpoint_type and cpoint_name must be set together, or not at all */
328 if (!cpoint_type || !cpoint_name)
329 return -EINVAL;
330
331 cptype = parse_cp_type(cpoint_type, strlen(cpoint_type));
Namhyung Kim93e2f582010-10-26 14:22:40 -0700332 if (cptype == CT_NONE)
Simon Kagstrom0347af42010-03-05 13:42:49 -0800333 return -EINVAL;
334
335 for (i = 0; i < ARRAY_SIZE(cp_name); i++) {
336 if (!strcmp(cpoint_name, cp_name[i])) {
337 cpoint = i + 1;
338 return 0;
339 }
340 }
341
342 /* Could not find a valid crash point */
343 return -EINVAL;
Ankita Garg8bb31b92006-10-02 02:17:36 -0700344}
345
Kees Cook7d196ac2013-10-24 09:25:39 -0700346static int recursive_loop(int remaining)
Ankita Garg8bb31b92006-10-02 02:17:36 -0700347{
Kees Cook7d196ac2013-10-24 09:25:39 -0700348 char buf[REC_STACK_SIZE];
Ankita Garg8bb31b92006-10-02 02:17:36 -0700349
Kees Cook7d196ac2013-10-24 09:25:39 -0700350 /* Make sure compiler does not optimize this away. */
351 memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
352 if (!remaining)
Ankita Garg8bb31b92006-10-02 02:17:36 -0700353 return 0;
354 else
Kees Cook7d196ac2013-10-24 09:25:39 -0700355 return recursive_loop(remaining - 1);
Ankita Garg8bb31b92006-10-02 02:17:36 -0700356}
357
Kees Cookcc33c5372013-07-08 10:01:33 -0700358static void do_nothing(void)
359{
360 return;
361}
362
/* Must immediately follow do_nothing for size calculations to work out. */
364static void do_overwritten(void)
365{
366 pr_info("do_overwritten wasn't overwritten!\n");
367 return;
368}
369
Kees Cook629c66a2013-10-24 18:05:42 -0700370static noinline void corrupt_stack(void)
371{
372 /* Use default char array length that triggers stack protection. */
373 char data[8];
374
375 memset((void *)data, 0, 64);
376}
377
Kees Cook9a49a522016-02-22 14:09:29 -0800378static noinline void execute_location(void *dst, bool write)
Kees Cookcc33c5372013-07-08 10:01:33 -0700379{
380 void (*func)(void) = dst;
381
Kees Cookaac416f2014-02-09 13:48:47 -0800382 pr_info("attempting ok execution at %p\n", do_nothing);
383 do_nothing();
384
Kees Cook9a49a522016-02-22 14:09:29 -0800385 if (write) {
386 memcpy(dst, do_nothing, EXEC_SIZE);
387 flush_icache_range((unsigned long)dst,
388 (unsigned long)dst + EXEC_SIZE);
389 }
Kees Cookaac416f2014-02-09 13:48:47 -0800390 pr_info("attempting bad execution at %p\n", func);
Kees Cookcc33c5372013-07-08 10:01:33 -0700391 func();
392}
393
Kees Cook9ae113c2013-10-24 09:25:57 -0700394static void execute_user_location(void *dst)
395{
Kees Cook51236622013-11-11 11:23:49 -0800396 /* Intentionally crossing kernel/user memory boundary. */
Kees Cook9ae113c2013-10-24 09:25:57 -0700397 void (*func)(void) = dst;
398
Kees Cookaac416f2014-02-09 13:48:47 -0800399 pr_info("attempting ok execution at %p\n", do_nothing);
400 do_nothing();
401
Kees Cook51236622013-11-11 11:23:49 -0800402 if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
Kees Cook9ae113c2013-10-24 09:25:57 -0700403 return;
Kees Cookaac416f2014-02-09 13:48:47 -0800404 flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
405 pr_info("attempting bad execution at %p\n", func);
Kees Cook9ae113c2013-10-24 09:25:57 -0700406 func();
407}
408
Kees Cookaa981a62016-06-03 12:06:52 -0700409/*
410 * Instead of adding -Wno-return-local-addr, just pass the stack address
411 * through a function to obfuscate it from the compiler.
412 */
413static noinline unsigned char *trick_compiler(unsigned char *stack)
414{
415 return stack + 0;
416}
417
418static noinline unsigned char *do_usercopy_stack_callee(int value)
419{
420 unsigned char buf[32];
421 int i;
422
423 /* Exercise stack to avoid everything living in registers. */
424 for (i = 0; i < sizeof(buf); i++) {
425 buf[i] = value & 0xff;
426 }
427
428 return trick_compiler(buf);
429}
430
431static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
432{
433 unsigned long user_addr;
434 unsigned char good_stack[32];
435 unsigned char *bad_stack;
436 int i;
437
438 /* Exercise stack to avoid everything living in registers. */
439 for (i = 0; i < sizeof(good_stack); i++)
440 good_stack[i] = test_text[i % sizeof(test_text)];
441
442 /* This is a pointer to outside our current stack frame. */
443 if (bad_frame) {
444 bad_stack = do_usercopy_stack_callee(alloc_size);
445 } else {
446 /* Put start address just inside stack. */
447 bad_stack = task_stack_page(current) + THREAD_SIZE;
448 bad_stack -= sizeof(unsigned long);
449 }
450
451 user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
452 PROT_READ | PROT_WRITE | PROT_EXEC,
453 MAP_ANONYMOUS | MAP_PRIVATE, 0);
454 if (user_addr >= TASK_SIZE) {
455 pr_warn("Failed to allocate user memory\n");
456 return;
457 }
458
459 if (to_user) {
460 pr_info("attempting good copy_to_user of local stack\n");
461 if (copy_to_user((void __user *)user_addr, good_stack,
462 sizeof(good_stack))) {
463 pr_warn("copy_to_user failed unexpectedly?!\n");
464 goto free_user;
465 }
466
467 pr_info("attempting bad copy_to_user of distant stack\n");
468 if (copy_to_user((void __user *)user_addr, bad_stack,
469 sizeof(good_stack))) {
470 pr_warn("copy_to_user failed, but lacked Oops\n");
471 goto free_user;
472 }
473 } else {
474 /*
475 * There isn't a safe way to not be protected by usercopy
476 * if we're going to write to another thread's stack.
477 */
478 if (!bad_frame)
479 goto free_user;
480
481 pr_info("attempting good copy_from_user of local stack\n");
482 if (copy_from_user(good_stack, (void __user *)user_addr,
483 sizeof(good_stack))) {
484 pr_warn("copy_from_user failed unexpectedly?!\n");
485 goto free_user;
486 }
487
488 pr_info("attempting bad copy_from_user of distant stack\n");
489 if (copy_from_user(bad_stack, (void __user *)user_addr,
490 sizeof(good_stack))) {
491 pr_warn("copy_from_user failed, but lacked Oops\n");
492 goto free_user;
493 }
494 }
495
496free_user:
497 vm_munmap(user_addr, PAGE_SIZE);
498}
499
Kees Cook6c352142016-06-23 22:01:26 -0700500static void do_usercopy_kernel(void)
501{
502 unsigned long user_addr;
503
504 user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
505 PROT_READ | PROT_WRITE | PROT_EXEC,
506 MAP_ANONYMOUS | MAP_PRIVATE, 0);
507 if (user_addr >= TASK_SIZE) {
508 pr_warn("Failed to allocate user memory\n");
509 return;
510 }
511
512 pr_info("attempting good copy_to_user from kernel rodata\n");
513 if (copy_to_user((void __user *)user_addr, test_text,
514 sizeof(test_text))) {
515 pr_warn("copy_to_user failed unexpectedly?!\n");
516 goto free_user;
517 }
518
519 pr_info("attempting bad copy_to_user from kernel text\n");
520 if (copy_to_user((void __user *)user_addr, vm_mmap, PAGE_SIZE)) {
521 pr_warn("copy_to_user failed, but lacked Oops\n");
522 goto free_user;
523 }
524
525free_user:
526 vm_munmap(user_addr, PAGE_SIZE);
527}
528
Kees Cookaa981a62016-06-03 12:06:52 -0700529static void do_usercopy_heap_size(bool to_user)
530{
531 unsigned long user_addr;
532 unsigned char *one, *two;
533 size_t size = clamp_t(int, alloc_size, 1, PAGE_SIZE);
534
535 one = kmalloc(size, GFP_KERNEL);
536 two = kmalloc(size, GFP_KERNEL);
537 if (!one || !two) {
538 pr_warn("Failed to allocate kernel memory\n");
539 goto free_kernel;
540 }
541
542 user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
543 PROT_READ | PROT_WRITE | PROT_EXEC,
544 MAP_ANONYMOUS | MAP_PRIVATE, 0);
545 if (user_addr >= TASK_SIZE) {
546 pr_warn("Failed to allocate user memory\n");
547 goto free_kernel;
548 }
549
550 memset(one, 'A', size);
551 memset(two, 'B', size);
552
553 if (to_user) {
554 pr_info("attempting good copy_to_user of correct size\n");
555 if (copy_to_user((void __user *)user_addr, one, size)) {
556 pr_warn("copy_to_user failed unexpectedly?!\n");
557 goto free_user;
558 }
559
560 pr_info("attempting bad copy_to_user of too large size\n");
561 if (copy_to_user((void __user *)user_addr, one, 2 * size)) {
562 pr_warn("copy_to_user failed, but lacked Oops\n");
563 goto free_user;
564 }
565 } else {
566 pr_info("attempting good copy_from_user of correct size\n");
567 if (copy_from_user(one, (void __user *)user_addr,
568 size)) {
569 pr_warn("copy_from_user failed unexpectedly?!\n");
570 goto free_user;
571 }
572
573 pr_info("attempting bad copy_from_user of too large size\n");
574 if (copy_from_user(one, (void __user *)user_addr, 2 * size)) {
575 pr_warn("copy_from_user failed, but lacked Oops\n");
576 goto free_user;
577 }
578 }
579
580free_user:
581 vm_munmap(user_addr, PAGE_SIZE);
582free_kernel:
583 kfree(one);
584 kfree(two);
585}
586
587static void do_usercopy_heap_flag(bool to_user)
588{
589 unsigned long user_addr;
590 unsigned char *good_buf = NULL;
591 unsigned char *bad_buf = NULL;
592
593 /* Make sure cache was prepared. */
594 if (!bad_cache) {
595 pr_warn("Failed to allocate kernel cache\n");
596 return;
597 }
598
599 /*
600 * Allocate one buffer from each cache (kmalloc will have the
601 * SLAB_USERCOPY flag already, but "bad_cache" won't).
602 */
603 good_buf = kmalloc(cache_size, GFP_KERNEL);
604 bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
605 if (!good_buf || !bad_buf) {
606 pr_warn("Failed to allocate buffers from caches\n");
607 goto free_alloc;
608 }
609
610 /* Allocate user memory we'll poke at. */
611 user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
612 PROT_READ | PROT_WRITE | PROT_EXEC,
613 MAP_ANONYMOUS | MAP_PRIVATE, 0);
614 if (user_addr >= TASK_SIZE) {
615 pr_warn("Failed to allocate user memory\n");
616 goto free_alloc;
617 }
618
619 memset(good_buf, 'A', cache_size);
620 memset(bad_buf, 'B', cache_size);
621
622 if (to_user) {
623 pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
624 if (copy_to_user((void __user *)user_addr, good_buf,
625 cache_size)) {
626 pr_warn("copy_to_user failed unexpectedly?!\n");
627 goto free_user;
628 }
629
630 pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
631 if (copy_to_user((void __user *)user_addr, bad_buf,
632 cache_size)) {
633 pr_warn("copy_to_user failed, but lacked Oops\n");
634 goto free_user;
635 }
636 } else {
637 pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
638 if (copy_from_user(good_buf, (void __user *)user_addr,
639 cache_size)) {
640 pr_warn("copy_from_user failed unexpectedly?!\n");
641 goto free_user;
642 }
643
644 pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
645 if (copy_from_user(bad_buf, (void __user *)user_addr,
646 cache_size)) {
647 pr_warn("copy_from_user failed, but lacked Oops\n");
648 goto free_user;
649 }
650 }
651
652free_user:
653 vm_munmap(user_addr, PAGE_SIZE);
654free_alloc:
655 if (bad_buf)
656 kmem_cache_free(bad_cache, bad_buf);
657 kfree(good_buf);
658}
659
Simon Kagstrom0347af42010-03-05 13:42:49 -0800660static void lkdtm_do_action(enum ctype which)
Ankita Garg8bb31b92006-10-02 02:17:36 -0700661{
Simon Kagstrom0347af42010-03-05 13:42:49 -0800662 switch (which) {
Namhyung Kim93e2f582010-10-26 14:22:40 -0700663 case CT_PANIC:
Simon Kagstrom0347af42010-03-05 13:42:49 -0800664 panic("dumptest");
665 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -0700666 case CT_BUG:
Simon Kagstrom0347af42010-03-05 13:42:49 -0800667 BUG();
668 break;
Kees Cook65892722013-07-08 10:01:31 -0700669 case CT_WARNING:
670 WARN_ON(1);
671 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -0700672 case CT_EXCEPTION:
Simon Kagstrom0347af42010-03-05 13:42:49 -0800673 *((int *) 0) = 0;
674 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -0700675 case CT_LOOP:
Simon Kagstrom0347af42010-03-05 13:42:49 -0800676 for (;;)
677 ;
678 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -0700679 case CT_OVERFLOW:
Kees Cook7d196ac2013-10-24 09:25:39 -0700680 (void) recursive_loop(recur_count);
Simon Kagstrom0347af42010-03-05 13:42:49 -0800681 break;
Kees Cook629c66a2013-10-24 18:05:42 -0700682 case CT_CORRUPT_STACK:
683 corrupt_stack();
Simon Kagstrom0347af42010-03-05 13:42:49 -0800684 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -0700685 case CT_UNALIGNED_LOAD_STORE_WRITE: {
Simon Kagstrom0347af42010-03-05 13:42:49 -0800686 static u8 data[5] __attribute__((aligned(4))) = {1, 2,
687 3, 4, 5};
688 u32 *p;
689 u32 val = 0x12345678;
690
691 p = (u32 *)(data + 1);
692 if (*p == 0)
693 val = 0x87654321;
694 *p = val;
695 break;
696 }
Namhyung Kim93e2f582010-10-26 14:22:40 -0700697 case CT_OVERWRITE_ALLOCATION: {
Simon Kagstrom0347af42010-03-05 13:42:49 -0800698 size_t len = 1020;
699 u32 *data = kmalloc(len, GFP_KERNEL);
700
701 data[1024 / sizeof(u32)] = 0x12345678;
702 kfree(data);
703 break;
704 }
Namhyung Kim93e2f582010-10-26 14:22:40 -0700705 case CT_WRITE_AFTER_FREE: {
Kees Cook7c0ae5b2016-02-26 15:27:35 -0800706 int *base, *again;
Simon Kagstrom0347af42010-03-05 13:42:49 -0800707 size_t len = 1024;
Laura Abbott250a8982016-02-25 16:36:43 -0800708 /*
709 * The slub allocator uses the first word to store the free
710 * pointer in some configurations. Use the middle of the
711 * allocation to avoid running into the freelist
712 */
713 size_t offset = (len / sizeof(*base)) / 2;
Simon Kagstrom0347af42010-03-05 13:42:49 -0800714
Laura Abbott250a8982016-02-25 16:36:43 -0800715 base = kmalloc(len, GFP_KERNEL);
716 pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
Laura Abbott250a8982016-02-25 16:36:43 -0800717 pr_info("Attempting bad write to freed memory at %p\n",
718 &base[offset]);
Kees Cook7c0ae5b2016-02-26 15:27:35 -0800719 kfree(base);
Laura Abbott250a8982016-02-25 16:36:43 -0800720 base[offset] = 0x0abcdef0;
Kees Cook7c0ae5b2016-02-26 15:27:35 -0800721 /* Attempt to notice the overwrite. */
722 again = kmalloc(len, GFP_KERNEL);
723 kfree(again);
724 if (again != base)
725 pr_info("Hmm, didn't get the same memory range.\n");
726
Simon Kagstrom0347af42010-03-05 13:42:49 -0800727 break;
728 }
Laura Abbottbc0b8cc2016-02-25 16:36:42 -0800729 case CT_READ_AFTER_FREE: {
730 int *base, *val, saw;
731 size_t len = 1024;
732 /*
733 * The slub allocator uses the first word to store the free
734 * pointer in some configurations. Use the middle of the
735 * allocation to avoid running into the freelist
736 */
737 size_t offset = (len / sizeof(*base)) / 2;
738
739 base = kmalloc(len, GFP_KERNEL);
740 if (!base)
741 break;
742
743 val = kmalloc(len, GFP_KERNEL);
Sudip Mukherjeed2e10082016-04-05 22:41:06 +0530744 if (!val) {
745 kfree(base);
Laura Abbottbc0b8cc2016-02-25 16:36:42 -0800746 break;
Sudip Mukherjeed2e10082016-04-05 22:41:06 +0530747 }
Laura Abbottbc0b8cc2016-02-25 16:36:42 -0800748
749 *val = 0x12345678;
750 base[offset] = *val;
751 pr_info("Value in memory before free: %x\n", base[offset]);
752
753 kfree(base);
754
755 pr_info("Attempting bad read from freed memory\n");
756 saw = base[offset];
757 if (saw != *val) {
758 /* Good! Poisoning happened, so declare a win. */
Kees Cook7c0ae5b2016-02-26 15:27:35 -0800759 pr_info("Memory correctly poisoned (%x)\n", saw);
Laura Abbottbc0b8cc2016-02-25 16:36:42 -0800760 BUG();
761 }
762 pr_info("Memory was not poisoned\n");
763
764 kfree(val);
765 break;
766 }
Laura Abbott920d4512016-02-25 16:36:44 -0800767 case CT_WRITE_BUDDY_AFTER_FREE: {
768 unsigned long p = __get_free_page(GFP_KERNEL);
769 if (!p)
770 break;
771 pr_info("Writing to the buddy page before free\n");
772 memset((void *)p, 0x3, PAGE_SIZE);
773 free_page(p);
Simon Kagstrom0347af42010-03-05 13:42:49 -0800774 schedule();
Laura Abbott920d4512016-02-25 16:36:44 -0800775 pr_info("Attempting bad write to the buddy page after free\n");
776 memset((void *)p, 0x78, PAGE_SIZE);
Kees Cook7c0ae5b2016-02-26 15:27:35 -0800777 /* Attempt to notice the overwrite. */
778 p = __get_free_page(GFP_KERNEL);
779 free_page(p);
780 schedule();
781
Laura Abbott920d4512016-02-25 16:36:44 -0800782 break;
783 }
784 case CT_READ_BUDDY_AFTER_FREE: {
785 unsigned long p = __get_free_page(GFP_KERNEL);
Sudip Mukherjee50fbd972016-04-05 22:41:05 +0530786 int saw, *val;
Laura Abbott920d4512016-02-25 16:36:44 -0800787 int *base;
788
789 if (!p)
790 break;
791
Sudip Mukherjee50fbd972016-04-05 22:41:05 +0530792 val = kmalloc(1024, GFP_KERNEL);
Kees Cook3d085c72016-04-06 15:53:27 -0700793 if (!val) {
794 free_page(p);
Laura Abbott920d4512016-02-25 16:36:44 -0800795 break;
Kees Cook3d085c72016-04-06 15:53:27 -0700796 }
Laura Abbott920d4512016-02-25 16:36:44 -0800797
798 base = (int *)p;
799
800 *val = 0x12345678;
801 base[0] = *val;
802 pr_info("Value in memory before free: %x\n", base[0]);
803 free_page(p);
804 pr_info("Attempting to read from freed memory\n");
805 saw = base[0];
806 if (saw != *val) {
807 /* Good! Poisoning happened, so declare a win. */
Kees Cook7c0ae5b2016-02-26 15:27:35 -0800808 pr_info("Memory correctly poisoned (%x)\n", saw);
Laura Abbott920d4512016-02-25 16:36:44 -0800809 BUG();
810 }
811 pr_info("Buddy page was not poisoned\n");
812
813 kfree(val);
Simon Kagstrom0347af42010-03-05 13:42:49 -0800814 break;
815 }
Namhyung Kim93e2f582010-10-26 14:22:40 -0700816 case CT_SOFTLOCKUP:
Frederic Weisbeckera48223f2010-05-26 14:44:29 -0700817 preempt_disable();
818 for (;;)
819 cpu_relax();
820 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -0700821 case CT_HARDLOCKUP:
Frederic Weisbeckera48223f2010-05-26 14:44:29 -0700822 local_irq_disable();
823 for (;;)
824 cpu_relax();
825 break;
Kees Cook274a5852013-07-08 10:01:32 -0700826 case CT_SPINLOCKUP:
827 /* Must be called twice to trigger. */
828 spin_lock(&lock_me_up);
Kees Cook51236622013-11-11 11:23:49 -0800829 /* Let sparse know we intended to exit holding the lock. */
830 __release(&lock_me_up);
Kees Cook274a5852013-07-08 10:01:32 -0700831 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -0700832 case CT_HUNG_TASK:
Frederic Weisbeckera48223f2010-05-26 14:44:29 -0700833 set_current_state(TASK_UNINTERRUPTIBLE);
834 schedule();
835 break;
Kees Cookcc33c5372013-07-08 10:01:33 -0700836 case CT_EXEC_DATA:
Kees Cook9a49a522016-02-22 14:09:29 -0800837 execute_location(data_area, true);
Kees Cookcc33c5372013-07-08 10:01:33 -0700838 break;
839 case CT_EXEC_STACK: {
840 u8 stack_area[EXEC_SIZE];
Kees Cook9a49a522016-02-22 14:09:29 -0800841 execute_location(stack_area, true);
Kees Cookcc33c5372013-07-08 10:01:33 -0700842 break;
843 }
844 case CT_EXEC_KMALLOC: {
845 u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
Kees Cook9a49a522016-02-22 14:09:29 -0800846 execute_location(kmalloc_area, true);
Kees Cookcc33c5372013-07-08 10:01:33 -0700847 kfree(kmalloc_area);
848 break;
849 }
850 case CT_EXEC_VMALLOC: {
851 u32 *vmalloc_area = vmalloc(EXEC_SIZE);
Kees Cook9a49a522016-02-22 14:09:29 -0800852 execute_location(vmalloc_area, true);
Kees Cookcc33c5372013-07-08 10:01:33 -0700853 vfree(vmalloc_area);
854 break;
855 }
Kees Cook9a49a522016-02-22 14:09:29 -0800856 case CT_EXEC_RODATA:
857 execute_location(lkdtm_rodata_do_nothing, false);
858 break;
Kees Cook9ae113c2013-10-24 09:25:57 -0700859 case CT_EXEC_USERSPACE: {
860 unsigned long user_addr;
861
862 user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
863 PROT_READ | PROT_WRITE | PROT_EXEC,
864 MAP_ANONYMOUS | MAP_PRIVATE, 0);
865 if (user_addr >= TASK_SIZE) {
866 pr_warn("Failed to allocate user memory\n");
867 return;
868 }
869 execute_user_location((void *)user_addr);
870 vm_munmap(user_addr, PAGE_SIZE);
871 break;
872 }
873 case CT_ACCESS_USERSPACE: {
Stephen Smalley2cb202c2015-10-27 16:47:53 -0400874 unsigned long user_addr, tmp = 0;
Kees Cook9ae113c2013-10-24 09:25:57 -0700875 unsigned long *ptr;
876
877 user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
878 PROT_READ | PROT_WRITE | PROT_EXEC,
879 MAP_ANONYMOUS | MAP_PRIVATE, 0);
880 if (user_addr >= TASK_SIZE) {
881 pr_warn("Failed to allocate user memory\n");
882 return;
883 }
884
Stephen Smalley2cb202c2015-10-27 16:47:53 -0400885 if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
886 pr_warn("copy_to_user failed\n");
887 vm_munmap(user_addr, PAGE_SIZE);
888 return;
889 }
890
Kees Cook9ae113c2013-10-24 09:25:57 -0700891 ptr = (unsigned long *)user_addr;
Kees Cookaac416f2014-02-09 13:48:47 -0800892
893 pr_info("attempting bad read at %p\n", ptr);
Kees Cook9ae113c2013-10-24 09:25:57 -0700894 tmp = *ptr;
895 tmp += 0xc0dec0de;
Kees Cookaac416f2014-02-09 13:48:47 -0800896
897 pr_info("attempting bad write at %p\n", ptr);
Kees Cook9ae113c2013-10-24 09:25:57 -0700898 *ptr = tmp;
899
900 vm_munmap(user_addr, PAGE_SIZE);
901
902 break;
903 }
904 case CT_WRITE_RO: {
Kees Cook7cca0712016-02-17 14:41:16 -0800905 /* Explicitly cast away "const" for the test. */
906 unsigned long *ptr = (unsigned long *)&rodata;
Kees Cook9ae113c2013-10-24 09:25:57 -0700907
Kees Cook7cca0712016-02-17 14:41:16 -0800908 pr_info("attempting bad rodata write at %p\n", ptr);
909 *ptr ^= 0xabcd1234;
Kees Cookaac416f2014-02-09 13:48:47 -0800910
Kees Cook7cca0712016-02-17 14:41:16 -0800911 break;
912 }
913 case CT_WRITE_RO_AFTER_INIT: {
914 unsigned long *ptr = &ro_after_init;
915
916 /*
917 * Verify we were written to during init. Since an Oops
918 * is considered a "success", a failure is to just skip the
919 * real test.
920 */
921 if ((*ptr & 0xAA) != 0xAA) {
922 pr_info("%p was NOT written during init!?\n", ptr);
923 break;
924 }
925
926 pr_info("attempting bad ro_after_init write at %p\n", ptr);
Kees Cook9ae113c2013-10-24 09:25:57 -0700927 *ptr ^= 0xabcd1234;
928
929 break;
930 }
Kees Cookdc2b9e92014-02-09 13:48:48 -0800931 case CT_WRITE_KERN: {
932 size_t size;
933 unsigned char *ptr;
934
935 size = (unsigned long)do_overwritten -
936 (unsigned long)do_nothing;
937 ptr = (unsigned char *)do_overwritten;
938
939 pr_info("attempting bad %zu byte write at %p\n", size, ptr);
940 memcpy(ptr, (unsigned char *)do_nothing, size);
941 flush_icache_range((unsigned long)ptr,
942 (unsigned long)(ptr + size));
943
944 do_overwritten();
945 break;
946 }
Kees Cookb5484522016-06-07 14:27:02 -0700947 case CT_ATOMIC_UNDERFLOW: {
David Windsor5fd9e482015-12-17 00:56:36 -0500948 atomic_t under = ATOMIC_INIT(INT_MIN);
Kees Cookb5484522016-06-07 14:27:02 -0700949
950 pr_info("attempting good atomic increment\n");
951 atomic_inc(&under);
952 atomic_dec(&under);
953
954 pr_info("attempting bad atomic underflow\n");
955 atomic_dec(&under);
956 break;
957 }
958 case CT_ATOMIC_OVERFLOW: {
David Windsor5fd9e482015-12-17 00:56:36 -0500959 atomic_t over = ATOMIC_INIT(INT_MAX);
960
Kees Cookb5484522016-06-07 14:27:02 -0700961 pr_info("attempting good atomic decrement\n");
962 atomic_dec(&over);
963 atomic_inc(&over);
964
965 pr_info("attempting bad atomic overflow\n");
David Windsor5fd9e482015-12-17 00:56:36 -0500966 atomic_inc(&over);
967
968 return;
969 }
Kees Cookaa981a62016-06-03 12:06:52 -0700970 case CT_USERCOPY_HEAP_SIZE_TO:
971 do_usercopy_heap_size(true);
972 break;
973 case CT_USERCOPY_HEAP_SIZE_FROM:
974 do_usercopy_heap_size(false);
975 break;
976 case CT_USERCOPY_HEAP_FLAG_TO:
977 do_usercopy_heap_flag(true);
978 break;
979 case CT_USERCOPY_HEAP_FLAG_FROM:
980 do_usercopy_heap_flag(false);
981 break;
982 case CT_USERCOPY_STACK_FRAME_TO:
983 do_usercopy_stack(true, true);
984 break;
985 case CT_USERCOPY_STACK_FRAME_FROM:
986 do_usercopy_stack(false, true);
987 break;
988 case CT_USERCOPY_STACK_BEYOND:
989 do_usercopy_stack(true, false);
990 break;
Kees Cook6c352142016-06-23 22:01:26 -0700991 case CT_USERCOPY_KERNEL:
992 do_usercopy_kernel();
993 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -0700994 case CT_NONE:
Simon Kagstrom0347af42010-03-05 13:42:49 -0800995 default:
996 break;
997 }
998
999}
1000
1001static void lkdtm_handler(void)
1002{
Josh Huntaa2c96d2011-06-27 16:18:08 -07001003 unsigned long flags;
Cong Wang92618182012-02-03 15:37:15 -08001004 bool do_it = false;
Josh Huntaa2c96d2011-06-27 16:18:08 -07001005
1006 spin_lock_irqsave(&count_lock, flags);
Simon Kagstrom0347af42010-03-05 13:42:49 -08001007 count--;
Kees Cookfeac6e22014-02-09 13:48:46 -08001008 pr_info("Crash point %s of type %s hit, trigger in %d rounds\n",
1009 cp_name_to_str(cpoint), cp_type_to_str(cptype), count);
Ankita Garg8bb31b92006-10-02 02:17:36 -07001010
1011 if (count == 0) {
Cong Wang92618182012-02-03 15:37:15 -08001012 do_it = true;
Ankita Garg8bb31b92006-10-02 02:17:36 -07001013 count = cpoint_count;
1014 }
Josh Huntaa2c96d2011-06-27 16:18:08 -07001015 spin_unlock_irqrestore(&count_lock, flags);
Cong Wang92618182012-02-03 15:37:15 -08001016
1017 if (do_it)
1018 lkdtm_do_action(cptype);
Ankita Garg8bb31b92006-10-02 02:17:36 -07001019}
1020
Simon Kagstrom0347af42010-03-05 13:42:49 -08001021static int lkdtm_register_cpoint(enum cname which)
Ankita Garg8bb31b92006-10-02 02:17:36 -07001022{
1023 int ret;
1024
Namhyung Kim93e2f582010-10-26 14:22:40 -07001025 cpoint = CN_INVALID;
Simon Kagstrom0347af42010-03-05 13:42:49 -08001026 if (lkdtm.entry != NULL)
1027 unregister_jprobe(&lkdtm);
Ankita Garg8bb31b92006-10-02 02:17:36 -07001028
Simon Kagstrom0347af42010-03-05 13:42:49 -08001029 switch (which) {
Namhyung Kim93e2f582010-10-26 14:22:40 -07001030 case CN_DIRECT:
Simon Kagstrom0347af42010-03-05 13:42:49 -08001031 lkdtm_do_action(cptype);
1032 return 0;
Namhyung Kim93e2f582010-10-26 14:22:40 -07001033 case CN_INT_HARDWARE_ENTRY:
M. Mohan Kumarf58f2fa2009-09-22 16:43:29 -07001034 lkdtm.kp.symbol_name = "do_IRQ";
Ankita Garg8bb31b92006-10-02 02:17:36 -07001035 lkdtm.entry = (kprobe_opcode_t*) jp_do_irq;
1036 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -07001037 case CN_INT_HW_IRQ_EN:
Ankita Garg8bb31b92006-10-02 02:17:36 -07001038 lkdtm.kp.symbol_name = "handle_IRQ_event";
1039 lkdtm.entry = (kprobe_opcode_t*) jp_handle_irq_event;
1040 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -07001041 case CN_INT_TASKLET_ENTRY:
Ankita Garg8bb31b92006-10-02 02:17:36 -07001042 lkdtm.kp.symbol_name = "tasklet_action";
1043 lkdtm.entry = (kprobe_opcode_t*) jp_tasklet_action;
1044 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -07001045 case CN_FS_DEVRW:
Ankita Garg8bb31b92006-10-02 02:17:36 -07001046 lkdtm.kp.symbol_name = "ll_rw_block";
1047 lkdtm.entry = (kprobe_opcode_t*) jp_ll_rw_block;
1048 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -07001049 case CN_MEM_SWAPOUT:
Ankita Garg18a61e42006-11-05 23:52:07 -08001050 lkdtm.kp.symbol_name = "shrink_inactive_list";
1051 lkdtm.entry = (kprobe_opcode_t*) jp_shrink_inactive_list;
Ankita Garg8bb31b92006-10-02 02:17:36 -07001052 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -07001053 case CN_TIMERADD:
Ankita Garg8bb31b92006-10-02 02:17:36 -07001054 lkdtm.kp.symbol_name = "hrtimer_start";
1055 lkdtm.entry = (kprobe_opcode_t*) jp_hrtimer_start;
1056 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -07001057 case CN_SCSI_DISPATCH_CMD:
Ankita Garg8bb31b92006-10-02 02:17:36 -07001058 lkdtm.kp.symbol_name = "scsi_dispatch_cmd";
1059 lkdtm.entry = (kprobe_opcode_t*) jp_scsi_dispatch_cmd;
1060 break;
Namhyung Kim93e2f582010-10-26 14:22:40 -07001061 case CN_IDE_CORE_CP:
Ankita Garg8bb31b92006-10-02 02:17:36 -07001062#ifdef CONFIG_IDE
1063 lkdtm.kp.symbol_name = "generic_ide_ioctl";
1064 lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl;
1065#else
Kees Cookfeac6e22014-02-09 13:48:46 -08001066 pr_info("Crash point not available\n");
Simon Kagstrom0347af42010-03-05 13:42:49 -08001067 return -EINVAL;
Ankita Garg8bb31b92006-10-02 02:17:36 -07001068#endif
1069 break;
1070 default:
Kees Cookfeac6e22014-02-09 13:48:46 -08001071 pr_info("Invalid Crash Point\n");
Simon Kagstrom0347af42010-03-05 13:42:49 -08001072 return -EINVAL;
Ankita Garg8bb31b92006-10-02 02:17:36 -07001073 }
1074
Simon Kagstrom0347af42010-03-05 13:42:49 -08001075 cpoint = which;
Ankita Garg8bb31b92006-10-02 02:17:36 -07001076 if ((ret = register_jprobe(&lkdtm)) < 0) {
Kees Cookfeac6e22014-02-09 13:48:46 -08001077 pr_info("Couldn't register jprobe\n");
Namhyung Kim93e2f582010-10-26 14:22:40 -07001078 cpoint = CN_INVALID;
Ankita Garg8bb31b92006-10-02 02:17:36 -07001079 }
1080
Simon Kagstrom0347af42010-03-05 13:42:49 -08001081 return ret;
1082}
1083
1084static ssize_t do_register_entry(enum cname which, struct file *f,
1085 const char __user *user_buf, size_t count, loff_t *off)
1086{
1087 char *buf;
1088 int err;
1089
1090 if (count >= PAGE_SIZE)
1091 return -EINVAL;
1092
1093 buf = (char *)__get_free_page(GFP_KERNEL);
1094 if (!buf)
1095 return -ENOMEM;
1096 if (copy_from_user(buf, user_buf, count)) {
1097 free_page((unsigned long) buf);
1098 return -EFAULT;
1099 }
	/* NULL-terminate and strip the trailing newline */
1101 buf[count] = '\0';
1102 strim(buf);
1103
1104 cptype = parse_cp_type(buf, count);
1105 free_page((unsigned long) buf);
1106
Namhyung Kim93e2f582010-10-26 14:22:40 -07001107 if (cptype == CT_NONE)
Simon Kagstrom0347af42010-03-05 13:42:49 -08001108 return -EINVAL;
1109
1110 err = lkdtm_register_cpoint(which);
1111 if (err < 0)
1112 return err;
1113
1114 *off += count;
1115
1116 return count;
1117}
1118
1119/* Generic read callback that just prints out the available crash types */
1120static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
1121 size_t count, loff_t *off)
1122{
1123 char *buf;
1124 int i, n, out;
1125
1126 buf = (char *)__get_free_page(GFP_KERNEL);
Alan Cox086ff4b2012-07-30 14:43:24 -07001127 if (buf == NULL)
1128 return -ENOMEM;
Simon Kagstrom0347af42010-03-05 13:42:49 -08001129
1130 n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
1131 for (i = 0; i < ARRAY_SIZE(cp_type); i++)
1132 n += snprintf(buf + n, PAGE_SIZE - n, "%s\n", cp_type[i]);
1133 buf[n] = '\0';
1134
1135 out = simple_read_from_buffer(user_buf, count, off,
1136 buf, n);
1137 free_page((unsigned long) buf);
1138
1139 return out;
1140}
1141
1142static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
1143{
Ankita Garg8bb31b92006-10-02 02:17:36 -07001144 return 0;
1145}
1146
Simon Kagstrom0347af42010-03-05 13:42:49 -08001147
1148static ssize_t int_hardware_entry(struct file *f, const char __user *buf,
1149 size_t count, loff_t *off)
1150{
Namhyung Kim93e2f582010-10-26 14:22:40 -07001151 return do_register_entry(CN_INT_HARDWARE_ENTRY, f, buf, count, off);
Simon Kagstrom0347af42010-03-05 13:42:49 -08001152}
1153
1154static ssize_t int_hw_irq_en(struct file *f, const char __user *buf,
1155 size_t count, loff_t *off)
1156{
Namhyung Kim93e2f582010-10-26 14:22:40 -07001157 return do_register_entry(CN_INT_HW_IRQ_EN, f, buf, count, off);
Simon Kagstrom0347af42010-03-05 13:42:49 -08001158}
1159
1160static ssize_t int_tasklet_entry(struct file *f, const char __user *buf,
1161 size_t count, loff_t *off)
1162{
Namhyung Kim93e2f582010-10-26 14:22:40 -07001163 return do_register_entry(CN_INT_TASKLET_ENTRY, f, buf, count, off);
Simon Kagstrom0347af42010-03-05 13:42:49 -08001164}
1165
1166static ssize_t fs_devrw_entry(struct file *f, const char __user *buf,
1167 size_t count, loff_t *off)
1168{
Namhyung Kim93e2f582010-10-26 14:22:40 -07001169 return do_register_entry(CN_FS_DEVRW, f, buf, count, off);
Simon Kagstrom0347af42010-03-05 13:42:49 -08001170}
1171
1172static ssize_t mem_swapout_entry(struct file *f, const char __user *buf,
1173 size_t count, loff_t *off)
1174{
Namhyung Kim93e2f582010-10-26 14:22:40 -07001175 return do_register_entry(CN_MEM_SWAPOUT, f, buf, count, off);
Simon Kagstrom0347af42010-03-05 13:42:49 -08001176}
1177
1178static ssize_t timeradd_entry(struct file *f, const char __user *buf,
1179 size_t count, loff_t *off)
1180{
Namhyung Kim93e2f582010-10-26 14:22:40 -07001181 return do_register_entry(CN_TIMERADD, f, buf, count, off);
Simon Kagstrom0347af42010-03-05 13:42:49 -08001182}
1183
1184static ssize_t scsi_dispatch_cmd_entry(struct file *f,
1185 const char __user *buf, size_t count, loff_t *off)
1186{
Namhyung Kim93e2f582010-10-26 14:22:40 -07001187 return do_register_entry(CN_SCSI_DISPATCH_CMD, f, buf, count, off);
Simon Kagstrom0347af42010-03-05 13:42:49 -08001188}
1189
1190static ssize_t ide_core_cp_entry(struct file *f, const char __user *buf,
1191 size_t count, loff_t *off)
1192{
Namhyung Kim93e2f582010-10-26 14:22:40 -07001193 return do_register_entry(CN_IDE_CORE_CP, f, buf, count, off);
Simon Kagstrom0347af42010-03-05 13:42:49 -08001194}
1195
1196/* Special entry to just crash directly. Available without KPROBEs */
1197static ssize_t direct_entry(struct file *f, const char __user *user_buf,
1198 size_t count, loff_t *off)
1199{
1200 enum ctype type;
1201 char *buf;
1202
1203 if (count >= PAGE_SIZE)
1204 return -EINVAL;
1205 if (count < 1)
1206 return -EINVAL;
1207
1208 buf = (char *)__get_free_page(GFP_KERNEL);
1209 if (!buf)
1210 return -ENOMEM;
1211 if (copy_from_user(buf, user_buf, count)) {
1212 free_page((unsigned long) buf);
1213 return -EFAULT;
1214 }
	/* NULL-terminate and strip the trailing newline */
1216 buf[count] = '\0';
1217 strim(buf);
1218
1219 type = parse_cp_type(buf, count);
1220 free_page((unsigned long) buf);
Namhyung Kim93e2f582010-10-26 14:22:40 -07001221 if (type == CT_NONE)
Simon Kagstrom0347af42010-03-05 13:42:49 -08001222 return -EINVAL;
1223
Kees Cookfeac6e22014-02-09 13:48:46 -08001224 pr_info("Performing direct entry %s\n", cp_type_to_str(type));
Simon Kagstrom0347af42010-03-05 13:42:49 -08001225 lkdtm_do_action(type);
1226 *off += count;
1227
1228 return count;
1229}
1230
1231struct crash_entry {
1232 const char *name;
1233 const struct file_operations fops;
1234};
1235
1236static const struct crash_entry crash_entries[] = {
1237 {"DIRECT", {.read = lkdtm_debugfs_read,
Arnd Bergmann05271ec2010-07-06 19:10:26 +02001238 .llseek = generic_file_llseek,
Simon Kagstrom0347af42010-03-05 13:42:49 -08001239 .open = lkdtm_debugfs_open,
1240 .write = direct_entry} },
1241 {"INT_HARDWARE_ENTRY", {.read = lkdtm_debugfs_read,
Arnd Bergmann05271ec2010-07-06 19:10:26 +02001242 .llseek = generic_file_llseek,
Simon Kagstrom0347af42010-03-05 13:42:49 -08001243 .open = lkdtm_debugfs_open,
1244 .write = int_hardware_entry} },
1245 {"INT_HW_IRQ_EN", {.read = lkdtm_debugfs_read,
Arnd Bergmann05271ec2010-07-06 19:10:26 +02001246 .llseek = generic_file_llseek,
Simon Kagstrom0347af42010-03-05 13:42:49 -08001247 .open = lkdtm_debugfs_open,
1248 .write = int_hw_irq_en} },
1249 {"INT_TASKLET_ENTRY", {.read = lkdtm_debugfs_read,
Arnd Bergmann05271ec2010-07-06 19:10:26 +02001250 .llseek = generic_file_llseek,
Simon Kagstrom0347af42010-03-05 13:42:49 -08001251 .open = lkdtm_debugfs_open,
1252 .write = int_tasklet_entry} },
1253 {"FS_DEVRW", {.read = lkdtm_debugfs_read,
Arnd Bergmann05271ec2010-07-06 19:10:26 +02001254 .llseek = generic_file_llseek,
Simon Kagstrom0347af42010-03-05 13:42:49 -08001255 .open = lkdtm_debugfs_open,
1256 .write = fs_devrw_entry} },
1257 {"MEM_SWAPOUT", {.read = lkdtm_debugfs_read,
Arnd Bergmann05271ec2010-07-06 19:10:26 +02001258 .llseek = generic_file_llseek,
Simon Kagstrom0347af42010-03-05 13:42:49 -08001259 .open = lkdtm_debugfs_open,
1260 .write = mem_swapout_entry} },
1261 {"TIMERADD", {.read = lkdtm_debugfs_read,
Arnd Bergmann05271ec2010-07-06 19:10:26 +02001262 .llseek = generic_file_llseek,
Simon Kagstrom0347af42010-03-05 13:42:49 -08001263 .open = lkdtm_debugfs_open,
1264 .write = timeradd_entry} },
1265 {"SCSI_DISPATCH_CMD", {.read = lkdtm_debugfs_read,
Arnd Bergmann05271ec2010-07-06 19:10:26 +02001266 .llseek = generic_file_llseek,
Simon Kagstrom0347af42010-03-05 13:42:49 -08001267 .open = lkdtm_debugfs_open,
1268 .write = scsi_dispatch_cmd_entry} },
1269 {"IDE_CORE_CP", {.read = lkdtm_debugfs_read,
Arnd Bergmann05271ec2010-07-06 19:10:26 +02001270 .llseek = generic_file_llseek,
Simon Kagstrom0347af42010-03-05 13:42:49 -08001271 .open = lkdtm_debugfs_open,
1272 .write = ide_core_cp_entry} },
1273};
1274
1275static struct dentry *lkdtm_debugfs_root;
1276
1277static int __init lkdtm_module_init(void)
1278{
1279 int ret = -EINVAL;
1280 int n_debugfs_entries = 1; /* Assume only the direct entry */
1281 int i;
1282
Kees Cook7cca0712016-02-17 14:41:16 -08001283 /* Make sure we can write to __ro_after_init values during __init */
1284 ro_after_init |= 0xAA;
1285
Kees Cookaa981a62016-06-03 12:06:52 -07001286 /* Prepare cache that lacks SLAB_USERCOPY flag. */
1287 cache_size = clamp_t(int, alloc_size, 1, PAGE_SIZE);
1288 bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
1289 0, NULL);
1290
Simon Kagstrom0347af42010-03-05 13:42:49 -08001291 /* Register debugfs interface */
1292 lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
1293 if (!lkdtm_debugfs_root) {
Kees Cookfeac6e22014-02-09 13:48:46 -08001294 pr_err("creating root dir failed\n");
Simon Kagstrom0347af42010-03-05 13:42:49 -08001295 return -ENODEV;
1296 }
1297
1298#ifdef CONFIG_KPROBES
1299 n_debugfs_entries = ARRAY_SIZE(crash_entries);
1300#endif
1301
1302 for (i = 0; i < n_debugfs_entries; i++) {
1303 const struct crash_entry *cur = &crash_entries[i];
1304 struct dentry *de;
1305
1306 de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root,
1307 NULL, &cur->fops);
1308 if (de == NULL) {
Kees Cookfeac6e22014-02-09 13:48:46 -08001309 pr_err("could not create %s\n", cur->name);
Simon Kagstrom0347af42010-03-05 13:42:49 -08001310 goto out_err;
1311 }
1312 }
1313
1314 if (lkdtm_parse_commandline() == -EINVAL) {
Kees Cookfeac6e22014-02-09 13:48:46 -08001315 pr_info("Invalid command\n");
Simon Kagstrom0347af42010-03-05 13:42:49 -08001316 goto out_err;
1317 }
1318
Namhyung Kim93e2f582010-10-26 14:22:40 -07001319 if (cpoint != CN_INVALID && cptype != CT_NONE) {
Simon Kagstrom0347af42010-03-05 13:42:49 -08001320 ret = lkdtm_register_cpoint(cpoint);
1321 if (ret < 0) {
Kees Cookfeac6e22014-02-09 13:48:46 -08001322 pr_info("Invalid crash point %d\n", cpoint);
Simon Kagstrom0347af42010-03-05 13:42:49 -08001323 goto out_err;
1324 }
Kees Cookfeac6e22014-02-09 13:48:46 -08001325 pr_info("Crash point %s of type %s registered\n",
1326 cpoint_name, cpoint_type);
Simon Kagstrom0347af42010-03-05 13:42:49 -08001327 } else {
Kees Cookfeac6e22014-02-09 13:48:46 -08001328 pr_info("No crash points registered, enable through debugfs\n");
Simon Kagstrom0347af42010-03-05 13:42:49 -08001329 }
1330
1331 return 0;
1332
1333out_err:
1334 debugfs_remove_recursive(lkdtm_debugfs_root);
1335 return ret;
1336}
1337
Adrian Bunk21181162008-02-06 01:36:50 -08001338static void __exit lkdtm_module_exit(void)
Ankita Garg8bb31b92006-10-02 02:17:36 -07001339{
Simon Kagstrom0347af42010-03-05 13:42:49 -08001340 debugfs_remove_recursive(lkdtm_debugfs_root);
1341
Kees Cookaa981a62016-06-03 12:06:52 -07001342 kmem_cache_destroy(bad_cache);
1343
Simon Kagstrom0347af42010-03-05 13:42:49 -08001344 unregister_jprobe(&lkdtm);
Kees Cookfeac6e22014-02-09 13:48:46 -08001345 pr_info("Crash point unregistered\n");
Ankita Garg8bb31b92006-10-02 02:17:36 -07001346}
1347
1348module_init(lkdtm_module_init);
1349module_exit(lkdtm_module_exit);
1350
1351MODULE_LICENSE("GPL");
Terry Chiada869202014-07-02 21:02:25 +08001352MODULE_DESCRIPTION("Kprobe module for testing crash dumps");