/*
 * Linux Kernel Dump Test Module for testing kernel crash conditions:
 * induces system failures at predefined crashpoints and under predefined
 * operational conditions in order to evaluate the reliability of kernel
 * sanity checking and crash dumps obtained using different dumping
 * solutions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Author: Ankita Garg <[email protected]>
 *
 * It is adapted from the Linux Kernel Dump Test Tool by
 * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
 *
 * Debugfs support added by Simon Kagstrom <[email protected]>
 *
 * See Documentation/fault-injection/provoke-crashes.txt for instructions
 */
#define pr_fmt(fmt) "lkdtm: " fmt

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/buffer_head.h>
#include <linux/kprobes.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_IDE
#include <linux/ide.h>
#endif

#include "lkdtm.h"

/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
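/*
 * Illustrative sizing (added note, not from the original source): with a
 * 16kB THREAD_SIZE and CONFIG_FRAME_WARN=1024, REC_STACK_SIZE is 512 bytes
 * and REC_NUM_DEFAULT is (16384 / 512) * 2 = 64 frames, i.e. roughly twice
 * as many frames as fit in the thread stack, so the default recursion
 * depth is expected to run off the end of it.
 */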

#define DEFAULT_COUNT 10
#define EXEC_SIZE 64

enum cname {
	CN_INVALID,
	CN_INT_HARDWARE_ENTRY,
	CN_INT_HW_IRQ_EN,
	CN_INT_TASKLET_ENTRY,
	CN_FS_DEVRW,
	CN_MEM_SWAPOUT,
	CN_TIMERADD,
	CN_SCSI_DISPATCH_CMD,
	CN_IDE_CORE_CP,
	CN_DIRECT,
};

enum ctype {
	CT_NONE,
	CT_PANIC,
	CT_BUG,
	CT_WARNING,
	CT_EXCEPTION,
	CT_LOOP,
	CT_OVERFLOW,
	CT_CORRUPT_STACK,
	CT_UNALIGNED_LOAD_STORE_WRITE,
	CT_OVERWRITE_ALLOCATION,
	CT_WRITE_AFTER_FREE,
	CT_READ_AFTER_FREE,
	CT_WRITE_BUDDY_AFTER_FREE,
	CT_READ_BUDDY_AFTER_FREE,
	CT_SOFTLOCKUP,
	CT_HARDLOCKUP,
	CT_SPINLOCKUP,
	CT_HUNG_TASK,
	CT_EXEC_DATA,
	CT_EXEC_STACK,
	CT_EXEC_KMALLOC,
	CT_EXEC_VMALLOC,
	CT_EXEC_RODATA,
	CT_EXEC_USERSPACE,
	CT_ACCESS_USERSPACE,
	CT_WRITE_RO,
	CT_WRITE_RO_AFTER_INIT,
	CT_WRITE_KERN,
	CT_ATOMIC_UNDERFLOW,
	CT_ATOMIC_OVERFLOW,
	CT_USERCOPY_HEAP_SIZE_TO,
	CT_USERCOPY_HEAP_SIZE_FROM,
	CT_USERCOPY_HEAP_FLAG_TO,
	CT_USERCOPY_HEAP_FLAG_FROM,
	CT_USERCOPY_STACK_FRAME_TO,
	CT_USERCOPY_STACK_FRAME_FROM,
	CT_USERCOPY_STACK_BEYOND,
	CT_USERCOPY_KERNEL,
};

static char* cp_name[] = {
	"INT_HARDWARE_ENTRY",
	"INT_HW_IRQ_EN",
	"INT_TASKLET_ENTRY",
	"FS_DEVRW",
	"MEM_SWAPOUT",
	"TIMERADD",
	"SCSI_DISPATCH_CMD",
	"IDE_CORE_CP",
	"DIRECT",
};

static char* cp_type[] = {
	"PANIC",
	"BUG",
	"WARNING",
	"EXCEPTION",
	"LOOP",
	"OVERFLOW",
	"CORRUPT_STACK",
	"UNALIGNED_LOAD_STORE_WRITE",
	"OVERWRITE_ALLOCATION",
	"WRITE_AFTER_FREE",
	"READ_AFTER_FREE",
	"WRITE_BUDDY_AFTER_FREE",
	"READ_BUDDY_AFTER_FREE",
	"SOFTLOCKUP",
	"HARDLOCKUP",
	"SPINLOCKUP",
	"HUNG_TASK",
	"EXEC_DATA",
	"EXEC_STACK",
	"EXEC_KMALLOC",
	"EXEC_VMALLOC",
	"EXEC_RODATA",
	"EXEC_USERSPACE",
	"ACCESS_USERSPACE",
	"WRITE_RO",
	"WRITE_RO_AFTER_INIT",
	"WRITE_KERN",
	"ATOMIC_UNDERFLOW",
	"ATOMIC_OVERFLOW",
	"USERCOPY_HEAP_SIZE_TO",
	"USERCOPY_HEAP_SIZE_FROM",
	"USERCOPY_HEAP_FLAG_TO",
	"USERCOPY_HEAP_FLAG_FROM",
	"USERCOPY_STACK_FRAME_TO",
	"USERCOPY_STACK_FRAME_FROM",
	"USERCOPY_STACK_BEYOND",
	"USERCOPY_KERNEL",
};
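/*
 * Note (added for clarity): the string tables above must stay in the same
 * order as enum cname and enum ctype.  Because the enums start with the
 * CN_INVALID/CT_NONE sentinels, a valid entry's enum value is its array
 * index plus one; parse_cp_type() and the *_to_str() helpers below rely
 * on that offset.
 */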

static struct jprobe lkdtm;

static int lkdtm_parse_commandline(void);
static void lkdtm_handler(void);

static char* cpoint_name;
static char* cpoint_type;
static int cpoint_count = DEFAULT_COUNT;
static int recur_count = REC_NUM_DEFAULT;

static enum cname cpoint = CN_INVALID;
static enum ctype cptype = CT_NONE;
static int count = DEFAULT_COUNT;
static DEFINE_SPINLOCK(count_lock);
static DEFINE_SPINLOCK(lock_me_up);

static u8 data_area[EXEC_SIZE];

static size_t cache_size = 1024;
static struct kmem_cache *bad_cache;

static const unsigned char test_text[] = "This is a test.\n";
static const unsigned long rodata = 0xAA55AA55;
static unsigned long ro_after_init __ro_after_init = 0x55AA5500;

module_param(recur_count, int, 0644);
MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
module_param(cpoint_name, charp, 0444);
MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
module_param(cpoint_type, charp, 0444);
MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
		 "hitting the crash point");
module_param(cpoint_count, int, 0644);
MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
		 "crash point is to be hit to trigger action");

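/*
 * Example module-load usage (illustrative, based on the parameters above;
 * see Documentation/fault-injection/provoke-crashes.txt):
 *
 *   insmod lkdtm.ko cpoint_name=INT_HARDWARE_ENTRY cpoint_type=PANIC \
 *          cpoint_count=10
 *
 * would panic the machine once the probed interrupt path has been hit
 * ten times.
 */

/*
 * Each jp_* stub below mirrors the signature of the function named in
 * lkdtm_register_cpoint(), calls lkdtm_handler() to count the hit, and
 * then hands control back to the probed function via jprobe_return().
 */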
static unsigned int jp_do_irq(unsigned int irq)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}

static irqreturn_t jp_handle_irq_event(unsigned int irq,
				       struct irqaction *action)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}

static void jp_tasklet_action(struct softirq_action *a)
{
	lkdtm_handler();
	jprobe_return();
}

static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
	lkdtm_handler();
	jprobe_return();
}

struct scan_control;

static unsigned long jp_shrink_inactive_list(unsigned long max_scan,
					     struct zone *zone,
					     struct scan_control *sc)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}

static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim,
			    const enum hrtimer_mode mode)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}

static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}

#ifdef CONFIG_IDE
static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
			struct block_device *bdev, unsigned int cmd,
			unsigned long arg)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}
#endif

/* Return the crash type or CT_NONE if the name is invalid */
static enum ctype parse_cp_type(const char *what, size_t count)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cp_type); i++) {
		if (!strcmp(what, cp_type[i]))
			return i + 1;
	}

	return CT_NONE;
}

static const char *cp_type_to_str(enum ctype type)
{
	if (type == CT_NONE || type < 0 || type > ARRAY_SIZE(cp_type))
		return "None";

	return cp_type[type - 1];
}

static const char *cp_name_to_str(enum cname name)
{
	if (name == CN_INVALID || name < 0 || name > ARRAY_SIZE(cp_name))
		return "INVALID";

	return cp_name[name - 1];
}


static int lkdtm_parse_commandline(void)
{
	int i;
	unsigned long flags;

	if (cpoint_count < 1 || recur_count < 1)
		return -EINVAL;

	spin_lock_irqsave(&count_lock, flags);
	count = cpoint_count;
	spin_unlock_irqrestore(&count_lock, flags);

	/* No special parameters */
	if (!cpoint_type && !cpoint_name)
		return 0;

	/* These must either both be set or both be unset */
	if (!cpoint_type || !cpoint_name)
		return -EINVAL;

	cptype = parse_cp_type(cpoint_type, strlen(cpoint_type));
	if (cptype == CT_NONE)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(cp_name); i++) {
		if (!strcmp(cpoint_name, cp_name[i])) {
			cpoint = i + 1;
			return 0;
		}
	}

	/* Could not find a valid crash point */
	return -EINVAL;
}

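/*
 * Note (added for clarity): each call below pins REC_STACK_SIZE bytes of
 * stack, and the memset forces the compiler to actually materialise the
 * buffer, so calling this with the default recur_count is expected to run
 * off the end of the thread stack.
 */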
static int recursive_loop(int remaining)
{
	char buf[REC_STACK_SIZE];

	/* Make sure compiler does not optimize this away. */
	memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
	if (!remaining)
		return 0;
	else
		return recursive_loop(remaining - 1);
}

static void do_nothing(void)
{
	return;
}

/* Must immediately follow do_nothing for size calculations to work out. */
static void do_overwritten(void)
{
	pr_info("do_overwritten wasn't overwritten!\n");
	return;
}

static noinline void corrupt_stack(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8];

	memset((void *)data, 0, 64);
}

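/*
 * Copy do_nothing() into *dst (when write is true) and then call through
 * dst, so a kernel enforcing W^X memory permissions should fault on the
 * "bad execution" attempt while the preceding direct call succeeds.
 */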
static noinline void execute_location(void *dst, bool write)
{
	void (*func)(void) = dst;

	pr_info("attempting ok execution at %p\n", do_nothing);
	do_nothing();

	if (write) {
		memcpy(dst, do_nothing, EXEC_SIZE);
		flush_icache_range((unsigned long)dst,
				   (unsigned long)dst + EXEC_SIZE);
	}
	pr_info("attempting bad execution at %p\n", func);
	func();
}

static void execute_user_location(void *dst)
{
	/* Intentionally crossing kernel/user memory boundary. */
	void (*func)(void) = dst;

	pr_info("attempting ok execution at %p\n", do_nothing);
	do_nothing();

	if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
		return;
	flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
	pr_info("attempting bad execution at %p\n", func);
	func();
}

/*
 * Instead of adding -Wno-return-local-addr, just pass the stack address
 * through a function to obfuscate it from the compiler.
 */
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
	return stack + 0;
}

static noinline unsigned char *do_usercopy_stack_callee(int value)
{
	unsigned char buf[32];
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(buf); i++) {
		buf[i] = value & 0xff;
	}

	return trick_compiler(buf);
}

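/*
 * Exercise the stack checking done by hardened usercopy: with bad_frame
 * set, the "bad" copies use a pointer returned from a callee's (already
 * dead) stack frame; otherwise they use an address just inside the end of
 * the thread stack.  The "good" copies of a local buffer should pass.
 */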
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
	unsigned long user_addr;
	unsigned char good_stack[32];
	unsigned char *bad_stack;
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(good_stack); i++)
		good_stack[i] = test_text[i % sizeof(test_text)];

	/* This is a pointer to outside our current stack frame. */
	if (bad_frame) {
		bad_stack = do_usercopy_stack_callee((uintptr_t)bad_stack);
	} else {
		/* Put start address just inside stack. */
		bad_stack = task_stack_page(current) + THREAD_SIZE;
		bad_stack -= sizeof(unsigned long);
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	if (to_user) {
		pr_info("attempting good copy_to_user of local stack\n");
		if (copy_to_user((void __user *)user_addr, good_stack,
				 sizeof(good_stack))) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of distant stack\n");
		if (copy_to_user((void __user *)user_addr, bad_stack,
				 sizeof(good_stack))) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		/*
		 * There isn't a safe way to not be protected by usercopy
		 * if we're going to write to another thread's stack.
		 */
		if (!bad_frame)
			goto free_user;

		pr_info("attempting good copy_from_user of local stack\n");
		if (copy_from_user(good_stack, (void __user *)user_addr,
				   sizeof(good_stack))) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of distant stack\n");
		if (copy_from_user(bad_stack, (void __user *)user_addr,
				   sizeof(good_stack))) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

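/*
 * Attempt a copy_to_user() from kernel text (using vm_mmap's address as a
 * convenient text symbol); hardened usercopy should reject it, while the
 * preceding copy from rodata is expected to succeed.
 */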
static void do_usercopy_kernel(void)
{
	unsigned long user_addr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	pr_info("attempting good copy_to_user from kernel rodata\n");
	if (copy_to_user((void __user *)user_addr, test_text,
			 sizeof(test_text))) {
		pr_warn("copy_to_user failed unexpectedly?!\n");
		goto free_user;
	}

	pr_info("attempting bad copy_to_user from kernel text\n");
	if (copy_to_user((void __user *)user_addr, vm_mmap, PAGE_SIZE)) {
		pr_warn("copy_to_user failed, but lacked Oops\n");
		goto free_user;
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

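/*
 * Copy across a kmalloc object's boundary: the "bad" copies ask for
 * 2 * size bytes against a size-byte allocation, which the heap object
 * size check in hardened usercopy should refuse.
 */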
static void do_usercopy_heap_size(bool to_user)
{
	unsigned long user_addr;
	unsigned char *one, *two;
	size_t size = 1024;

	one = kmalloc(size, GFP_KERNEL);
	two = kmalloc(size, GFP_KERNEL);
	if (!one || !two) {
		pr_warn("Failed to allocate kernel memory\n");
		goto free_kernel;
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_kernel;
	}

	memset(one, 'A', size);
	memset(two, 'B', size);

	if (to_user) {
		pr_info("attempting good copy_to_user of correct size\n");
		if (copy_to_user((void __user *)user_addr, one, size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of too large size\n");
		if (copy_to_user((void __user *)user_addr, one, 2 * size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user of correct size\n");
		if (copy_from_user(one, (void __user *)user_addr, size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of too large size\n");
		if (copy_from_user(one, (void __user *)user_addr, 2 * size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
	kfree(one);
	kfree(two);
}

static void do_usercopy_heap_flag(bool to_user)
{
	unsigned long user_addr;
	unsigned char *good_buf = NULL;
	unsigned char *bad_buf = NULL;

	/* Make sure cache was prepared. */
	if (!bad_cache) {
		pr_warn("Failed to allocate kernel cache\n");
		return;
	}

	/*
	 * Allocate one buffer from each cache (kmalloc will have the
	 * SLAB_USERCOPY flag already, but "bad_cache" won't).
	 */
	good_buf = kmalloc(cache_size, GFP_KERNEL);
	bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
	if (!good_buf || !bad_buf) {
		pr_warn("Failed to allocate buffers from caches\n");
		goto free_alloc;
	}

	/* Allocate user memory we'll poke at. */
	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_alloc;
	}

	memset(good_buf, 'A', cache_size);
	memset(bad_buf, 'B', cache_size);

	if (to_user) {
		pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
		if (copy_to_user((void __user *)user_addr, good_buf,
				 cache_size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
		if (copy_to_user((void __user *)user_addr, bad_buf,
				 cache_size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
		if (copy_from_user(good_buf, (void __user *)user_addr,
				   cache_size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
		if (copy_from_user(bad_buf, (void __user *)user_addr,
				   cache_size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_alloc:
	if (bad_buf)
		kmem_cache_free(bad_cache, bad_buf);
	kfree(good_buf);
}

static void lkdtm_do_action(enum ctype which)
{
	switch (which) {
	case CT_PANIC:
		panic("dumptest");
		break;
	case CT_BUG:
		BUG();
		break;
	case CT_WARNING:
		WARN_ON(1);
		break;
	case CT_EXCEPTION:
		*((int *) 0) = 0;
		break;
	case CT_LOOP:
		for (;;)
			;
		break;
	case CT_OVERFLOW:
		(void) recursive_loop(recur_count);
		break;
	case CT_CORRUPT_STACK:
		corrupt_stack();
		break;
	case CT_UNALIGNED_LOAD_STORE_WRITE: {
		static u8 data[5] __attribute__((aligned(4))) = {1, 2,
				3, 4, 5};
		u32 *p;
		u32 val = 0x12345678;

		p = (u32 *)(data + 1);
		if (*p == 0)
			val = 0x87654321;
		*p = val;
		break;
	}
	case CT_OVERWRITE_ALLOCATION: {
		size_t len = 1020;
		u32 *data = kmalloc(len, GFP_KERNEL);

		data[1024 / sizeof(u32)] = 0x12345678;
		kfree(data);
		break;
	}
	case CT_WRITE_AFTER_FREE: {
		int *base, *again;
		size_t len = 1024;
		/*
		 * The slub allocator uses the first word to store the free
		 * pointer in some configurations. Use the middle of the
		 * allocation to avoid running into the freelist
		 */
		size_t offset = (len / sizeof(*base)) / 2;

		base = kmalloc(len, GFP_KERNEL);
		pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
		pr_info("Attempting bad write to freed memory at %p\n",
			&base[offset]);
		kfree(base);
		base[offset] = 0x0abcdef0;
		/* Attempt to notice the overwrite. */
		again = kmalloc(len, GFP_KERNEL);
		kfree(again);
		if (again != base)
			pr_info("Hmm, didn't get the same memory range.\n");

		break;
	}
	case CT_READ_AFTER_FREE: {
		int *base, *val, saw;
		size_t len = 1024;
		/*
		 * The slub allocator uses the first word to store the free
		 * pointer in some configurations. Use the middle of the
		 * allocation to avoid running into the freelist
		 */
		size_t offset = (len / sizeof(*base)) / 2;

		base = kmalloc(len, GFP_KERNEL);
		if (!base)
			break;

		val = kmalloc(len, GFP_KERNEL);
		if (!val) {
			kfree(base);
			break;
		}

		*val = 0x12345678;
		base[offset] = *val;
		pr_info("Value in memory before free: %x\n", base[offset]);

		kfree(base);

		pr_info("Attempting bad read from freed memory\n");
		saw = base[offset];
		if (saw != *val) {
			/* Good! Poisoning happened, so declare a win. */
			pr_info("Memory correctly poisoned (%x)\n", saw);
			BUG();
		}
		pr_info("Memory was not poisoned\n");

		kfree(val);
		break;
	}
	case CT_WRITE_BUDDY_AFTER_FREE: {
		unsigned long p = __get_free_page(GFP_KERNEL);
		if (!p)
			break;
		pr_info("Writing to the buddy page before free\n");
		memset((void *)p, 0x3, PAGE_SIZE);
		free_page(p);
		schedule();
		pr_info("Attempting bad write to the buddy page after free\n");
		memset((void *)p, 0x78, PAGE_SIZE);
		/* Attempt to notice the overwrite. */
		p = __get_free_page(GFP_KERNEL);
		free_page(p);
		schedule();

		break;
	}
	case CT_READ_BUDDY_AFTER_FREE: {
		unsigned long p = __get_free_page(GFP_KERNEL);
		int saw, *val;
		int *base;

		if (!p)
			break;

		val = kmalloc(1024, GFP_KERNEL);
		if (!val) {
			free_page(p);
			break;
		}

		base = (int *)p;

		*val = 0x12345678;
		base[0] = *val;
		pr_info("Value in memory before free: %x\n", base[0]);
		free_page(p);
		pr_info("Attempting to read from freed memory\n");
		saw = base[0];
		if (saw != *val) {
			/* Good! Poisoning happened, so declare a win. */
			pr_info("Memory correctly poisoned (%x)\n", saw);
			BUG();
		}
		pr_info("Buddy page was not poisoned\n");

		kfree(val);
		break;
	}
	case CT_SOFTLOCKUP:
		preempt_disable();
		for (;;)
			cpu_relax();
		break;
	case CT_HARDLOCKUP:
		local_irq_disable();
		for (;;)
			cpu_relax();
		break;
	case CT_SPINLOCKUP:
		/* Must be called twice to trigger. */
		spin_lock(&lock_me_up);
		/* Let sparse know we intended to exit holding the lock. */
		__release(&lock_me_up);
		break;
	case CT_HUNG_TASK:
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
		break;
	case CT_EXEC_DATA:
		execute_location(data_area, true);
		break;
	case CT_EXEC_STACK: {
		u8 stack_area[EXEC_SIZE];
		execute_location(stack_area, true);
		break;
	}
	case CT_EXEC_KMALLOC: {
		u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
		execute_location(kmalloc_area, true);
		kfree(kmalloc_area);
		break;
	}
	case CT_EXEC_VMALLOC: {
		u32 *vmalloc_area = vmalloc(EXEC_SIZE);
		execute_location(vmalloc_area, true);
		vfree(vmalloc_area);
		break;
	}
	case CT_EXEC_RODATA:
		execute_location(lkdtm_rodata_do_nothing, false);
		break;
	case CT_EXEC_USERSPACE: {
		unsigned long user_addr;

		user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
				    PROT_READ | PROT_WRITE | PROT_EXEC,
				    MAP_ANONYMOUS | MAP_PRIVATE, 0);
		if (user_addr >= TASK_SIZE) {
			pr_warn("Failed to allocate user memory\n");
			return;
		}
		execute_user_location((void *)user_addr);
		vm_munmap(user_addr, PAGE_SIZE);
		break;
	}
	case CT_ACCESS_USERSPACE: {
		unsigned long user_addr, tmp = 0;
		unsigned long *ptr;

		user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
				    PROT_READ | PROT_WRITE | PROT_EXEC,
				    MAP_ANONYMOUS | MAP_PRIVATE, 0);
		if (user_addr >= TASK_SIZE) {
			pr_warn("Failed to allocate user memory\n");
			return;
		}

		if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
			pr_warn("copy_to_user failed\n");
			vm_munmap(user_addr, PAGE_SIZE);
			return;
		}

		ptr = (unsigned long *)user_addr;

		pr_info("attempting bad read at %p\n", ptr);
		tmp = *ptr;
		tmp += 0xc0dec0de;

		pr_info("attempting bad write at %p\n", ptr);
		*ptr = tmp;

		vm_munmap(user_addr, PAGE_SIZE);

		break;
	}
	case CT_WRITE_RO: {
		/* Explicitly cast away "const" for the test. */
		unsigned long *ptr = (unsigned long *)&rodata;

		pr_info("attempting bad rodata write at %p\n", ptr);
		*ptr ^= 0xabcd1234;

		break;
	}
	case CT_WRITE_RO_AFTER_INIT: {
		unsigned long *ptr = &ro_after_init;

		/*
		 * Verify we were written to during init. Since an Oops
		 * is considered a "success", a failure is to just skip the
		 * real test.
		 */
		if ((*ptr & 0xAA) != 0xAA) {
			pr_info("%p was NOT written during init!?\n", ptr);
			break;
		}

		pr_info("attempting bad ro_after_init write at %p\n", ptr);
		*ptr ^= 0xabcd1234;

		break;
	}
	case CT_WRITE_KERN: {
		size_t size;
		unsigned char *ptr;

		size = (unsigned long)do_overwritten -
		       (unsigned long)do_nothing;
		ptr = (unsigned char *)do_overwritten;

		pr_info("attempting bad %zu byte write at %p\n", size, ptr);
		memcpy(ptr, (unsigned char *)do_nothing, size);
		flush_icache_range((unsigned long)ptr,
				   (unsigned long)(ptr + size));

		do_overwritten();
		break;
	}
	case CT_ATOMIC_UNDERFLOW: {
		atomic_t under = ATOMIC_INIT(INT_MIN);

		pr_info("attempting good atomic increment\n");
		atomic_inc(&under);
		atomic_dec(&under);

		pr_info("attempting bad atomic underflow\n");
		atomic_dec(&under);
		break;
	}
	case CT_ATOMIC_OVERFLOW: {
		atomic_t over = ATOMIC_INIT(INT_MAX);

		pr_info("attempting good atomic decrement\n");
		atomic_dec(&over);
		atomic_inc(&over);

		pr_info("attempting bad atomic overflow\n");
		atomic_inc(&over);

		return;
	}
	case CT_USERCOPY_HEAP_SIZE_TO:
		do_usercopy_heap_size(true);
		break;
	case CT_USERCOPY_HEAP_SIZE_FROM:
		do_usercopy_heap_size(false);
		break;
	case CT_USERCOPY_HEAP_FLAG_TO:
		do_usercopy_heap_flag(true);
		break;
	case CT_USERCOPY_HEAP_FLAG_FROM:
		do_usercopy_heap_flag(false);
		break;
	case CT_USERCOPY_STACK_FRAME_TO:
		do_usercopy_stack(true, true);
		break;
	case CT_USERCOPY_STACK_FRAME_FROM:
		do_usercopy_stack(false, true);
		break;
	case CT_USERCOPY_STACK_BEYOND:
		do_usercopy_stack(true, false);
		break;
	case CT_USERCOPY_KERNEL:
		do_usercopy_kernel();
		break;
	case CT_NONE:
	default:
		break;
	}

}

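/*
 * Called from the jprobe entry handlers: decrement the hit counter under
 * count_lock and, once it reaches zero, reset it to cpoint_count and run
 * the configured crash action outside the lock.
 */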
static void lkdtm_handler(void)
{
	unsigned long flags;
	bool do_it = false;

	spin_lock_irqsave(&count_lock, flags);
	count--;
	pr_info("Crash point %s of type %s hit, trigger in %d rounds\n",
		cp_name_to_str(cpoint), cp_type_to_str(cptype), count);

	if (count == 0) {
		do_it = true;
		count = cpoint_count;
	}
	spin_unlock_irqrestore(&count_lock, flags);

	if (do_it)
		lkdtm_do_action(cptype);
}

static int lkdtm_register_cpoint(enum cname which)
{
	int ret;

	cpoint = CN_INVALID;
	if (lkdtm.entry != NULL)
		unregister_jprobe(&lkdtm);

	switch (which) {
	case CN_DIRECT:
		lkdtm_do_action(cptype);
		return 0;
	case CN_INT_HARDWARE_ENTRY:
		lkdtm.kp.symbol_name = "do_IRQ";
		lkdtm.entry = (kprobe_opcode_t*) jp_do_irq;
		break;
	case CN_INT_HW_IRQ_EN:
		lkdtm.kp.symbol_name = "handle_IRQ_event";
		lkdtm.entry = (kprobe_opcode_t*) jp_handle_irq_event;
		break;
	case CN_INT_TASKLET_ENTRY:
		lkdtm.kp.symbol_name = "tasklet_action";
		lkdtm.entry = (kprobe_opcode_t*) jp_tasklet_action;
		break;
	case CN_FS_DEVRW:
		lkdtm.kp.symbol_name = "ll_rw_block";
		lkdtm.entry = (kprobe_opcode_t*) jp_ll_rw_block;
		break;
	case CN_MEM_SWAPOUT:
		lkdtm.kp.symbol_name = "shrink_inactive_list";
		lkdtm.entry = (kprobe_opcode_t*) jp_shrink_inactive_list;
		break;
	case CN_TIMERADD:
		lkdtm.kp.symbol_name = "hrtimer_start";
		lkdtm.entry = (kprobe_opcode_t*) jp_hrtimer_start;
		break;
	case CN_SCSI_DISPATCH_CMD:
		lkdtm.kp.symbol_name = "scsi_dispatch_cmd";
		lkdtm.entry = (kprobe_opcode_t*) jp_scsi_dispatch_cmd;
		break;
	case CN_IDE_CORE_CP:
#ifdef CONFIG_IDE
		lkdtm.kp.symbol_name = "generic_ide_ioctl";
		lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl;
#else
		pr_info("Crash point not available\n");
		return -EINVAL;
#endif
		break;
	default:
		pr_info("Invalid Crash Point\n");
		return -EINVAL;
	}

	cpoint = which;
	if ((ret = register_jprobe(&lkdtm)) < 0) {
		pr_info("Couldn't register jprobe\n");
		cpoint = CN_INVALID;
	}

	return ret;
}

static ssize_t do_register_entry(enum cname which, struct file *f,
		const char __user *user_buf, size_t count, loff_t *off)
{
	char *buf;
	int err;

	if (count >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	if (copy_from_user(buf, user_buf, count)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	/* NUL-terminate and strip the trailing newline */
	buf[count] = '\0';
	strim(buf);

	cptype = parse_cp_type(buf, count);
	free_page((unsigned long) buf);

	if (cptype == CT_NONE)
		return -EINVAL;

	err = lkdtm_register_cpoint(which);
	if (err < 0)
		return err;

	*off += count;

	return count;
}

/* Generic read callback that just prints out the available crash types */
static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
		size_t count, loff_t *off)
{
	char *buf;
	int i, n, out;

	buf = (char *)__get_free_page(GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
	for (i = 0; i < ARRAY_SIZE(cp_type); i++)
		n += snprintf(buf + n, PAGE_SIZE - n, "%s\n", cp_type[i]);
	buf[n] = '\0';

	out = simple_read_from_buffer(user_buf, count, off,
				      buf, n);
	free_page((unsigned long) buf);

	return out;
}

static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
{
	return 0;
}


static ssize_t int_hardware_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_INT_HARDWARE_ENTRY, f, buf, count, off);
}

static ssize_t int_hw_irq_en(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_INT_HW_IRQ_EN, f, buf, count, off);
}

static ssize_t int_tasklet_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_INT_TASKLET_ENTRY, f, buf, count, off);
}

static ssize_t fs_devrw_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_FS_DEVRW, f, buf, count, off);
}

static ssize_t mem_swapout_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_MEM_SWAPOUT, f, buf, count, off);
}

static ssize_t timeradd_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_TIMERADD, f, buf, count, off);
}

static ssize_t scsi_dispatch_cmd_entry(struct file *f,
		const char __user *buf, size_t count, loff_t *off)
{
	return do_register_entry(CN_SCSI_DISPATCH_CMD, f, buf, count, off);
}

static ssize_t ide_core_cp_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_IDE_CORE_CP, f, buf, count, off);
}

/* Special entry to just crash directly. Available without KPROBEs */
static ssize_t direct_entry(struct file *f, const char __user *user_buf,
		size_t count, loff_t *off)
{
	enum ctype type;
	char *buf;

	if (count >= PAGE_SIZE)
		return -EINVAL;
	if (count < 1)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	if (copy_from_user(buf, user_buf, count)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	/* NUL-terminate and strip the trailing newline */
	buf[count] = '\0';
	strim(buf);

	type = parse_cp_type(buf, count);
	free_page((unsigned long) buf);
	if (type == CT_NONE)
		return -EINVAL;

	pr_info("Performing direct entry %s\n", cp_type_to_str(type));
	lkdtm_do_action(type);
	*off += count;

	return count;
}

struct crash_entry {
	const char *name;
	const struct file_operations fops;
};

static const struct crash_entry crash_entries[] = {
	{"DIRECT", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = direct_entry} },
	{"INT_HARDWARE_ENTRY", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = int_hardware_entry} },
	{"INT_HW_IRQ_EN", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = int_hw_irq_en} },
	{"INT_TASKLET_ENTRY", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = int_tasklet_entry} },
	{"FS_DEVRW", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = fs_devrw_entry} },
	{"MEM_SWAPOUT", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = mem_swapout_entry} },
	{"TIMERADD", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = timeradd_entry} },
	{"SCSI_DISPATCH_CMD", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = scsi_dispatch_cmd_entry} },
	{"IDE_CORE_CP", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = ide_core_cp_entry} },
};
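/*
 * Example debugfs usage (illustrative; assumes debugfs is mounted at
 * /sys/kernel/debug, per Documentation/fault-injection/provoke-crashes.txt):
 *
 *   cat /sys/kernel/debug/provoke-crash/DIRECT            (list crash types)
 *   echo PANIC > /sys/kernel/debug/provoke-crash/DIRECT   (trigger now)
 *   echo BUG > /sys/kernel/debug/provoke-crash/INT_HARDWARE_ENTRY
 *                                                         (arm a crash point)
 */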

static struct dentry *lkdtm_debugfs_root;

static int __init lkdtm_module_init(void)
{
	int ret = -EINVAL;
	int n_debugfs_entries = 1; /* Assume only the direct entry */
	int i;

	/* Make sure we can write to __ro_after_init values during __init */
	ro_after_init |= 0xAA;

	/* Prepare cache that lacks SLAB_USERCOPY flag. */
	bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
				      0, NULL);

	/* Register debugfs interface */
	lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
	if (!lkdtm_debugfs_root) {
		pr_err("creating root dir failed\n");
		return -ENODEV;
	}

#ifdef CONFIG_KPROBES
	n_debugfs_entries = ARRAY_SIZE(crash_entries);
#endif

	for (i = 0; i < n_debugfs_entries; i++) {
		const struct crash_entry *cur = &crash_entries[i];
		struct dentry *de;

		de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root,
					 NULL, &cur->fops);
		if (de == NULL) {
			pr_err("could not create %s\n", cur->name);
			goto out_err;
		}
	}

	if (lkdtm_parse_commandline() == -EINVAL) {
		pr_info("Invalid command\n");
		goto out_err;
	}

	if (cpoint != CN_INVALID && cptype != CT_NONE) {
		ret = lkdtm_register_cpoint(cpoint);
		if (ret < 0) {
			pr_info("Invalid crash point %d\n", cpoint);
			goto out_err;
		}
		pr_info("Crash point %s of type %s registered\n",
			cpoint_name, cpoint_type);
	} else {
		pr_info("No crash points registered, enable through debugfs\n");
	}

	return 0;

out_err:
	debugfs_remove_recursive(lkdtm_debugfs_root);
	return ret;
}

static void __exit lkdtm_module_exit(void)
{
	debugfs_remove_recursive(lkdtm_debugfs_root);

	kmem_cache_destroy(bad_cache);

	unregister_jprobe(&lkdtm);
	pr_info("Crash point unregistered\n");
}

module_init(lkdtm_module_init);
module_exit(lkdtm_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Kprobe module for testing crash dumps");