/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>
#include <asm/mmu_context.h>

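/*
 * Quick overview: the user address space is carved into "slices", each of
 * which is mapped with a single page size. mm->context.low_slices_psize and
 * mm->context.high_slices_psize record the page size of every slice (4 bits
 * per slice, two slices per byte), while the slice_mask structures in the
 * context cache, per page size, which slices currently use it. The slice
 * geometry itself is platform dependent (see SLICE_LOW_SHIFT and
 * SLICE_HIGH_SHIFT); on Book3S 64 this works out to 256MB low slices below
 * 4GB and 1TB high slices above, whereas 8xx uses its own layout.
 */
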
static DEFINE_SPINLOCK(slice_convert_lock);

#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, const struct slice_mask *mask)
{
	if (!_slice_debug)
		return;
	pr_devel("%s low_slice: %*pbl\n", label,
			(int)SLICE_NUM_LOW, &mask->low_slices);
	pr_devel("%s high_slice: %*pbl\n", label,
			(int)SLICE_NUM_HIGH, mask->high_slices);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)

#else

static void slice_print_mask(const char *label, const struct slice_mask *mask) {}
#define slice_dbg(fmt...)

#endif

static void slice_range_to_mask(unsigned long start, unsigned long len,
				struct slice_mask *ret)
{
	unsigned long end = start + len - 1;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(start));
	}

	if ((start + len) > SLICE_LOW_TOP) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

		bitmap_set(ret->high_slices, start_index, count);
	}
}
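
/*
 * Worked example for the low-slice arithmetic above, assuming the Book3S 64
 * value SLICE_LOW_SHIFT == 28 (256MB low slices): start = 0x10000000 and
 * len = 0x20000000 give end = 0x2fffffff, so GET_LOW_SLICE_INDEX(start) == 1
 * and GET_LOW_SLICE_INDEX(mend) == 2, and low_slices becomes
 * (1u << 3) - (1u << 1) == 0x6, i.e. slices 1 and 2 are set.
 */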

static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm->context.slb_addr_limit - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vm_start_gap(vma));
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

#ifdef CONFIG_PPC64
	/* Hack, so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = SLICE_LOW_TOP;
#endif

	return !slice_area_is_free(mm, start, end - start);
}

static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
				unsigned long high_limit)
{
	unsigned long i;

	ret->low_slices = 0;
	if (SLICE_NUM_HIGH)
		bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret->low_slices |= 1u << i;

	if (high_limit <= SLICE_LOW_TOP)
		return;

	for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
		if (!slice_high_has_vma(mm, i))
			__set_bit(i, ret->high_slices);
}

#ifdef CONFIG_PPC_BOOK3S_64
static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize)
{
#ifdef CONFIG_PPC_64K_PAGES
	if (psize == MMU_PAGE_64K)
		return &mm->context.mask_64k;
#endif
	if (psize == MMU_PAGE_4K)
		return &mm->context.mask_4k;
#ifdef CONFIG_HUGETLB_PAGE
	if (psize == MMU_PAGE_16M)
		return &mm->context.mask_16m;
	if (psize == MMU_PAGE_16G)
		return &mm->context.mask_16g;
#endif
	BUG();
}
#elif defined(CONFIG_PPC_8xx)
static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize)
{
	if (psize == mmu_virtual_psize)
		return &mm->context.mask_base_psize;
#ifdef CONFIG_HUGETLB_PAGE
	if (psize == MMU_PAGE_512K)
		return &mm->context.mask_512k;
	if (psize == MMU_PAGE_8M)
		return &mm->context.mask_8m;
#endif
	BUG();
}
#else
#error "Must define the slice masks for page sizes supported by the platform"
#endif
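
/*
 * The masks returned by slice_mask_for_size() live in mm->context and act
 * as a per-page-size cache of "which slices currently use this psize".
 * They are seeded by slice_init_new_context_exec() below and kept in sync
 * with the psize arrays by slice_convert(); slice_get_unmapped_area()
 * consults them when picking an area to map.
 */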

static bool slice_check_range_fits(struct mm_struct *mm,
			   const struct slice_mask *available,
			   unsigned long start, unsigned long len)
{
	unsigned long end = start + len - 1;
	u64 low_slices = 0;

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = min(end,
					 (unsigned long)(SLICE_LOW_TOP - 1));

		low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
				- (1u << GET_LOW_SLICE_INDEX(start));
	}
	if ((low_slices & available->low_slices) != low_slices)
		return false;

	if (SLICE_NUM_HIGH && ((start + len) > SLICE_LOW_TOP)) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
		unsigned long i;

		for (i = start_index; i < start_index + count; i++) {
			if (!test_bit(i, available->high_slices))
				return false;
		}
	}

	return true;
}

static void slice_convert(struct mm_struct *mm,
				const struct slice_mask *mask, int psize)
{
	int index, mask_index;
	/* Write the new slice psize bits */
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *psize_mask, *old_mask;
	unsigned long i, flags;
	int old_psize;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	psize_mask = slice_mask_for_size(mm, psize);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++) {
		if (!(mask->low_slices & (1u << i)))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(mm, old_psize);
		old_mask->low_slices &= ~(1u << i);
		psize_mask->low_slices |= 1u << i;

		/* Update the sizes array */
		lpsizes[index] = (lpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
		if (!test_bit(i, mask->high_slices))
			continue;

		mask_index = i & 0x1;
		index = i >> 1;

		/* Update the slice_mask */
		old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf;
		old_mask = slice_mask_for_size(mm, old_psize);
		__clear_bit(i, old_mask->high_slices);
		__set_bit(i, psize_mask->high_slices);

		/* Update the sizes array */
		hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  (unsigned long)mm->context.low_slices_psize,
		  (unsigned long)mm->context.high_slices_psize);

	spin_unlock_irqrestore(&slice_convert_lock, flags);

	copro_flush_all_slbs(mm);
#ifdef CONFIG_PPC64
	core_flush_all_slbs(mm);
#endif
}
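
/*
 * Note on the psize arrays touched above: they pack two slices per byte,
 * with slice i stored in byte (i >> 1), the low nibble for even i and the
 * high nibble for odd i. For instance, recording a new psize for slice 5
 * rewrites bits 4-7 of byte 2; get_slice_psize() performs the matching
 * decode.
 */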

/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on 'end' parameter);
 * return boolean indicating if the slice is marked as available in the
 * 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
				 const struct slice_mask *available,
				 int end, unsigned long *boundary_addr)
{
	unsigned long slice;

	if (addr < SLICE_LOW_TOP) {
		slice = GET_LOW_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
		return !!(available->low_slices & (1u << slice));
	} else {
		slice = GET_HIGH_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) ?
			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
		return !!test_bit(slice, available->high_slices);
	}
}
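
/*
 * The 'end' argument above selects which boundary is reported: end == 1
 * returns the address just past the slice containing addr, end == 0 its
 * start. For example, with 256MB low slices (SLICE_LOW_SHIFT == 28 on
 * Book3S 64), addr = 0x30000000 is in low slice 3, so end == 1 yields
 * 0x40000000 and end == 0 yields 0x30000000.
 */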

static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      const struct slice_mask *available,
					      int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, next_end;
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = TASK_UNMAPPED_BASE;
	/*
	 * Search up to the allowed maximum address for this mmap request.
	 */
	while (addr < high_limit) {
		info.low_limit = addr;
		if (!slice_scan_available(addr, available, 1, &addr))
			continue;

 next_slice:
		/*
		 * At this point [info.low_limit; addr) covers
		 * available slices only and ends at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the next available slice.
		 */
		if (addr >= high_limit)
			addr = high_limit;
		else if (slice_scan_available(addr, available, 1, &next_end)) {
			addr = next_end;
			goto next_slice;
		}
		info.high_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	return -ENOMEM;
}

static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     const struct slice_mask *available,
					     int psize, unsigned long high_limit)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, prev;
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = mm->mmap_base;
	/*
	 * If we are trying to allocate above DEFAULT_MAP_WINDOW,
	 * add the difference to mmap_base. Only apply this for
	 * requests whose high_limit is above DEFAULT_MAP_WINDOW.
	 */
	if (high_limit > DEFAULT_MAP_WINDOW)
		addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW;

	while (addr > PAGE_SIZE) {
		info.high_limit = addr;
		if (!slice_scan_available(addr - 1, available, 0, &addr))
			continue;

 prev_slice:
		/*
		 * At this point [addr; info.high_limit) covers
		 * available slices only and starts at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the previous available slice.
		 */
		if (addr < PAGE_SIZE)
			addr = PAGE_SIZE;
		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
			addr = prev;
			goto prev_slice;
		}
		info.low_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return slice_find_area_bottomup(mm, len, available, psize, high_limit);
}


static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     const struct slice_mask *mask, int psize,
				     int topdown, unsigned long high_limit)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize, high_limit);
	else
		return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
}

static inline void slice_copy_mask(struct slice_mask *dst,
					const struct slice_mask *src)
{
	dst->low_slices = src->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_copy(dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_or_mask(struct slice_mask *dst,
					const struct slice_mask *src1,
					const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices | src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_or(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_andnot_mask(struct slice_mask *dst,
					const struct slice_mask *src1,
					const struct slice_mask *src2)
{
	dst->low_slices = src1->low_slices & ~src2->low_slices;
	if (!SLICE_NUM_HIGH)
		return;
	bitmap_andnot(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif
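
/*
 * MMU_PAGE_BASE is the kernel's configured base page size; it is only used
 * below to decide whether a conversion to a larger page size needs the SLBs
 * flushed on all CPUs (the core_flush_all_slbs() call after slice_convert()).
 */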

unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown)
{
	struct slice_mask good_mask;
	struct slice_mask potential_mask;
	const struct slice_mask *maskp;
	const struct slice_mask *compat_maskp = NULL;
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long page_size = 1UL << pshift;
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;
	unsigned long high_limit;

	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len > high_limit)
		return -ENOMEM;
	if (len & (page_size - 1))
		return -EINVAL;
	if (fixed) {
		if (addr & (page_size - 1))
			return -EINVAL;
		if (addr > high_limit - len)
			return -ENOMEM;
	}

	if (high_limit > mm->context.slb_addr_limit) {
		/*
		 * Increasing the slb_addr_limit does not require the
		 * slice mask cache to be recalculated because it should
		 * already be initialised beyond the old address limit.
		 */
		mm->context.slb_addr_limit = high_limit;
#ifdef CONFIG_PPC64
		core_flush_all_slbs(mm);
#endif
	}

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);
	BUG_ON(mm->context.slb_addr_limit == 0);
	VM_BUG_ON(radix_enabled());

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
		  addr, len, flags, topdown);

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = _ALIGN_UP(addr, page_size);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > high_limit - len ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	maskp = slice_mask_for_size(mm, psize);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */
	/*
	 * If we support combo pages, we can allow 64k pages in 4k slices.
	 * The mask copies could be avoided in most cases here if we had
	 * a pointer to the good mask for the next code to use.
	 */
	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
		compat_maskp = slice_mask_for_size(mm, MMU_PAGE_4K);
		if (fixed)
			slice_or_mask(&good_mask, maskp, compat_maskp);
		else
			slice_copy_mask(&good_mask, maskp);
	} else {
		slice_copy_mask(&good_mask, maskp);
	}

	slice_print_mask(" good_mask", &good_mask);
	if (compat_maskp)
		slice_print_mask(" compat_mask", compat_maskp);

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_range_fits(mm, &good_mask, addr, len)) {
			slice_dbg(" fits good !\n");
			newaddr = addr;
			goto return_addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, we don't have to setup,
			 * we thus return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}
	/*
	 * We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	slice_mask_for_free(mm, &potential_mask, high_limit);
	slice_or_mask(&potential_mask, &potential_mask, &good_mask);
	slice_print_mask(" potential", &potential_mask);

	if (addr != 0 || fixed) {
		if (slice_check_range_fits(mm, &potential_mask, addr, len)) {
			slice_dbg(" fits potential !\n");
			newaddr = addr;
			goto convert;
		}
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		newaddr = slice_find_area(mm, len, &good_mask,
					  psize, topdown, high_limit);
		if (newaddr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", newaddr);
			goto return_addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	newaddr = slice_find_area(mm, len, &potential_mask,
				  psize, topdown, high_limit);

#ifdef CONFIG_PPC_64K_PAGES
	if (newaddr == -ENOMEM && psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		slice_or_mask(&potential_mask, &potential_mask, compat_maskp);
		newaddr = slice_find_area(mm, len, &potential_mask,
					  psize, topdown, high_limit);
	}
#endif

	if (newaddr == -ENOMEM)
		return -ENOMEM;

	slice_range_to_mask(newaddr, len, &potential_mask);
	slice_dbg(" found potential area at 0x%lx\n", newaddr);
	slice_print_mask(" mask", &potential_mask);

 convert:
	/*
	 * Try to allocate the context before we do the slice conversion
	 * so that we handle the context allocation failure gracefully.
	 */
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}

	slice_andnot_mask(&potential_mask, &potential_mask, &good_mask);
	if (compat_maskp && !fixed)
		slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp);
	if (potential_mask.low_slices ||
		(SLICE_NUM_HIGH &&
		 !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) {
		slice_convert(mm, &potential_mask, psize);
#ifdef CONFIG_PPC64
		if (psize > MMU_PAGE_BASE)
			core_flush_all_slbs(mm);
#endif
	}
	return newaddr;

return_addr:
	if (need_extra_context(mm, newaddr)) {
		if (alloc_extended_context(mm, newaddr) < 0)
			return -ENOMEM;
	}
	return newaddr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       current->mm->context.user_psize, 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       current->mm->context.user_psize, 1);
}

unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	unsigned char *psizes;
	int index, mask_index;

	VM_BUG_ON(radix_enabled());

	if (addr < SLICE_LOW_TOP) {
		psizes = mm->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
	} else {
		psizes = mm->context.high_slices_psize;
		index = GET_HIGH_SLICE_INDEX(addr);
	}
	mask_index = index & 0x1;
	return (psizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

void slice_init_new_context_exec(struct mm_struct *mm)
{
	unsigned char *hpsizes, *lpsizes;
	struct slice_mask *mask;
	unsigned int psize = mmu_virtual_psize;

	slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm);

	/*
	 * In the case of exec, use the default limit. In the
	 * case of fork it is just inherited from the mm being
	 * duplicated.
	 */
#ifdef CONFIG_PPC64
	mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
#else
	mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW;
#endif

	mm->context.user_psize = psize;

	/*
	 * Set all slice psizes to the default.
	 */
	lpsizes = mm->context.low_slices_psize;
	memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);

	hpsizes = mm->context.high_slices_psize;
	memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);

	/*
	 * Slice mask cache starts zeroed, fill the default size cache.
	 */
	mask = slice_mask_for_size(mm, psize);
	mask->low_slices = ~0UL;
	if (SLICE_NUM_HIGH)
		bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
}

#ifdef CONFIG_PPC_BOOK3S_64
void slice_setup_new_exec(void)
{
	struct mm_struct *mm = current->mm;

	slice_dbg("slice_setup_new_exec(mm=%p)\n", mm);

	if (!is_32bit_task())
		return;

	mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW;
}
#endif

void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask;

	VM_BUG_ON(radix_enabled());

	slice_range_to_mask(start, len, &mask);
	slice_convert(mm, &mask, psize);
}
773
Aneesh Kumar K.V66437732014-10-21 14:25:38 +1100774#ifdef CONFIG_HUGETLB_PAGE
Benjamin Herrenschmidtd0f13e32007-05-08 16:27:27 +1000775/*
Adam Buchbinder48fc7f72012-09-19 21:48:00 -0400776 * is_hugepage_only_range() is used by generic code to verify whether
Benjamin Herrenschmidtd0f13e32007-05-08 16:27:27 +1000777 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
778 *
779 * until the generic code provides a more generic hook and/or starts
780 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
781 * here knows how to deal with), we hijack it to keep standard mappings
782 * away from us.
783 *
784 * because of that generic code limitation, MAP_FIXED mapping cannot
785 * "convert" back a slice with no VMAs to the standard page size, only
786 * get_unmapped_area() can. It would be possible to fix it here but I
787 * prefer working on fixing the generic code instead.
788 *
789 * WARNING: This will not work if hugetlbfs isn't enabled since the
790 * generic code will redefine that function as 0 in that. This is ok
791 * for now as we only use slices with hugetlbfs enabled. This should
792 * be fixed as the generic code gets fixed.
793 */
Nicholas Piggin014a32b2018-03-07 11:37:17 +1000794int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
Benjamin Herrenschmidtd0f13e32007-05-08 16:27:27 +1000795 unsigned long len)
796{
Nicholas Piggind262bd52018-03-07 11:37:16 +1000797 const struct slice_mask *maskp;
Dave Kleikamp9ba0fdb2009-01-14 09:09:34 +0000798 unsigned int psize = mm->context.user_psize;
Benjamin Herrenschmidtd0f13e32007-05-08 16:27:27 +1000799
Nicholas Piggin014a32b2018-03-07 11:37:17 +1000800 VM_BUG_ON(radix_enabled());
Aneesh Kumar K.V764041e2016-04-29 23:26:09 +1000801
Nicholas Piggind262bd52018-03-07 11:37:16 +1000802 maskp = slice_mask_for_size(mm, psize);
Dave Kleikamp9ba0fdb2009-01-14 09:09:34 +0000803#ifdef CONFIG_PPC_64K_PAGES
804 /* We need to account for 4k slices too */
805 if (psize == MMU_PAGE_64K) {
Nicholas Piggind262bd52018-03-07 11:37:16 +1000806 const struct slice_mask *compat_maskp;
807 struct slice_mask available;
808
809 compat_maskp = slice_mask_for_size(mm, MMU_PAGE_4K);
810 slice_or_mask(&available, maskp, compat_maskp);
811 return !slice_check_range_fits(mm, &available, addr, len);
Dave Kleikamp9ba0fdb2009-01-14 09:09:34 +0000812 }
813#endif
Benjamin Herrenschmidtd0f13e32007-05-08 16:27:27 +1000814
Nicholas Piggind262bd52018-03-07 11:37:16 +1000815 return !slice_check_range_fits(mm, maskp, addr, len);
Benjamin Herrenschmidtd0f13e32007-05-08 16:27:27 +1000816}
Aneesh Kumar K.V66437732014-10-21 14:25:38 +1100817#endif