diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4b61541853ea1c7ea6d996bf0332d7eb4f69ba2f..82ffac621854f863f8cb4b9b4f17621ffc83eb7a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -381,6 +381,7 @@ static int __init atomic_pool_init(void)
  */
 postcore_initcall(atomic_pool_init);
 
+#ifdef CONFIG_CMA_AREAS
 struct dma_contig_early_reserve {
 	phys_addr_t base;
 	unsigned long size;
@@ -435,6 +436,7 @@ void __init dma_contiguous_remap(void)
 		iotable_init(&map, 1);
 	}
 }
+#endif
 
 static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
 {
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 9ff683612f2a8cfc3e59f198e2b806415878bf63..d7ffccb7fea7d6f759104b62f9ab40a8d53cdeeb 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -88,6 +88,10 @@ extern phys_addr_t arm_lowmem_limit;
 
 void __init bootmem_init(void);
 void arm_mm_memblock_reserve(void);
+#ifdef CONFIG_CMA_AREAS
 void dma_contiguous_remap(void);
+#else
+static inline void dma_contiguous_remap(void) { }
+#endif
 
 unsigned long __clear_cr(unsigned long mask);
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 90fd742fd1ef5436f100bea5d3bcbdafafcacf33..a6f637342740b921412eb183b4416fa72f128a9f 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -12,10 +12,6 @@
  */
 #ifdef CONFIG_CMA_AREAS
 #define MAX_CMA_AREAS	(1 + CONFIG_CMA_AREAS)
-
-#else
-#define MAX_CMA_AREAS	(0)
-
 #endif
 
 #define CMA_MAX_NAME 64
diff --git a/security/Kconfig b/security/Kconfig
index 1d2d71cc1f36ce4af728f7bbefc3deb9cb63399a..9b2c4925585a3a761019cd8b5358ecf09279a29c 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -166,7 +166,7 @@ config HARDENED_USERCOPY
 config HARDENED_USERCOPY_PAGESPAN
 	bool "Refuse to copy allocations that span multiple pages"
 	depends on HARDENED_USERCOPY
-	depends on EXPERT
+	depends on BROKEN
 	help
 	  When a multi-page allocation is done without __GFP_COMP,
 	  hardened usercopy will reject attempts to copy it.  There are,