Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/Kconfig                            |   1
-rw-r--r--  arch/arm/crypto/Kconfig                     |  14
-rw-r--r--  arch/arm/crypto/aes-ce-glue.c               |   2
-rw-r--r--  arch/arm/crypto/aes-cipher-glue.c           |   5
-rw-r--r--  arch/arm/crypto/aes-cipher.h                |  13
-rw-r--r--  arch/arm/crypto/aes-neonbs-glue.c           | 133
-rw-r--r--  arch/arm/include/asm/arm_pmuv3.h            |  20
-rw-r--r--  arch/arm/include/asm/cpu.h                  |   1
-rw-r--r--  arch/arm/include/asm/dma-iommu.h            |   2
-rw-r--r--  arch/arm/include/asm/hypervisor.h           |   2
-rw-r--r--  arch/arm/include/asm/pgtable-3level-hwdef.h |   5
-rw-r--r--  arch/arm/kernel/setup.c                     |  14
-rw-r--r--  arch/arm/mm/dma-mapping.c                   |  12
-rw-r--r--  arch/arm/mm/mmu.c                           |   6
-rw-r--r--  arch/arm/vfp/vfpinstr.h                     |  48
15 files changed, 137 insertions, 141 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 173159e93c99..0ec034933cae 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -64,6 +64,7 @@ config ARM
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_IRQ_IPI if SMP
select GENERIC_CPU_AUTOPROBE
+ select GENERIC_CPU_DEVICES
select GENERIC_EARLY_IOREMAP
select GENERIC_IDLE_POLL_SETUP
select GENERIC_IRQ_MULTI_HANDLER
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 847b7a003356..5ff49a5e9afc 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -166,10 +166,9 @@ config CRYPTO_AES_ARM
config CRYPTO_AES_ARM_BS
tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (bit-sliced NEON)"
depends on KERNEL_MODE_NEON
+ select CRYPTO_AES_ARM
select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
- select CRYPTO_AES
- select CRYPTO_CBC
select CRYPTO_SIMD
help
Length-preserving ciphers: AES cipher algorithms (FIPS-197)
@@ -183,8 +182,15 @@ config CRYPTO_AES_ARM_BS
Bit sliced AES gives around 45% speedup on Cortex-A15 for CTR mode
and for XTS mode encryption, CBC and XTS mode decryption speedup is
around 25%. (CBC encryption speed is not affected by this driver.)
- This implementation does not rely on any lookup tables so it is
- believed to be invulnerable to cache timing attacks.
+
+ The bit sliced AES code does not use lookup tables, so it is believed
+ to be invulnerable to cache timing attacks. However, since the bit
+ sliced AES code cannot process single blocks efficiently, in certain
+ cases table-based code with some countermeasures against cache timing
+ attacks will still be used as a fallback method; specifically CBC
+ encryption (not CBC decryption), the encryption of XTS tweaks, XTS
+ ciphertext stealing when the message isn't a multiple of 16 bytes, and
+ CTR when invoked in a context in which NEON instructions are unusable.
config CRYPTO_AES_ARM_CE
tristate "Ciphers: AES, modes: ECB/CBC/CTS/CTR/XTS (ARMv8 Crypto Extensions)"
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
index b668c97663ec..f5b66f4cf45d 100644
--- a/arch/arm/crypto/aes-ce-glue.c
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -711,7 +711,7 @@ static int __init aes_init(void)
algname = aes_algs[i].base.cra_name + 2;
drvname = aes_algs[i].base.cra_driver_name + 2;
basename = aes_algs[i].base.cra_driver_name;
- simd = simd_skcipher_create_compat(algname, drvname, basename);
+ simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename);
err = PTR_ERR(simd);
if (IS_ERR(simd))
goto unregister_simds;
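Both converted call sites in this series now pass the algorithm being wrapped as a new first argument. Since aes_algs + i is a struct skcipher_alg pointer, the prototype implied by these calls (the authoritative declaration lives in include/crypto/internal/simd.h) would be:

    struct simd_skcipher_alg *
    simd_skcipher_create_compat(struct skcipher_alg *ialg,
                                const char *algname,
                                const char *drvname,
                                const char *basename);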
diff --git a/arch/arm/crypto/aes-cipher-glue.c b/arch/arm/crypto/aes-cipher-glue.c
index 6dfaef2d8f91..29efb7833960 100644
--- a/arch/arm/crypto/aes-cipher-glue.c
+++ b/arch/arm/crypto/aes-cipher-glue.c
@@ -9,9 +9,10 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
+#include "aes-cipher.h"
-asmlinkage void __aes_arm_encrypt(u32 *rk, int rounds, const u8 *in, u8 *out);
-asmlinkage void __aes_arm_decrypt(u32 *rk, int rounds, const u8 *in, u8 *out);
+EXPORT_SYMBOL_GPL(__aes_arm_encrypt);
+EXPORT_SYMBOL_GPL(__aes_arm_decrypt);
static void aes_arm_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
diff --git a/arch/arm/crypto/aes-cipher.h b/arch/arm/crypto/aes-cipher.h
new file mode 100644
index 000000000000..d5db2b87eb69
--- /dev/null
+++ b/arch/arm/crypto/aes-cipher.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef ARM_CRYPTO_AES_CIPHER_H
+#define ARM_CRYPTO_AES_CIPHER_H
+
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+asmlinkage void __aes_arm_encrypt(const u32 rk[], int rounds,
+ const u8 *in, u8 *out);
+asmlinkage void __aes_arm_decrypt(const u32 rk[], int rounds,
+ const u8 *in, u8 *out);
+
+#endif /* ARM_CRYPTO_AES_CIPHER_H */
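The setkey paths later in this diff show the intended usage of the now-shared scalar routines: expand the key with the generic helper, then hand the round-key schedule and round count to the assembler. A condensed sketch of that pattern (encrypt_one_block is a hypothetical helper mirroring the glue code below):

    #include <crypto/aes.h>
    #include <linux/string.h>
    #include "aes-cipher.h"

    static int encrypt_one_block(const u8 *key, unsigned int key_len,
                                 const u8 in[AES_BLOCK_SIZE],
                                 u8 out[AES_BLOCK_SIZE])
    {
            struct crypto_aes_ctx ctx;
            int err = aes_expandkey(&ctx, key, key_len);

            if (err)
                    return err;
            /* rounds = 6 + key_len / 4, as in aesbs_cbc_setkey() below */
            __aes_arm_encrypt(ctx.key_enc, 6 + key_len / 4, in, out);
            memzero_explicit(&ctx, sizeof(ctx));
            return 0;
    }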
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index 201eb35dde37..f6be80b5938b 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -9,24 +9,22 @@
#include <asm/simd.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
-#include <crypto/internal/cipher.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <linux/module.h>
+#include "aes-cipher.h"
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_DESCRIPTION("Bit sliced AES using NEON instructions");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ecb(aes)");
-MODULE_ALIAS_CRYPTO("cbc(aes)-all");
+MODULE_ALIAS_CRYPTO("cbc(aes)");
MODULE_ALIAS_CRYPTO("ctr(aes)");
MODULE_ALIAS_CRYPTO("xts(aes)");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
-
asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds);
asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
@@ -52,13 +50,13 @@ struct aesbs_ctx {
struct aesbs_cbc_ctx {
struct aesbs_ctx key;
- struct crypto_skcipher *enc_tfm;
+ struct crypto_aes_ctx fallback;
};
struct aesbs_xts_ctx {
struct aesbs_ctx key;
- struct crypto_cipher *cts_tfm;
- struct crypto_cipher *tweak_tfm;
+ struct crypto_aes_ctx fallback;
+ struct crypto_aes_ctx tweak_key;
};
struct aesbs_ctr_ctx {
@@ -129,37 +127,49 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_aes_ctx rk;
int err;
- err = aes_expandkey(&rk, in_key, key_len);
+ err = aes_expandkey(&ctx->fallback, in_key, key_len);
if (err)
return err;
ctx->key.rounds = 6 + key_len / 4;
kernel_neon_begin();
- aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds);
+ aesbs_convert_key(ctx->key.rk, ctx->fallback.key_enc, ctx->key.rounds);
kernel_neon_end();
- memzero_explicit(&rk, sizeof(rk));
- return crypto_skcipher_setkey(ctx->enc_tfm, in_key, key_len);
+ return 0;
}
static int cbc_encrypt(struct skcipher_request *req)
{
- struct skcipher_request *subreq = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ const struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
+ int err;
- skcipher_request_set_tfm(subreq, ctx->enc_tfm);
- skcipher_request_set_callback(subreq,
- skcipher_request_flags(req),
- NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->cryptlen, req->iv);
+ err = skcipher_walk_virt(&walk, req, false);
- return crypto_skcipher_encrypt(subreq);
+ while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+ const u8 *src = walk.src.virt.addr;
+ u8 *dst = walk.dst.virt.addr;
+ u8 *prev = walk.iv;
+
+ do {
+ crypto_xor_cpy(dst, src, prev, AES_BLOCK_SIZE);
+ __aes_arm_encrypt(ctx->fallback.key_enc,
+ ctx->key.rounds, dst, dst);
+ prev = dst;
+ src += AES_BLOCK_SIZE;
+ dst += AES_BLOCK_SIZE;
+ nbytes -= AES_BLOCK_SIZE;
+ } while (nbytes >= AES_BLOCK_SIZE);
+ memcpy(walk.iv, prev, AES_BLOCK_SIZE);
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+ return err;
}
static int cbc_decrypt(struct skcipher_request *req)
@@ -190,30 +200,6 @@ static int cbc_decrypt(struct skcipher_request *req)
return err;
}
-static int cbc_init(struct crypto_skcipher *tfm)
-{
- struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
- unsigned int reqsize;
-
- ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(ctx->enc_tfm))
- return PTR_ERR(ctx->enc_tfm);
-
- reqsize = sizeof(struct skcipher_request);
- reqsize += crypto_skcipher_reqsize(ctx->enc_tfm);
- crypto_skcipher_set_reqsize(tfm, reqsize);
-
- return 0;
-}
-
-static void cbc_exit(struct crypto_skcipher *tfm)
-{
- struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_free_skcipher(ctx->enc_tfm);
-}
-
static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -271,16 +257,8 @@ static int ctr_encrypt(struct skcipher_request *req)
static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
{
struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
- unsigned long flags;
-
- /*
- * Temporarily disable interrupts to avoid races where
- * cachelines are evicted when the CPU is interrupted
- * to do something else.
- */
- local_irq_save(flags);
- aes_encrypt(&ctx->fallback, dst, src);
- local_irq_restore(flags);
+
+ __aes_arm_encrypt(ctx->fallback.key_enc, ctx->key.rounds, src, dst);
}
static int ctr_encrypt_sync(struct skcipher_request *req)
@@ -302,45 +280,23 @@ static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
return err;
key_len /= 2;
- err = crypto_cipher_setkey(ctx->cts_tfm, in_key, key_len);
+ err = aes_expandkey(&ctx->fallback, in_key, key_len);
if (err)
return err;
- err = crypto_cipher_setkey(ctx->tweak_tfm, in_key + key_len, key_len);
+ err = aes_expandkey(&ctx->tweak_key, in_key + key_len, key_len);
if (err)
return err;
return aesbs_setkey(tfm, in_key, key_len);
}
-static int xts_init(struct crypto_skcipher *tfm)
-{
- struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- ctx->cts_tfm = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(ctx->cts_tfm))
- return PTR_ERR(ctx->cts_tfm);
-
- ctx->tweak_tfm = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(ctx->tweak_tfm))
- crypto_free_cipher(ctx->cts_tfm);
-
- return PTR_ERR_OR_ZERO(ctx->tweak_tfm);
-}
-
-static void xts_exit(struct crypto_skcipher *tfm)
-{
- struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_free_cipher(ctx->tweak_tfm);
- crypto_free_cipher(ctx->cts_tfm);
-}
-
static int __xts_crypt(struct skcipher_request *req, bool encrypt,
void (*fn)(u8 out[], u8 const in[], u8 const rk[],
int rounds, int blocks, u8 iv[], int))
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+ const int rounds = ctx->key.rounds;
int tail = req->cryptlen % AES_BLOCK_SIZE;
struct skcipher_request subreq;
u8 buf[2 * AES_BLOCK_SIZE];
@@ -364,7 +320,7 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
if (err)
return err;
- crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);
+ __aes_arm_encrypt(ctx->tweak_key.key_enc, rounds, walk.iv, walk.iv);
while (walk.nbytes >= AES_BLOCK_SIZE) {
unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
@@ -378,7 +334,7 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
kernel_neon_begin();
fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk,
- ctx->key.rounds, blocks, walk.iv, reorder_last_tweak);
+ rounds, blocks, walk.iv, reorder_last_tweak);
kernel_neon_end();
err = skcipher_walk_done(&walk,
walk.nbytes - blocks * AES_BLOCK_SIZE);
@@ -396,9 +352,9 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
crypto_xor(buf, req->iv, AES_BLOCK_SIZE);
if (encrypt)
- crypto_cipher_encrypt_one(ctx->cts_tfm, buf, buf);
+ __aes_arm_encrypt(ctx->fallback.key_enc, rounds, buf, buf);
else
- crypto_cipher_decrypt_one(ctx->cts_tfm, buf, buf);
+ __aes_arm_decrypt(ctx->fallback.key_dec, rounds, buf, buf);
crypto_xor(buf, req->iv, AES_BLOCK_SIZE);
@@ -439,8 +395,7 @@ static struct skcipher_alg aes_algs[] = { {
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aesbs_cbc_ctx),
.base.cra_module = THIS_MODULE,
- .base.cra_flags = CRYPTO_ALG_INTERNAL |
- CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_flags = CRYPTO_ALG_INTERNAL,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@@ -449,8 +404,6 @@ static struct skcipher_alg aes_algs[] = { {
.setkey = aesbs_cbc_setkey,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
- .init = cbc_init,
- .exit = cbc_exit,
}, {
.base.cra_name = "__ctr(aes)",
.base.cra_driver_name = "__ctr-aes-neonbs",
@@ -500,8 +453,6 @@ static struct skcipher_alg aes_algs[] = { {
.setkey = aesbs_xts_setkey,
.encrypt = xts_encrypt,
.decrypt = xts_decrypt,
- .init = xts_init,
- .exit = xts_exit,
} };
static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];
@@ -540,7 +491,7 @@ static int __init aes_init(void)
algname = aes_algs[i].base.cra_name + 2;
drvname = aes_algs[i].base.cra_driver_name + 2;
basename = aes_algs[i].base.cra_driver_name;
- simd = simd_skcipher_create_compat(algname, drvname, basename);
+ simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename);
err = PTR_ERR(simd);
if (IS_ERR(simd))
goto unregister_simds;
diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h
index a41b503b7dcd..f63ba8986b24 100644
--- a/arch/arm/include/asm/arm_pmuv3.h
+++ b/arch/arm/include/asm/arm_pmuv3.h
@@ -127,6 +127,12 @@ static inline u32 read_pmuver(void)
return (dfr0 >> 24) & 0xf;
}
+static inline bool pmuv3_has_icntr(void)
+{
+ /* FEAT_PMUv3_ICNTR not accessible for 32-bit */
+ return false;
+}
+
static inline void write_pmcr(u32 val)
{
write_sysreg(val, PMCR);
@@ -152,6 +158,13 @@ static inline u64 read_pmccntr(void)
return read_sysreg(PMCCNTR);
}
+static inline void write_pmicntr(u64 val) {}
+
+static inline u64 read_pmicntr(void)
+{
+ return 0;
+}
+
static inline void write_pmcntenset(u32 val)
{
write_sysreg(val, PMCNTENSET);
@@ -177,6 +190,13 @@ static inline void write_pmccfiltr(u32 val)
write_sysreg(val, PMCCFILTR);
}
+static inline void write_pmicfiltr(u64 val) {}
+
+static inline u64 read_pmicfiltr(void)
+{
+ return 0;
+}
+
static inline void write_pmovsclr(u32 val)
{
write_sysreg(val, PMOVSR);
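These stubs let the shared PMUv3 driver build for 32-bit without #ifdefs: code touching the fixed instruction counter is presumably guarded by pmuv3_has_icntr(), so the no-op accessors are never reached. A sketch of the expected guard (not the driver's literal code):

    u64 filter = 0;

    if (pmuv3_has_icntr()) {            /* always false on 32-bit ARM */
            write_pmicfiltr(filter);
            write_pmicntr(0);
    }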
diff --git a/arch/arm/include/asm/cpu.h b/arch/arm/include/asm/cpu.h
index bd6fdb4b922d..9d8863537aa5 100644
--- a/arch/arm/include/asm/cpu.h
+++ b/arch/arm/include/asm/cpu.h
@@ -11,7 +11,6 @@
#include <linux/cpu.h>
struct cpuinfo_arm {
- struct cpu cpu;
u32 cpuid;
#ifdef CONFIG_SMP
unsigned int loops_per_jiffy;
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 82ec1ccf1fee..2ce4c5683e6d 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -24,7 +24,7 @@ struct dma_iommu_mapping {
};
struct dma_iommu_mapping *
-arm_iommu_create_mapping(const struct bus_type *bus, dma_addr_t base, u64 size);
+arm_iommu_create_mapping(struct device *dev, dma_addr_t base, u64 size);
void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
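Callers now hand in the client device itself rather than its bus. A hedged sketch of a driver using the reworked API together with its existing companion helpers (base address and size are arbitrary example values):

    struct dma_iommu_mapping *mapping;
    int err;

    mapping = arm_iommu_create_mapping(dev, 0x10000000, SZ_1G);
    if (IS_ERR(mapping))
            return PTR_ERR(mapping);

    err = arm_iommu_attach_device(dev, mapping);
    if (err)
            arm_iommu_release_mapping(mapping);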
diff --git a/arch/arm/include/asm/hypervisor.h b/arch/arm/include/asm/hypervisor.h
index bd61502b9715..8a648e506540 100644
--- a/arch/arm/include/asm/hypervisor.h
+++ b/arch/arm/include/asm/hypervisor.h
@@ -7,4 +7,6 @@
void kvm_init_hyp_services(void);
bool kvm_arm_hyp_service_available(u32 func_id);
+static inline void kvm_arch_init_hyp_services(void) { };
+
#endif
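The empty inline gives 32-bit ARM a no-op hook so the common SMCCC guest code can call it unconditionally; roughly (paraphrased, not the file's literal contents):

    /* drivers/firmware/smccc (sketch): no #ifdef needed, since the arch
     * header supplies either a real implementation or this no-op. */
    void kvm_init_hyp_services(void)
    {
            /* ... discover hypervisor services via SMCCC ... */
            kvm_arch_init_hyp_services();
    }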
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index dfab3e982cbf..944fc9955528 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -106,6 +106,11 @@
/*
* TTBCR register bits.
+ *
+ * The ORGN0 and IRGN0 bits enables different forms of caching when
+ * walking the translation table. Clearing these bits (which is claimed
+ * to be the reset default) means "normal memory, [outer|inner]
+ * non-cacheable"
*/
#define TTBCR_EAE (1 << 31)
#define TTBCR_IMP (1 << 30)
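The mmu.c hunk later in this diff consumes named masks for these fields; their values can be inferred from the open-coded (3 << 8 | 3 << 10) expression it replaces, IRGN0 being TTBCR bits [9:8] and ORGN0 bits [11:10]:

    #define TTBCR_IRGN0_MASK	(3 << 8)
    #define TTBCR_ORGN0_MASK	(3 << 10)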
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 7b33b157fca0..e6a857bf0ce6 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1201,20 +1201,10 @@ void __init setup_arch(char **cmdline_p)
mdesc->init_early();
}
-
-static int __init topology_init(void)
+bool arch_cpu_is_hotpluggable(int num)
{
- int cpu;
-
- for_each_possible_cpu(cpu) {
- struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
- cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
- register_cpu(&cpuinfo->cpu, cpu);
- }
-
- return 0;
+ return platform_can_hotplug_cpu(num);
}
-subsys_initcall(topology_init);
#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
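With GENERIC_CPU_DEVICES selected in the Kconfig hunk above, the per-CPU device registration that topology_init() used to do moves into drivers/base/cpu.c; the arch now only answers the hotplug question. Roughly what the generic side does (paraphrased, not verbatim):

    for_each_present_cpu(cpu) {
            struct cpu *c = &per_cpu(cpu_devices, cpu);

            c->hotpluggable = arch_cpu_is_hotpluggable(cpu);
            register_cpu(c, cpu);
    }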
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 5adf1769eee4..88c2d68a69c9 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1532,7 +1532,7 @@ static const struct dma_map_ops iommu_ops = {
/**
* arm_iommu_create_mapping
- * @bus: pointer to the bus holding the client device (for IOMMU calls)
+ * @dev: pointer to the client device (for IOMMU calls)
* @base: start address of the valid IO address space
* @size: maximum size of the valid IO address space
*
@@ -1544,7 +1544,7 @@ static const struct dma_map_ops iommu_ops = {
* arm_iommu_attach_device function.
*/
struct dma_iommu_mapping *
-arm_iommu_create_mapping(const struct bus_type *bus, dma_addr_t base, u64 size)
+arm_iommu_create_mapping(struct device *dev, dma_addr_t base, u64 size)
{
unsigned int bits = size >> PAGE_SHIFT;
unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
@@ -1585,9 +1585,11 @@ arm_iommu_create_mapping(const struct bus_type *bus, dma_addr_t base, u64 size)
spin_lock_init(&mapping->lock);
- mapping->domain = iommu_domain_alloc(bus);
- if (!mapping->domain)
+ mapping->domain = iommu_paging_domain_alloc(dev);
+ if (IS_ERR(mapping->domain)) {
+ err = PTR_ERR(mapping->domain);
goto err4;
+ }
kref_init(&mapping->kref);
return mapping;
@@ -1718,7 +1720,7 @@ static void arm_setup_iommu_dma_ops(struct device *dev)
dma_base = dma_range_map_min(dev->dma_range_map);
size = dma_range_map_max(dev->dma_range_map) - dma_base;
}
- mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
+ mapping = arm_iommu_create_mapping(dev, dma_base, size);
if (IS_ERR(mapping)) {
pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
size, dev_name(dev));
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 3f774856ca67..f85c177cdf8d 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1638,7 +1638,7 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
{
pgtables_remap *lpae_pgtables_remap;
unsigned long pa_pgd;
- unsigned int cr, ttbcr;
+ u32 cr, ttbcr, tmp;
long long offset;
if (!mdesc->pv_fixup)
@@ -1688,7 +1688,9 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
cr = get_cr();
set_cr(cr & ~(CR_I | CR_C));
ttbcr = cpu_get_ttbcr();
- cpu_set_ttbcr(ttbcr & ~(3 << 8 | 3 << 10));
+ /* Disable all kind of caching of the translation table */
+ tmp = ttbcr & ~(TTBCR_ORGN0_MASK | TTBCR_IRGN0_MASK);
+ cpu_set_ttbcr(tmp);
flush_cache_all();
/*
diff --git a/arch/arm/vfp/vfpinstr.h b/arch/arm/vfp/vfpinstr.h
index 3c7938fd40aa..32090b0fb250 100644
--- a/arch/arm/vfp/vfpinstr.h
+++ b/arch/arm/vfp/vfpinstr.h
@@ -64,33 +64,37 @@
#ifdef CONFIG_AS_VFP_VMRS_FPINST
-#define fmrx(_vfp_) ({ \
- u32 __v; \
- asm(".fpu vfpv2\n" \
- "vmrs %0, " #_vfp_ \
- : "=r" (__v) : : "cc"); \
- __v; \
- })
-
-#define fmxr(_vfp_,_var_) \
- asm(".fpu vfpv2\n" \
- "vmsr " #_vfp_ ", %0" \
- : : "r" (_var_) : "cc")
+#define fmrx(_vfp_) ({ \
+ u32 __v; \
+ asm volatile (".fpu vfpv2\n" \
+ "vmrs %0, " #_vfp_ \
+ : "=r" (__v) : : "cc"); \
+ __v; \
+})
+
+#define fmxr(_vfp_, _var_) ({ \
+ asm volatile (".fpu vfpv2\n" \
+ "vmsr " #_vfp_ ", %0" \
+ : : "r" (_var_) : "cc"); \
+})
#else
#define vfpreg(_vfp_) #_vfp_
-#define fmrx(_vfp_) ({ \
- u32 __v; \
- asm("mrc p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmrx %0, " #_vfp_ \
- : "=r" (__v) : : "cc"); \
- __v; \
- })
-
-#define fmxr(_vfp_,_var_) \
- asm("mcr p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmxr " #_vfp_ ", %0" \
- : : "r" (_var_) : "cc")
+#define fmrx(_vfp_) ({ \
+ u32 __v; \
+ asm volatile ("mrc p10, 7, %0, " vfpreg(_vfp_) "," \
+ "cr0, 0 @ fmrx %0, " #_vfp_ \
+ : "=r" (__v) : : "cc"); \
+ __v; \
+})
+
+#define fmxr(_vfp_, _var_) ({ \
+ asm volatile ("mcr p10, 7, %0, " vfpreg(_vfp_) "," \
+ "cr0, 0 @ fmxr " #_vfp_ ", %0" \
+ : : "r" (_var_) : "cc"); \
+})
#endif
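The functional change in this file is the volatile qualifier: VMRS/VMSR (and their coprocessor-encoded equivalents) have side effects the compiler cannot see, so without it the accesses may be reordered, merged, or dropped. A hedged usage sketch, assuming bit 24 of FPSCR is the flush-to-zero (FZ) flag:

    u32 fpscr = fmrx(FPSCR);

    fpscr |= 1 << 24;       /* set flush-to-zero */
    fmxr(FPSCR, fpscr);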