author     Thomas Gleixner <tglx@linutronix.de>   2010-02-26 20:38:31 +0100
committer  Thomas Gleixner <tglx@linutronix.de>   2010-02-26 20:38:31 +0100
commit     0d518ab64e5b1bf6ace8257c3eeb94b1ffcef2a2 (patch)
tree       71fa88c393701e3849c966b2f400784ce8620257
parent     5fd3a249542494b19e3c77c92b5d5ddc7fb8ece4 (diff)
parent     dcfb059a06c1f62061edaece74560f44925279f2 (diff)

Merge branch 'rt/head' into rt/2.6.33

Conflicts:
        Makefile

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--   Makefile                             |   2
-rw-r--r--   arch/ia64/sn/kernel/setup.c          |   2
-rw-r--r--   arch/microblaze/include/asm/io.h     |   2
-rw-r--r--   arch/microblaze/kernel/cpu/cache.c   |  27
-rw-r--r--   arch/parisc/kernel/pci.c             |   7
-rw-r--r--   arch/x86/include/asm/highmem.h       |   9
-rw-r--r--   arch/x86/mm/highmem_32.c             |  27
-rw-r--r--   arch/x86/mm/iomap_32.c               |  32
-rw-r--r--   arch/x86/pci/mmconfig-shared.c       |  17
-rw-r--r--   block/blk-core.c                     |  11
-rw-r--r--   drivers/mmc/host/omap_hsmmc.c        |  70
-rw-r--r--   include/linux/blkdev.h               |   4
-rw-r--r--   mm/highmem.c                         | 152
-rw-r--r--   security/integrity/ima/ima_iint.c    |   3
14 files changed, 187 insertions, 178 deletions
diff --git a/Makefile b/Makefile
index 95f232e8d60f..4f1a4630070f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 33
-EXTRAVERSION = -rc8-rt2
+EXTRAVERSION = -rt2
NAME = Man-Eating Seals of Antiquity
# *DOCUMENTATION*
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index ece1bf994499..e456f062f241 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
-DEFINE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid);
+DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
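The ia64 hunk above follows the updated DEFINE_PER_CPU() convention, where the array dimension belongs to the variable name rather than the type. A minimal sketch of the two forms, with a made-up variable name:

#include <linux/percpu.h>

/* preferred form: the dimension goes with the name */
DEFINE_PER_CPU(short, example_nasid_map[16]);

/* old form, as removed above: DEFINE_PER_CPU(short [16], example_nasid_map); */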
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index fc9997b73c09..267c7c779e53 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -217,7 +217,7 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
* Little endian
*/
-#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a));
+#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a))
#define out_le16(a, v) __raw_writew(__cpu_to_le16(v), (a))
#define in_le32(a) __le32_to_cpu(__raw_readl(a))
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index d9d63831cc2f..2a56bccce4e0 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -172,16 +172,15 @@ do { \
/* It is used only first parameter for OP - for wic, wdc */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
do { \
- int step = -line_length; \
- int count = end - start; \
- BUG_ON(count <= 0); \
+ int volatile temp; \
+ BUG_ON(end - start <= 0); \
\
- __asm__ __volatile__ (" 1: addk %0, %0, %1; \
- " #op " %0, r0; \
- bgtid %1, 1b; \
- addk %1, %1, %2; \
- " : : "r" (start), "r" (count), \
- "r" (step) : "memory"); \
+ __asm__ __volatile__ (" 1: " #op " %1, r0; \
+ cmpu %0, %1, %2; \
+ bgtid %0, 1b; \
+ addk %1, %1, %3; \
+ " : : "r" (temp), "r" (start), "r" (end),\
+ "r" (line_length) : "memory"); \
} while (0);
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
@@ -313,16 +312,6 @@ static void __invalidate_dcache_all_wb(void)
pr_debug("%s\n", __func__);
CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
wdc.clear)
-
-#if 0
- unsigned int i;
-
- pr_debug("%s\n", __func__);
-
- /* Just loop through cache size and invalidate it */
- for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length)
- __invalidate_dcache(0, i);
-#endif
}
static void __invalidate_dcache_range_wb(unsigned long start,
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index f7064abc3bb6..9e74bfe071dc 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -18,7 +18,6 @@
#include <asm/io.h>
#include <asm/system.h>
-#include <asm/cache.h> /* for L1_CACHE_BYTES */
#include <asm/superio.h>
#define DEBUG_RESOURCES 0
@@ -123,6 +122,10 @@ static int __init pcibios_init(void)
} else {
printk(KERN_WARNING "pci_bios != NULL but init() is!\n");
}
+
+ /* Set the CLS for PCI as early as possible. */
+ pci_cache_line_size = pci_dfl_cache_line_size;
+
return 0;
}
@@ -171,7 +174,7 @@ void pcibios_set_master(struct pci_dev *dev)
** upper byte is PCI_LATENCY_TIMER.
*/
pci_write_config_word(dev, PCI_CACHE_LINE_SIZE,
- (0x80 << 8) | (L1_CACHE_BYTES / sizeof(u32)));
+ (0x80 << 8) | pci_cache_line_size);
}
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 433ae1f02e95..827965748b96 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -55,14 +55,17 @@ extern unsigned long highstart_pfn, highend_pfn;
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
extern void *kmap_high(struct page *page);
+extern void *kmap_pfn_prot(unsigned long pfn, pgprot_t prot);
extern void kunmap_high(struct page *page);
void *kmap(struct page *page);
+void *kmap_page_prot(struct page *page, pgprot_t prot);
extern void kunmap_virt(void *ptr);
extern struct page *kmap_to_page(void *ptr);
void kunmap(struct page *page);
void *__kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
+void *__kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
void *__kmap_atomic(struct page *page, enum km_type type);
void *__kmap_atomic_direct(struct page *page, enum km_type type);
void __kunmap_atomic(void *kvaddr, enum km_type type);
@@ -85,15 +88,17 @@ extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
* on PREEMPT_RT kmap_atomic() is a wrapper that uses kmap():
*/
#ifdef CONFIG_PREEMPT_RT
-# define kmap_atomic_prot(page, type, prot) ({ pagefault_disable(); kmap(page); })
+# define kmap_atomic_prot(page, type, prot) ({ pagefault_disable(); kmap_pfn_prot(page_to_pfn(page), prot); })
+# define kmap_atomic_prot_pfn(pfn, type, prot) ({ pagefault_disable(); kmap_pfn_prot(pfn, prot); })
# define kmap_atomic(page, type) ({ pagefault_disable(); kmap(page); })
# define kmap_atomic_pfn(pfn, type) kmap(pfn_to_page(pfn))
-# define kunmap_atomic(kvaddr, type) do { pagefault_enable(); kunmap_virt(kvaddr); } while(0)
+# define kunmap_atomic(kvaddr, type) do { kunmap_virt(kvaddr); pagefault_enable(); } while(0)
# define kmap_atomic_to_page(kvaddr) kmap_to_page(kvaddr)
# define kmap_atomic_direct(page, type) __kmap_atomic_direct(page, type)
# define kunmap_atomic_direct(kvaddr, type) __kunmap_atomic(kvaddr, type)
#else
# define kmap_atomic_prot(page, type, prot) __kmap_atomic_prot(page, type, prot)
+# define kmap_atomic_prot_pfn(pfn, type, prot) __kmap_atomic_prot_pfn(pfn, type, prot)
# define kmap_atomic(page, type) __kmap_atomic(page, type)
# define kmap_atomic_pfn(pfn, type) __kmap_atomic_pfn(pfn, type)
# define kunmap_atomic(kvaddr, type) __kunmap_atomic(kvaddr, type)
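As a rough illustration of how the new kmap_atomic_prot_pfn() entry point pairs with kunmap_atomic() on both configurations (the helper and names below are hypothetical, not part of the patch): on mainline it resolves to the fixmap-based atomic mapping, while on PREEMPT_RT the macros above redirect it to the sleeping kmap_pfn_prot() path.

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical caller: map an arbitrary pfn with an explicit protection,
 * copy from it, and drop the mapping. */
static void copy_from_pfn(unsigned long pfn, void *dst, size_t len)
{
	void *src = kmap_atomic_prot_pfn(pfn, KM_USER0, PAGE_KERNEL);

	memcpy(dst, src, len);
	kunmap_atomic(src, KM_USER0);
}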
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index dcb1899bcff7..b4eb59a59ef4 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -19,16 +19,6 @@ void kunmap(struct page *page)
kunmap_high(page);
}
-void kunmap_virt(void *ptr)
-{
- struct page *page;
-
- if ((unsigned long)ptr < PKMAP_ADDR(0))
- return;
- page = pte_page(pkmap_page_table[PKMAP_NR((unsigned long)ptr)]);
- kunmap(page);
-}
-
struct page *kmap_to_page(void *ptr)
{
struct page *page;
@@ -70,6 +60,23 @@ void *__kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
return (void *)vaddr;
}
+void *__kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+{
+ enum fixed_addresses idx;
+ unsigned long vaddr;
+
+ preempt_disable();
+ pagefault_disable();
+
+ debug_kmap_atomic(type);
+ idx = type + KM_TYPE_NR * smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
+ arch_flush_lazy_mmu_mode();
+
+ return (void *)vaddr;
+}
+
void *__kmap_atomic_direct(struct page *page, enum km_type type)
{
return __kmap_atomic_prot(page, type, kmap_prot);
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 715d822334b4..38a1a6845a73 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -55,23 +55,6 @@ iomap_free(resource_size_t base, unsigned long size)
}
EXPORT_SYMBOL_GPL(iomap_free);
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
-{
- enum fixed_addresses idx;
- unsigned long vaddr;
-
- preempt_disable();
- pagefault_disable();
-
- debug_kmap_atomic(type);
- idx = type + KM_TYPE_NR * smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
- arch_flush_lazy_mmu_mode();
-
- return (void *)vaddr;
-}
-
/*
* Map 'pfn' using fixed map 'type' and protections 'prot'
*/
@@ -94,19 +77,6 @@ EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
void
iounmap_atomic(void *kvaddr, enum km_type type)
{
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
- enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
- /*
- * Force other mappings to Oops if they'll try to access this pte
- * without first remap it. Keeping stale mappings around is a bad idea
- * also, in case the page changes cacheability attributes or becomes
- * a protected page in a hypervisor.
- */
- if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
- kpte_clear_flush(kmap_pte-idx, vaddr);
-
- pagefault_enable();
- preempt_enable();
+ kunmap_atomic(kvaddr, type);
}
EXPORT_SYMBOL_GPL(iounmap_atomic);
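With iounmap_atomic() reduced to a wrapper around kunmap_atomic(), the usual caller pairing is unchanged. A hedged sketch under made-up names ('bar_pfn' is an assumed page frame of device memory):

#include <asm/iomap.h>
#include <linux/types.h>

/* Illustrative only: map one write-combined page, store into it, unmap. */
static void poke_bar_page(unsigned long bar_pfn)
{
	void *vaddr = iomap_atomic_prot_pfn(bar_pfn, KM_USER0, PAGE_KERNEL_WC);

	*(u32 *)vaddr = 0xdeadbeef;	/* any access pattern; WC semantics apply */
	iounmap_atomic(vaddr, KM_USER0);
}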
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index b19d1e54201e..8f3f9a50b1e0 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -303,22 +303,17 @@ static void __init pci_mmcfg_check_end_bus_number(void)
{
struct pci_mmcfg_region *cfg, *cfgx;
- /* last one*/
- cfg = list_entry(pci_mmcfg_list.prev, typeof(*cfg), list);
- if (cfg)
- if (cfg->end_bus < cfg->start_bus)
- cfg->end_bus = 255;
-
- if (list_is_singular(&pci_mmcfg_list))
- return;
-
- /* don't overlap please */
+ /* Fixup overlaps */
list_for_each_entry(cfg, &pci_mmcfg_list, list) {
if (cfg->end_bus < cfg->start_bus)
cfg->end_bus = 255;
+ /* Don't access the list head ! */
+ if (cfg->list.next == &pci_mmcfg_list)
+ break;
+
cfgx = list_entry(cfg->list.next, typeof(*cfg), list);
- if (cfg != cfgx && cfg->end_bus >= cfgx->start_bus)
+ if (cfg->end_bus >= cfgx->start_bus)
cfg->end_bus = cfgx->start_bus - 1;
}
}
diff --git a/block/blk-core.c b/block/blk-core.c
index 800d396f8924..94cf03ecac42 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1147,7 +1147,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
*/
static inline bool queue_should_plug(struct request_queue *q)
{
- return !(blk_queue_nonrot(q) && blk_queue_queuing(q));
+ return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
}
static int __make_request(struct request_queue *q, struct bio *bio)
@@ -1859,15 +1859,8 @@ void blk_dequeue_request(struct request *rq)
* and to it is freed is accounted as io that is in progress at
* the driver side.
*/
- if (blk_account_rq(rq)) {
+ if (blk_account_rq(rq))
q->in_flight[rq_is_sync(rq)]++;
- /*
- * Mark this device as supporting hardware queuing, if
- * we have more IOs in flight than 4.
- */
- if (!blk_queue_queuing(q) && queue_in_flight(q) > 4)
- set_bit(QUEUE_FLAG_CQ, &q->queue_flags);
- }
}
/**
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 4b2322518909..99a3383309ec 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -468,14 +468,6 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
if (host->use_dma)
cmdreg |= DMA_EN;
- /*
- * In an interrupt context (i.e. STOP command), the spinlock is unlocked
- * by the interrupt handler, otherwise (i.e. for a new request) it is
- * unlocked here.
- */
- if (!in_interrupt())
- spin_unlock_irqrestore(&host->irq_lock, host->flags);
-
OMAP_HSMMC_WRITE(host->base, ARG, cmd->arg);
OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
}
@@ -506,7 +498,9 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
}
host->mrq = NULL;
+ spin_unlock(&host->irq_lock);
mmc_request_done(host->mmc, mrq);
+ spin_lock(&host->irq_lock);
return;
}
@@ -523,7 +517,9 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
if (!data->stop) {
host->mrq = NULL;
+ spin_unlock(&host->irq_lock);
mmc_request_done(host->mmc, data->mrq);
+ spin_lock(&host->irq_lock);
return;
}
omap_hsmmc_start_command(host, data->stop, NULL);
@@ -551,7 +547,9 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
}
if ((host->data == NULL && !host->response_busy) || cmd->error) {
host->mrq = NULL;
+ spin_unlock(&host->irq_lock);
mmc_request_done(host->mmc, cmd->mrq);
+ spin_lock(&host->irq_lock);
}
}
@@ -1077,37 +1075,31 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
struct omap_hsmmc_host *host = mmc_priv(mmc);
int err;
+ spin_lock_irqsave(&host->irq_lock, host->flags);
/*
- * Prevent races with the interrupt handler because of unexpected
- * interrupts, but not if we are already in interrupt context i.e.
- * retries.
+ * Protect the card from I/O if there is a possibility
+ * it can be removed.
*/
- if (!in_interrupt()) {
- spin_lock_irqsave(&host->irq_lock, host->flags);
- /*
- * Protect the card from I/O if there is a possibility
- * it can be removed.
- */
- if (host->protect_card) {
- if (host->reqs_blocked < 3) {
- /*
- * Ensure the controller is left in a consistent
- * state by resetting the command and data state
- * machines.
- */
- omap_hsmmc_reset_controller_fsm(host, SRD);
- omap_hsmmc_reset_controller_fsm(host, SRC);
- host->reqs_blocked += 1;
- }
- req->cmd->error = -EBADF;
- if (req->data)
- req->data->error = -EBADF;
- spin_unlock_irqrestore(&host->irq_lock, host->flags);
- mmc_request_done(mmc, req);
- return;
- } else if (host->reqs_blocked)
- host->reqs_blocked = 0;
- }
+ if (host->protect_card) {
+ if (host->reqs_blocked < 3) {
+ /*
+ * Ensure the controller is left in a consistent
+ * state by resetting the command and data state
+ * machines.
+ */
+ omap_hsmmc_reset_controller_fsm(host, SRD);
+ omap_hsmmc_reset_controller_fsm(host, SRC);
+ host->reqs_blocked += 1;
+ }
+ req->cmd->error = -EBADF;
+ if (req->data)
+ req->data->error = -EBADF;
+ spin_unlock_irqrestore(&host->irq_lock, host->flags);
+ mmc_request_done(mmc, req);
+ return;
+ } else if (host->reqs_blocked)
+ host->reqs_blocked = 0;
+
WARN_ON(host->mrq != NULL);
host->mrq = req;
err = omap_hsmmc_prepare_data(host, req);
@@ -1116,13 +1108,13 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
if (req->data)
req->data->error = err;
host->mrq = NULL;
- if (!in_interrupt())
- spin_unlock_irqrestore(&host->irq_lock, host->flags);
+ spin_unlock_irqrestore(&host->irq_lock, host->flags);
mmc_request_done(mmc, req);
return;
}
omap_hsmmc_start_command(host, req->cmd, req->data);
+ spin_unlock_irqrestore(&host->irq_lock, host->flags);
}
/* Routine to configure clock values. Exposed API to core */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5c8018977efa..1896e868854f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -461,8 +461,7 @@ struct request_queue
#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */
-#define QUEUE_FLAG_CQ 16 /* hardware does queuing */
-#define QUEUE_FLAG_DISCARD 17 /* supports DISCARD */
+#define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_CLUSTER) | \
@@ -586,7 +585,6 @@ enum {
#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
-#define blk_queue_queuing(q) test_bit(QUEUE_FLAG_CQ, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
diff --git a/mm/highmem.c b/mm/highmem.c
index 446b75caa521..1b534a86cd7e 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -66,7 +66,13 @@ unsigned int nr_free_highpages (void)
* 1 means its free for use - either mapped or not.
* n means that there are (n-1) current users of it.
*/
-static atomic_t pkmap_count[LAST_PKMAP];
+
+struct pkmap_state {
+ atomic_t count;
+ int pfn;
+};
+
+static struct pkmap_state pkmap[LAST_PKMAP];
static atomic_t pkmap_hand;
static atomic_t pkmap_free;
static atomic_t pkmap_users;
@@ -105,25 +111,26 @@ static DECLARE_WAIT_QUEUE_HEAD(pkmap_wait);
*/
static int pkmap_try_free(int pos)
{
- if (atomic_cmpxchg(&pkmap_count[pos], 1, 0) != 1)
+ if (atomic_cmpxchg(&pkmap[pos].count, 1, 0) != 1)
return -1;
atomic_dec(&pkmap_free);
/*
* TODO: add a young bit to make it CLOCK
*/
if (!pte_none(pkmap_page_table[pos])) {
- struct page *page = pte_page(pkmap_page_table[pos]);
unsigned long addr = PKMAP_ADDR(pos);
pte_t *ptep = &pkmap_page_table[pos];
- VM_BUG_ON(addr != (unsigned long)page_address(page));
+ if (!pkmap[pos].pfn) {
+ struct page *page = pte_page(pkmap_page_table[pos]);
+ VM_BUG_ON(addr != (unsigned long)page_address(page));
+ if (!__set_page_address(page, NULL, pos))
+ BUG();
+ flush_kernel_dcache_page(page);
+ }
- if (!__set_page_address(page, NULL, pos))
- BUG();
- flush_kernel_dcache_page(page);
pte_clear(&init_mm, addr, ptep);
-
return 1;
}
@@ -187,7 +194,7 @@ got_one:
continue;
if (!flush) {
- atomic_t *counter = &pkmap_count[pos2];
+ atomic_t *counter = &pkmap[pos2].count;
VM_BUG_ON(atomic_read(counter) != 0);
atomic_set(counter, 2);
pkmap_put(counter);
@@ -197,7 +204,7 @@ got_one:
flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
for (i = 0; i < nr; i++) {
- atomic_t *counter = &pkmap_count[entries[i]];
+ atomic_t *counter = &pkmap[entries[i]].count;
VM_BUG_ON(atomic_read(counter) != 0);
atomic_set(counter, 2);
pkmap_put(counter);
@@ -207,32 +214,51 @@ got_one:
return pos;
}
-static unsigned long pkmap_insert(struct page *page)
+static unsigned long pkmap_insert(unsigned long pfn, pgprot_t prot)
{
int pos = pkmap_get_free();
unsigned long vaddr = PKMAP_ADDR(pos);
pte_t *ptep = &pkmap_page_table[pos];
- pte_t entry = mk_pte(page, kmap_prot);
- atomic_t *counter = &pkmap_count[pos];
+ pte_t entry = pfn_pte(pfn, prot);
+ atomic_t *counter = &pkmap[pos].count;
VM_BUG_ON(atomic_read(counter) != 0);
-
set_pte_at(&init_mm, vaddr, ptep, entry);
- if (unlikely(!__set_page_address(page, (void *)vaddr, pos))) {
+
+ pkmap[pos].pfn =
+ !(pgprot_val(prot) == pgprot_val(kmap_prot) && pfn_valid(pfn));
+
+ if (!pkmap[pos].pfn) {
+ struct page *page = pfn_to_page(pfn);
+
+ if (unlikely(!__set_page_address(page, (void *)vaddr, pos))) {
+ /*
+ * concurrent pkmap_inserts for this page -
+ * the other won the race, release this entry.
+ *
+ * we can still clear the pte without a tlb flush since
+ * it couldn't have been used yet.
+ */
+ pte_clear(&init_mm, vaddr, ptep);
+ VM_BUG_ON(atomic_read(counter) != 0);
+ atomic_set(counter, 2);
+ pkmap_put(counter);
+ return 0;
+ }
+ } else {
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/*
- * concurrent pkmap_inserts for this page -
- * the other won the race, release this entry.
+ * non-default prot and pure pfn memory doesn't
+ * get map deduplication, nor a working page_address
*
- * we can still clear the pte without a tlb flush since
- * it couldn't have been used yet.
+ * this also makes it incompatible with
+ * ARCH_NEEDS_KMAP_HIGH_GET
*/
- pte_clear(&init_mm, vaddr, ptep);
- VM_BUG_ON(atomic_read(counter) != 0);
- atomic_set(counter, 2);
- pkmap_put(counter);
- vaddr = 0;
- } else
- atomic_set(counter, 2);
+ BUG();
+#endif
+ }
+
+ atomic_set(counter, 2);
return vaddr;
}
@@ -313,20 +339,17 @@ static void kunmap_account(void)
wake_up(&pkmap_wait);
}
-void *kmap_high(struct page *page)
+void *kmap_get(struct page *page)
{
unsigned long vaddr;
-
-
- kmap_account();
again:
vaddr = (unsigned long)page_address(page);
if (vaddr) {
- atomic_t *counter = &pkmap_count[PKMAP_NR(vaddr)];
+ atomic_t *counter = &pkmap[PKMAP_NR(vaddr)].count;
if (atomic_inc_not_zero(counter)) {
/*
- * atomic_inc_not_zero implies a (memory) barrier on success
- * so page address will be reloaded.
+ * atomic_inc_not_zero implies a (memory) barrier on
+ * success so page address will be reloaded.
*/
unsigned long vaddr2 = (unsigned long)page_address(page);
if (likely(vaddr == vaddr2))
@@ -341,19 +364,49 @@ again:
* reused.
*/
pkmap_put(counter);
- goto again;
}
+ goto again;
}
+ return (void *)vaddr;
+}
- vaddr = pkmap_insert(page);
- if (!vaddr)
- goto again;
+void *kmap_high(struct page *page)
+{
+ unsigned long vaddr;
+
+ kmap_account();
+
+again:
+ vaddr = (unsigned long)kmap_get(page);
+ if (!vaddr) {
+ vaddr = pkmap_insert(page_to_pfn(page), kmap_prot);
+ if (!vaddr)
+ goto again;
+ }
return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_high);
+void *kmap_pfn_prot(unsigned long pfn, pgprot_t prot)
+{
+ unsigned long vaddr;
+
+ if (pgprot_val(prot) == pgprot_val(kmap_prot) &&
+ pfn_valid(pfn) && PageHighMem(pfn_to_page(pfn)))
+ return kmap_high(pfn_to_page(pfn));
+
+ kmap_account();
+
+ vaddr = pkmap_insert(pfn, prot);
+ BUG_ON(!vaddr);
+
+ return (void *)vaddr;
+}
+
+EXPORT_SYMBOL(kmap_pfn_prot);
+
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/**
* kmap_high_get - pin a highmem page into memory
@@ -370,21 +423,26 @@ void *kmap_high_get(struct page *page)
unsigned long vaddr, flags;
lock_kmap_any(flags);
- vaddr = (unsigned long)page_address(page);
- if (vaddr) {
- BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
- pkmap_count[PKMAP_NR(vaddr)]++;
- }
+ vaddr = (unsigned long)kmap_get(page);
unlock_kmap_any(flags);
- return (void*) vaddr;
+ return (void *)vaddr;
}
#endif
-void kunmap_high(struct page *page)
+void kunmap_virt(void *ptr)
+{
+ unsigned long vaddr = (unsigned long)ptr;
+ if (vaddr < PKMAP_ADDR(0) || vaddr >= PKMAP_ADDR(LAST_PKMAP))
+ return;
+ pkmap_put(&pkmap[PKMAP_NR(vaddr)].count);
+ kunmap_account();
+}
+
+void kunmap_high(struct page *page)
{
unsigned long vaddr = (unsigned long)page_address(page);
BUG_ON(!vaddr);
- pkmap_put(&pkmap_count[PKMAP_NR(vaddr)]);
+ pkmap_put(&pkmap[PKMAP_NR(vaddr)].count);
kunmap_account();
}
@@ -539,8 +597,8 @@ void __init page_address_init(void)
#ifdef CONFIG_HIGHMEM
int i;
- for (i = 0; i < ARRAY_SIZE(pkmap_count); i++)
- atomic_set(&pkmap_count[i], 1);
+ for (i = 0; i < ARRAY_SIZE(pkmap); i++)
+ atomic_set(&pkmap[i].count, 1);
atomic_set(&pkmap_hand, 0);
atomic_set(&pkmap_free, LAST_PKMAP);
atomic_set(&pkmap_users, 0);
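A rough usage sketch for the new pfn-based pkmap entry points added above (the caller and pfn are hypothetical): a non-default protection bypasses the page_address() bookkeeping, so the mapping is obtained and released purely by virtual address via kunmap_virt().

#include <linux/highmem.h>

/* Illustrative: map a possibly struct-page-less pfn uncached, touch it,
 * and release it by address. May sleep, so process context only. */
static void touch_device_pfn(unsigned long pfn)
{
	void *vaddr = kmap_pfn_prot(pfn, PAGE_KERNEL_NOCACHE);

	((volatile u8 *)vaddr)[0] = 0;
	kunmap_virt(vaddr);
}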
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c
index 0d83edcfc402..2d4d05d92fda 100644
--- a/security/integrity/ima/ima_iint.c
+++ b/security/integrity/ima/ima_iint.c
@@ -63,12 +63,11 @@ int ima_inode_alloc(struct inode *inode)
spin_lock(&ima_iint_lock);
rc = radix_tree_insert(&ima_iint_store, (unsigned long)inode, iint);
spin_unlock(&ima_iint_lock);
+ radix_tree_preload_end();
out:
if (rc < 0)
kmem_cache_free(iint_cache, iint);
- radix_tree_preload_end();
-
return rc;
}
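The IMA hunk above restores the usual radix-tree preload discipline: radix_tree_preload() leaves preemption disabled on success, so radix_tree_preload_end() must run right after the locked insert, before any error path that may sleep or free. A sketch of the pattern with generic names (not the IMA code):

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static int store_item(struct radix_tree_root *root, spinlock_t *lock,
		      unsigned long index, void *item)
{
	int rc = radix_tree_preload(GFP_NOFS);

	if (rc < 0)
		return rc;

	spin_lock(lock);
	rc = radix_tree_insert(root, index, item);
	spin_unlock(lock);
	radix_tree_preload_end();	/* re-enables preemption */

	return rc;
}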