Discussion:
[Patch Part3 V2 21/21] pci, ACPI, iommu: enhance pci_root to support DMAR device hotplug
Jiang Liu
2014-05-06 07:05:32 UTC
Finally, enhance the pci_root driver to support DMAR device hotplug
when hot-plugging PCI host bridges.
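
The unwind ordering matters here: dmar_device_add() runs before the bus
is scanned, and any later failure must tear the DMAR state back down.
Below is a minimal user-space sketch of that goto-unwind shape; all
names are invented for illustration and are not the kernel API.

/* Sketch of the goto-unwind ordering the patch adds (names invented). */
#include <stdio.h>

static int dmar_add(void)     { puts("add dmar unit");    return 0; }
static void dmar_remove(void) { puts("remove dmar unit"); }
static int scan_bus(void)     { puts("scan bus");         return -1; /* simulate failure */ }

static int root_add(int hotadd)
{
    int ret = 0;

    /* DMAR state is set up before the bus is scanned... */
    if (hotadd && (ret = dmar_add()))
        return ret;                /* nothing else to unwind yet */

    ret = scan_bus();
    if (ret)
        goto remove_dmar;          /* ...so it is torn down last */

    return 0;

remove_dmar:
    if (hotadd)
        dmar_remove();
    return ret;
}

int main(void)
{
    return root_add(1) ? 1 : 0;    /* prints the add/scan/remove sequence */
}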

Signed-off-by: Jiang Liu <***@linux.intel.com>
---
drivers/acpi/pci_root.c | 16 ++++++++++++++--
1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index d388f13d48b4..99c2b9761c12 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -33,6 +33,7 @@
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/pci-aspm.h>
+#include <linux/dmar.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <acpi/apei.h> /* for acpi_hest_init() */
@@ -511,6 +512,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
struct acpi_pci_root *root;
acpi_handle handle = device->handle;
int no_aspm = 0, clear_aspm = 0;
+ bool hotadd = system_state != SYSTEM_BOOTING;

root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
@@ -557,6 +559,11 @@ static int acpi_pci_root_add(struct acpi_device *device,
strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
device->driver_data = root;

+ if (hotadd && dmar_device_add(handle)) {
+ result = -ENXIO;
+ goto end;
+ }
+
pr_info(PREFIX "%s [%s] (domain %04x %pR)\n",
acpi_device_name(device), acpi_device_bid(device),
root->segment, &root->secondary);
@@ -583,7 +590,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
root->segment, (unsigned int)root->secondary.start);
device->driver_data = NULL;
result = -ENODEV;
- goto end;
+ goto remove_dmar;
}

if (clear_aspm) {
@@ -597,7 +604,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
if (device->wakeup.flags.run_wake)
device_set_run_wake(root->bus->bridge, true);

- if (system_state != SYSTEM_BOOTING) {
+ if (hotadd) {
pcibios_resource_survey_bus(root->bus);
pci_assign_unassigned_root_bus_resources(root->bus);
}
@@ -607,6 +614,9 @@ static int acpi_pci_root_add(struct acpi_device *device,
pci_unlock_rescan_remove();
return 1;

+remove_dmar:
+ if (hotadd)
+ dmar_device_remove(handle);
end:
kfree(root);
return result;
@@ -625,6 +635,8 @@ static void acpi_pci_root_remove(struct acpi_device *device)

pci_remove_root_bus(root->bus);

+ dmar_device_remove(device->handle);
+
pci_unlock_rescan_remove();

kfree(root);
--
1.7.10.4
Jiang Liu
2014-05-06 07:05:20 UTC
Simplify the error handling path by changing
iommu_{enable|disable}_translation() to return void. These functions
always return success because IOMMU_WAIT_OP() spins until the expected
status bit is observed, so the error checks in their callers were dead
code.

Signed-off-by: Jiang Liu <***@linux.intel.com>
---
drivers/iommu/intel-iommu.c | 18 +++++-------------
1 file changed, 5 insertions(+), 13 deletions(-)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 5e865c1265ad..75d7c5e3f77d 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1391,7 +1391,7 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

-static int iommu_enable_translation(struct intel_iommu *iommu)
+static void iommu_enable_translation(struct intel_iommu *iommu)
{
u32 sts;
unsigned long flags;
@@ -1405,10 +1405,9 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
readl, (sts & DMA_GSTS_TES), sts);

raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
- return 0;
}

-static int iommu_disable_translation(struct intel_iommu *iommu)
+static void iommu_disable_translation(struct intel_iommu *iommu)
{
u32 sts;
unsigned long flag;
@@ -1422,7 +1421,6 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
readl, (!(sts & DMA_GSTS_TES)), sts);

raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- return 0;
}


@@ -2875,11 +2873,7 @@ static int __init init_dmars(void)

iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
-
- ret = iommu_enable_translation(iommu);
- if (ret)
- goto free_iommu;
-
+ iommu_enable_translation(iommu);
iommu_disable_protect_mem_regions(iommu);
}

@@ -3577,10 +3571,8 @@ static int init_iommu_hw(void)

iommu->flush.flush_context(iommu, 0, 0, 0,
DMA_CCMD_GLOBAL_INVL);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH);
- if (iommu_enable_translation(iommu))
- return 1;
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+ iommu_enable_translation(iommu);
iommu_disable_protect_mem_regions(iommu);
}
--
1.7.10.4
Jiang Liu
2014-05-06 07:05:25 UTC
Introduce helper function dmar_walk_resources() to walk the resource
entries in the DMAR table and in the ACPI buffer objects returned by
the ACPI _DSM method for IOMMU hotplug.
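
The core idea is a table of per-type callbacks driven by one
bounds-checked walker, replacing the open-coded switch statements.
A minimal user-space sketch of the pattern follows; the types and names
are invented for illustration, not the kernel API.

/* Sketch of the walk-with-callback-table pattern (names invented). */
#include <stdio.h>
#include <stddef.h>

struct rec_header { unsigned short type, length; };

#define REC_TYPE_MAX 2
typedef int (*rec_handler_t)(struct rec_header *, void *);

struct rec_callback {
    rec_handler_t cb[REC_TYPE_MAX];
    void *arg[REC_TYPE_MAX];
};

static int walk(void *start, size_t len, struct rec_callback *cb)
{
    struct rec_header *iter = start, *next;
    void *end = (char *)start + len;
    int ret = 0;

    while ((void *)iter < end && ret == 0) {
        if (iter->length == 0)
            return -1;          /* malformed: avoid looping forever */
        next = (struct rec_header *)((char *)iter + iter->length);
        if ((void *)next > end)
            return -1;          /* record overruns the table */
        if (iter->type < REC_TYPE_MAX && cb->cb[iter->type])
            ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
        iter = next;            /* unknown types are skipped silently */
    }
    return ret;
}

static int count_rec(struct rec_header *h, void *arg)
{
    (void)h;
    (*(int *)arg)++;
    return 0;
}

int main(void)
{
    struct rec_header recs[2] = { { 0, sizeof(recs[0]) },
                                  { 1, sizeof(recs[0]) } };
    int n = 0, ret;
    struct rec_callback cb = { .cb  = { count_rec, count_rec },
                               .arg = { &n, &n } };

    ret = walk(recs, sizeof(recs), &cb);
    printf("walked %d records, ret=%d\n", n, ret);
    return 0;
}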

Signed-off-by: Jiang Liu <***@linux.intel.com>
---
drivers/iommu/dmar.c | 208 +++++++++++++++++++++++--------------------
drivers/iommu/intel-iommu.c | 4 +-
include/linux/dmar.h | 19 ++--
3 files changed, 122 insertions(+), 109 deletions(-)

diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 39f8b717fe84..acfceb25a2d2 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -43,6 +43,14 @@

#include "irq_remapping.h"

+typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
+struct dmar_res_callback {
+ dmar_res_handler_t cb[ACPI_DMAR_TYPE_RESERVED];
+ void *arg[ACPI_DMAR_TYPE_RESERVED];
+ bool ignore_unhandled;
+ bool print_entry;
+};
+
/*
* Assumptions:
* 1) The hotplug framework guarentees that DMAR unit will be hot-added
@@ -332,7 +340,7 @@ static struct notifier_block dmar_pci_bus_nb = {
* present in the platform
*/
static int __init
-dmar_parse_one_drhd(struct acpi_dmar_header *header)
+dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_hardware_unit *drhd;
struct dmar_drhd_unit *dmaru;
@@ -363,6 +371,10 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
return ret;
}
dmar_register_drhd_unit(dmaru);
+
+ if (arg)
+ (*(int *)arg)++;
+
return 0;
}

@@ -375,7 +387,8 @@ static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
kfree(dmaru);
}

-static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
+static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
+ void *arg)
{
struct acpi_dmar_andd *andd = (void *)header;

@@ -397,7 +410,7 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)

#ifdef CONFIG_ACPI_NUMA
static int __init
-dmar_parse_one_rhsa(struct acpi_dmar_header *header)
+dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_rhsa *rhsa;
struct dmar_drhd_unit *drhd;
@@ -424,6 +437,8 @@ dmar_parse_one_rhsa(struct acpi_dmar_header *header)

return 0;
}
+#else
+#define dmar_parse_one_rhsa dmar_res_noop
#endif

static void __init
@@ -485,6 +500,52 @@ static int __init dmar_table_detect(void)
return (ACPI_SUCCESS(status) ? 1 : 0);
}

+static int dmar_walk_resources(struct acpi_dmar_header *start, size_t len,
+ struct dmar_res_callback *cb)
+{
+ int ret = 0;
+ struct acpi_dmar_header *iter, *next;
+ struct acpi_dmar_header *end = ((void *)start) + len;
+
+ for (iter = start; iter < end && ret == 0; iter = next) {
+ next = (void *)iter + iter->length;
+ if (iter->length == 0) {
+ /* Avoid looping forever on bad ACPI tables */
+ pr_debug(FW_BUG "Invalid 0-length structure\n");
+ break;
+ } else if (next > end) {
+ /* Avoid passing table end */
+ pr_warn(FW_BUG "record passes table end\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (cb->print_entry)
+ dmar_table_print_dmar_entry(iter);
+
+ if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
+ /* continue for forward compatibility */
+ pr_debug("Unknown DMAR structure type %d\n",
+ iter->type);
+ } else if (cb->cb[iter->type]) {
+ ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
+ } else if (!cb->ignore_unhandled) {
+ pr_warn("No handler for DMAR structure type %d\n",
+ iter->type);
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
+ struct dmar_res_callback *cb)
+{
+ return dmar_walk_resources((struct acpi_dmar_header *)(dmar + 1),
+ dmar->header.length - sizeof(*dmar), cb);
+}
+
/**
* parse_dmar_table - parses the DMA reporting table
*/
@@ -492,9 +553,18 @@ static int __init
parse_dmar_table(void)
{
struct acpi_table_dmar *dmar;
- struct acpi_dmar_header *entry_header;
int ret = 0;
int drhd_count = 0;
+ struct dmar_res_callback cb = {
+ .print_entry = true,
+ .ignore_unhandled = true,
+ .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
+ .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
+ .cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
+ .cb[ACPI_DMAR_TYPE_ATSR] = &dmar_parse_one_atsr,
+ .cb[ACPI_DMAR_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
+ .cb[ACPI_DMAR_TYPE_ANDD] = &dmar_parse_one_andd,
+ };

/*
* Do it again, earlier dmar_tbl mapping could be mapped with
@@ -519,50 +589,10 @@ parse_dmar_table(void)

pr_info("Host address width %d\n", dmar->width + 1);

- entry_header = (struct acpi_dmar_header *)(dmar + 1);
- while (((unsigned long)entry_header) <
- (((unsigned long)dmar) + dmar_tbl->length)) {
- /* Avoid looping forever on bad ACPI tables */
- if (entry_header->length == 0) {
- pr_warn("Invalid 0-length structure\n");
- ret = -EINVAL;
- break;
- }
-
- dmar_table_print_dmar_entry(entry_header);
-
- switch (entry_header->type) {
- case ACPI_DMAR_TYPE_HARDWARE_UNIT:
- drhd_count++;
- ret = dmar_parse_one_drhd(entry_header);
- break;
- case ACPI_DMAR_TYPE_RESERVED_MEMORY:
- ret = dmar_parse_one_rmrr(entry_header);
- break;
- case ACPI_DMAR_TYPE_ATSR:
- ret = dmar_parse_one_atsr(entry_header);
- break;
- case ACPI_DMAR_HARDWARE_AFFINITY:
-#ifdef CONFIG_ACPI_NUMA
- ret = dmar_parse_one_rhsa(entry_header);
-#endif
- break;
- case ACPI_DMAR_TYPE_ANDD:
- ret = dmar_parse_one_andd(entry_header);
- break;
- default:
- pr_warn("Unknown DMAR structure type %d\n",
- entry_header->type);
- ret = 0; /* for forward compatibility */
- break;
- }
- if (ret)
- break;
-
- entry_header = ((void *)entry_header + entry_header->length);
- }
- if (drhd_count == 0)
+ ret = dmar_walk_dmar_table(dmar, &cb);
+ if (ret == 0 && drhd_count == 0)
pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
+
return ret;
}

@@ -761,76 +791,60 @@ static void warn_invalid_dmar(u64 addr, const char *message)
dmi_get_system_info(DMI_PRODUCT_VERSION));
}

-static int __init check_zero_address(void)
+static int __ref
+dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
- struct acpi_table_dmar *dmar;
- struct acpi_dmar_header *entry_header;
struct acpi_dmar_hardware_unit *drhd;
+ void __iomem *addr;
+ u64 cap, ecap;

- dmar = (struct acpi_table_dmar *)dmar_tbl;
- entry_header = (struct acpi_dmar_header *)(dmar + 1);
-
- while (((unsigned long)entry_header) <
- (((unsigned long)dmar) + dmar_tbl->length)) {
- /* Avoid looping forever on bad ACPI tables */
- if (entry_header->length == 0) {
- pr_warn("Invalid 0-length structure\n");
- return 0;
- }
-
- if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
- void __iomem *addr;
- u64 cap, ecap;
-
- drhd = (void *)entry_header;
- if (!drhd->address) {
- warn_invalid_dmar(0, "");
- goto failed;
- }
+ drhd = (void *)entry;
+ if (!drhd->address) {
+ warn_invalid_dmar(0, "");
+ return -EINVAL;
+ }

- addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
- if (!addr ) {
- printk("IOMMU: can't validate: %llx\n", drhd->address);
- goto failed;
- }
- cap = dmar_readq(addr + DMAR_CAP_REG);
- ecap = dmar_readq(addr + DMAR_ECAP_REG);
- early_iounmap(addr, VTD_PAGE_SIZE);
- if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
- warn_invalid_dmar(drhd->address,
- " returns all ones");
- goto failed;
- }
- }
+ addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
+ if (!addr) {
+ pr_warn("IOMMU: can't validate: %llx\n", drhd->address);
+ return -EINVAL;
+ }
+ cap = dmar_readq(addr + DMAR_CAP_REG);
+ ecap = dmar_readq(addr + DMAR_ECAP_REG);
+ early_iounmap(addr, VTD_PAGE_SIZE);

- entry_header = ((void *)entry_header + entry_header->length);
+ if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
+ warn_invalid_dmar(drhd->address, " returns all ones");
+ return -EINVAL;
}
- return 1;

-failed:
return 0;
}

int __init detect_intel_iommu(void)
{
int ret;
+ struct dmar_res_callback validate_drhd_cb = {
+ .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
+ .ignore_unhandled = true,
+ };

down_write(&dmar_global_lock);
ret = dmar_table_detect();
if (ret)
- ret = check_zero_address();
- {
- if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
- iommu_detected = 1;
- /* Make sure ACS will be enabled */
- pci_request_acs();
- }
+ ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
+ &validate_drhd_cb);
+ if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
+ iommu_detected = 1;
+ /* Make sure ACS will be enabled */
+ pci_request_acs();
+ }

#ifdef CONFIG_X86
- if (ret)
- x86_init.iommu.iommu_init = intel_iommu_init;
+ if (ret)
+ x86_init.iommu.iommu_init = intel_iommu_init;
#endif
- }
+
early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
dmar_tbl = NULL;
up_write(&dmar_global_lock);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 080b4f24bd7c..b696bcd3f6e0 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3652,7 +3652,7 @@ static inline void init_iommu_pm_ops(void) {}
#endif /* CONFIG_PM */


-int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
+int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_reserved_memory *rmrr;
struct dmar_rmrr_unit *rmrru;
@@ -3678,7 +3678,7 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
return 0;
}

-int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
+int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 1deece46a0ca..fac8ca34f9a8 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -115,22 +115,21 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
extern int detect_intel_iommu(void);
extern int enable_drhd_fault_handling(void);

+static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
+{
+ return 0;
+}
+
#ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu;
extern int intel_iommu_init(void);
-extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header);
-extern int dmar_parse_one_atsr(struct acpi_dmar_header *header);
+extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
+extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
#else /* !CONFIG_INTEL_IOMMU: */
static inline int intel_iommu_init(void) { return -ENODEV; }
-static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header)
-{
- return 0;
-}
-static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header)
-{
- return 0;
-}
+#define dmar_parse_one_rmrr dmar_res_noop
+#define dmar_parse_one_atsr dmar_res_noop
static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
return 0;
--
1.7.10.4
Jiang Liu
2014-05-06 07:05:18 UTC
Virtual machine domains are created by intel_iommu_domain_init() and
should be destroyed by intel_iommu_domain_destroy(). So avoid freeing a
virtual machine domain's data structure in free_dmar_iommu() when
domain->iommu_count reaches zero, otherwise it may cause invalid memory
accesses because the IOMMU framework still holds references to the
domain structure.

Signed-off-by: Jiang Liu <***@linux.intel.com>
---
drivers/iommu/intel-iommu.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 1f10fcd1c696..5e865c1265ad 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1482,7 +1482,8 @@ static void free_dmar_iommu(struct intel_iommu *iommu)

domain = iommu->domains[i];
clear_bit(i, iommu->domain_ids);
- if (domain_detach_iommu(domain, iommu) == 0)
+ if (domain_detach_iommu(domain, iommu) == 0 &&
+ !domain_type_is_vm(domain))
domain_exit(domain);
}
}
--
1.7.10.4
Jiang Liu
2014-05-06 07:05:14 UTC
Introduce domain_type_is_vm() and domain_type_is_vm_or_si() to improve
code readability.

Also kill the now-unused macro DOMAIN_FLAG_P2P_MULTIPLE_DEVICES.

Signed-off-by: Jiang Liu <***@linux.intel.com>
---
drivers/iommu/intel-iommu.c | 59 +++++++++++++++++++------------------------
1 file changed, 26 insertions(+), 33 deletions(-)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0b3e6981a2da..63f351a39c63 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -320,16 +320,13 @@ static inline int first_pte_in_page(struct dma_pte *pte)
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

-/* devices under the same p2p bridge are owned in one domain */
-#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
-
/* domain represents a virtual machine, more than one devices
* across iommus may be owned in one domain, e.g. kvm guest.
*/
-#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
+#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)

/* si_domain contains mulitple devices */
-#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
+#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)

/* define the limit of IOMMUs supported in each domain */
#ifdef CONFIG_X86
@@ -539,6 +536,16 @@ void free_iova_mem(struct iova *iova)
kmem_cache_free(iommu_iova_cache, iova);
}

+static inline int domain_type_is_vm(struct dmar_domain *domain)
+{
+ return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
+}
+
+static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
+{
+ return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
+ DOMAIN_FLAG_STATIC_IDENTITY);
+}

static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
@@ -579,9 +586,7 @@ static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
int iommu_id;

/* si_domain and vm domain should not get here. */
- BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
- BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
-
+ BUG_ON(domain_type_is_vm_or_si(domain));
iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
return NULL;
@@ -1499,7 +1504,7 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
free_context_table(iommu);
}

-static struct dmar_domain *alloc_domain(bool vm)
+static struct dmar_domain *alloc_domain(int flags)
{
/* domain id for virtual machine, it won't be set in context */
static atomic_t vm_domid = ATOMIC_INIT(0);
@@ -1509,16 +1514,13 @@ static struct dmar_domain *alloc_domain(bool vm)
if (!domain)
return NULL;

+ memset(domain, 0, sizeof(*domain));
domain->nid = -1;
- domain->iommu_count = 0;
- memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
- domain->flags = 0;
+ domain->flags = flags;
spin_lock_init(&domain->iommu_lock);
INIT_LIST_HEAD(&domain->devices);
- if (vm) {
+ if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
domain->id = atomic_inc_return(&vm_domid);
- domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
- }

return domain;
}
@@ -1706,7 +1708,7 @@ static void domain_exit(struct dmar_domain *domain)
/* clear attached or cached domains */
rcu_read_lock();
for_each_active_iommu(iommu, drhd)
- if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
+ if (domain_type_is_vm(domain) ||
test_bit(iommu->seq_id, domain->iommu_bmp))
iommu_detach_domain(domain, iommu);
rcu_read_unlock();
@@ -1748,8 +1750,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
id = domain->id;
pgd = domain->pgd;

- if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
- domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
+ if (domain_type_is_vm_or_si(domain)) {
int found = 0;

/* find an available domain id for this device in iommu */
@@ -2115,7 +2116,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
iommu_disable_dev_iotlb(info);
iommu_detach_dev(info->iommu, info->bus, info->devfn);

- if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
+ if (domain_type_is_vm(domain)) {
iommu_detach_dependent_devices(info->iommu, info->dev);
/* clear this iommu in iommu_bmp, update iommu count
* and capabilities
@@ -2181,8 +2182,6 @@ static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
info->dev = dev;
info->domain = domain;
info->iommu = iommu;
- if (!dev)
- domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;

spin_lock_irqsave(&device_domain_lock, flags);
if (dev)
@@ -2257,7 +2256,7 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
goto error;

/* Allocate and initialize new domain for the device */
- domain = alloc_domain(false);
+ domain = alloc_domain(0);
if (!domain)
goto error;
if (iommu_attach_domain(domain, iommu)) {
@@ -2421,12 +2420,10 @@ static int __init si_domain_init(int hw)
struct intel_iommu *iommu;
int nid, ret = 0;

- si_domain = alloc_domain(false);
+ si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
if (!si_domain)
return -EFAULT;

- si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
-
for_each_active_iommu(iommu, drhd) {
ret = iommu_attach_domain(si_domain, iommu);
if (ret) {
@@ -3860,9 +3857,7 @@ static int device_notifier(struct notifier_block *nb,

down_read(&dmar_global_lock);
domain_remove_one_dev_info(domain, dev);
- if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
- !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
- list_empty(&domain->devices))
+ if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
domain_exit(domain);
up_read(&dmar_global_lock);

@@ -4111,8 +4106,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
domain_update_iommu_cap(domain);
spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);

- if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
- !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
+ if (!domain_type_is_vm_or_si(domain)) {
spin_lock_irqsave(&iommu->lock, tmp_flags);
clear_bit(domain->id, iommu->domain_ids);
iommu->domains[domain->id] = NULL;
@@ -4151,7 +4145,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain;

- dmar_domain = alloc_domain(true);
+ dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
if (!dmar_domain) {
printk(KERN_ERR
"intel_iommu_domain_init: dmar_domain == NULL\n");
@@ -4195,8 +4189,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,

old_domain = find_domain(dev);
if (old_domain) {
- if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
- dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
+ if (domain_type_is_vm_or_si(dmar_domain))
domain_remove_one_dev_info(old_domain, dev);
else
domain_remove_dev_info(old_domain);
--
1.7.10.4
Jiang Liu
2014-05-06 07:05:22 UTC
Introduce helper function domain_pfn_supported() to simplify code
and improve readability.
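
As a worked example of the check (a user-space sketch, not the kernel
helper itself): with a 39-bit address width and 4 KiB pages,
addr_width = 39 - 12 = 27, so only DMA pfns below 2^27 pass.

/* Sketch of the range check; 64 stands in for BITS_PER_LONG. */
#include <stdio.h>

static int pfn_supported(int addr_width, unsigned long pfn)
{
    /* a pfn is supported unless it has bits above addr_width */
    return !(addr_width < 64 && pfn >> addr_width);
}

int main(void)
{
    printf("%d\n", pfn_supported(27, (1UL << 27) - 1)); /* 1: in range  */
    printf("%d\n", pfn_supported(27, 1UL << 27));       /* 0: too large */
    return 0;
}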

Signed-off-by: Jiang Liu <***@linux.intel.com>
---
drivers/iommu/intel-iommu.c | 30 ++++++++++++++++--------------
1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index cf083c8a223e..7b54f4a08bb9 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -549,6 +549,14 @@ static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
DOMAIN_FLAG_STATIC_IDENTITY);
}

+static inline int domain_pfn_supported(struct dmar_domain *domain,
+ unsigned long pfn)
+{
+ int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
+
+ return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
+}
+
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
unsigned long sagaw;
@@ -822,14 +830,13 @@ out:
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
unsigned long pfn, int *target_level)
{
- int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
struct dma_pte *parent, *pte = NULL;
int level = agaw_to_level(domain->agaw);
int offset;

BUG_ON(!domain->pgd);

- if (addr_width < BITS_PER_LONG && pfn >> addr_width)
+ if (!domain_pfn_supported(domain, pfn))
/* Address beyond IOMMU's addressing capabilities. */
return NULL;

@@ -914,12 +921,11 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
unsigned long start_pfn,
unsigned long last_pfn)
{
- int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
unsigned int large_page = 1;
struct dma_pte *first_pte, *pte;

- BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
- BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+ BUG_ON(!domain_pfn_supported(domain, start_pfn));
+ BUG_ON(!domain_pfn_supported(domain, last_pfn));
BUG_ON(start_pfn > last_pfn);

/* we don't need lock here; nobody else touches the iova range */
@@ -980,10 +986,8 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
unsigned long start_pfn,
unsigned long last_pfn)
{
- int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
-
- BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
- BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+ BUG_ON(!domain_pfn_supported(domain, start_pfn));
+ BUG_ON(!domain_pfn_supported(domain, last_pfn));
BUG_ON(start_pfn > last_pfn);

dma_pte_clear_range(domain, start_pfn, last_pfn);
@@ -1085,11 +1089,10 @@ struct page *domain_unmap(struct dmar_domain *domain,
unsigned long start_pfn,
unsigned long last_pfn)
{
- int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
struct page *freelist = NULL;

- BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
- BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+ BUG_ON(!domain_pfn_supported(domain, start_pfn));
+ BUG_ON(!domain_pfn_supported(domain, last_pfn));
BUG_ON(start_pfn > last_pfn);

/* we don't need lock here; nobody else touches the iova range */
@@ -1993,12 +1996,11 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
{
struct dma_pte *first_pte = NULL, *pte = NULL;
phys_addr_t uninitialized_var(pteval);
- int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
unsigned long sg_res;
unsigned int largepage_lvl = 0;
unsigned long lvl_pages = 0;

- BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
+ BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));

if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
return -EINVAL;
--
1.7.10.4

Jiang Liu
2014-05-06 07:05:28 UTC
According to the Intel VT-d specification, the _DSM method supporting
DMAR hotplug should exist directly under the ACPI object representing
the PCI host bridge. But some BIOSes don't conform to this, so instead
search for the _DSM method in the whole subtree starting from the
ACPI object representing the PCI host bridge.
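
The fallback is a depth-first namespace walk that stops at the first
object carrying the _DSM, which is what returning AE_CTRL_TERMINATE
from the acpi_walk_namespace() callback accomplishes. Below is a
minimal user-space sketch of that terminate-on-first-match walk; a
plain tree stands in for the ACPI namespace and all names are invented.

/* Sketch of a terminate-on-first-match tree walk (names invented). */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct node { bool has_dsm; struct node *child[2]; };

/* depth-first search; stops at the first node carrying a _DSM */
static struct node *find_dsm(struct node *n)
{
    if (!n)
        return NULL;
    if (n->has_dsm)
        return n;
    for (int i = 0; i < 2; i++) {
        struct node *hit = find_dsm(n->child[i]);
        if (hit)
            return hit;
    }
    return NULL;
}

int main(void)
{
    struct node leaf = { true,  { NULL, NULL } };
    struct node root = { false, { NULL, &leaf } };  /* host bridge object */

    printf("found=%d\n", find_dsm(&root) != NULL);  /* found=1 */
    return 0;
}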

Signed-off-by: Jiang Liu <***@linux.intel.com>
---
drivers/iommu/dmar.c | 35 +++++++++++++++++++++++++++++++----
1 file changed, 31 insertions(+), 4 deletions(-)

diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 2006e1cd8c0f..a1b436b105a0 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1921,21 +1921,48 @@ static int dmar_hotplug_remove(acpi_handle handle)
return ret;
}

-static int dmar_device_hotplug(acpi_handle handle, bool insert)
+static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
+ void *context, void **retval)
+{
+ acpi_handle *phdl = retval;
+
+ if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
+ *phdl = handle;
+ return AE_CTRL_TERMINATE;
+ }
+
+ return AE_OK;
+}
+
+int dmar_device_hotplug(acpi_handle handle, bool insert)
{
int ret;
+ acpi_handle tmp = NULL;
+ acpi_status status;

if (!dmar_in_use())
return 0;

- if (!dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD))
+ if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
+ tmp = handle;
+ } else {
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
+ ACPI_UINT32_MAX,
+ dmar_get_dsm_handle,
+ NULL, NULL, &tmp);
+ if (ACPI_FAILURE(status)) {
+ pr_warn("Failed to locate _DSM method.\n");
+ return -ENXIO;
+ }
+ }
+ if (tmp == NULL)
return 0;

down_write(&dmar_global_lock);
if (insert)
- ret = dmar_hotplug_insert(handle);
+ ret = dmar_hotplug_insert(tmp);
else
- ret = dmar_hotplug_remove(handle);
+ ret = dmar_hotplug_remove(tmp);
up_write(&dmar_global_lock);

return ret;
--
1.7.10.4

Jiang Liu
2014-05-06 07:05:30 UTC
Enhance error recovery in intel_enable_irq_remapping() by tearing down
all data structures created before the point of failure.

Signed-off-by: Jiang Liu <***@linux.intel.com>
---
drivers/iommu/intel_irq_remapping.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 9f1c86566452..f65b68d3de10 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -673,9 +673,11 @@ static int __init intel_enable_irq_remapping(void)
return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
- /*
- * handle error condition gracefully here!
- */
+ for_each_iommu(iommu, drhd)
+ if (ecap_ir_support(iommu->ecap)) {
+ iommu_disable_irq_remapping(iommu);
+ intel_teardown_irq_remapping(iommu);
+ }

if (x2apic_present)
pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
--
1.7.10.4

Jiang Liu
2014-05-06 07:05:23 UTC
Introduce helper function iova_size() and use it to simplify the IOTLB
flush code in the Intel IOMMU driver.

Signed-off-by: Jiang Liu <***@linux.intel.com>
---
drivers/iommu/intel-iommu.c | 7 +++----
include/linux/iova.h | 5 +++++
2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 7b54f4a08bb9..0ebea100108f 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3116,10 +3116,10 @@ static void flush_unmaps(void)
/* On real hardware multiple invalidations are expensive */
if (cap_caching_mode(iommu->cap))
iommu_flush_iotlb_psi(iommu, domain->id,
- iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1,
+ iova->pfn_lo, iova_size(iova),
!deferred_flush[i].freelist[j], 0);
else {
- mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
+ mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
}
@@ -3902,8 +3902,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
rcu_read_lock();
for_each_active_iommu(iommu, drhd)
iommu_flush_iotlb_psi(iommu, si_domain->id,
- iova->pfn_lo,
- iova->pfn_hi - iova->pfn_lo + 1,
+ iova->pfn_lo, iova_size(iova),
!freelist, 0);
rcu_read_unlock();
dma_free_pagelist(freelist);
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 3277f4711349..19e81d5ccb6d 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -34,6 +34,11 @@ struct iova_domain {
unsigned long dma_32bit_pfn;
};

+static inline unsigned long iova_size(struct iova *iova)
+{
+ return iova->pfn_hi - iova->pfn_lo + 1;
+}
+
struct iova *alloc_iova_mem(void);
void free_iova_mem(struct iova *iova);
void free_iova(struct iova_domain *iovad, unsigned long pfn);
--
1.7.10.4

Jiang Liu
2014-05-06 07:05:13 UTC
For virtual machine domains, domain->id is a virtual id, and the real
domain id written into the context entry is dynamically allocated. So
use the real domain id instead of domain->id when flushing IOTLBs for
virtual machine domains.

Signed-off-by: Jiang Liu <***@linux.intel.com>
---
drivers/iommu/intel-iommu.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 612fd441b225..0b3e6981a2da 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1824,7 +1824,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
(((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
- iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
+ iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
} else {
iommu_flush_write_buffer(iommu);
}
--
1.7.10.4

Jiang Liu
2014-05-06 07:05:17 UTC
Static identity and virtual machine domains may still be cached in the
iommu->domain_ids array after the corresponding IOMMUs have been removed
from domain->iommu_bmp. So check domain->iommu_bmp before decreasing
domain->iommu_count in free_dmar_iommu(), otherwise an in-use domain
data structure may be freed.

Signed-off-by: Jiang Liu <***@linux.intel.com>
---
drivers/iommu/intel-iommu.c | 11 ++++-------
1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0ab0cd236b16..1f10fcd1c696 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -425,6 +425,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
struct device *dev);
static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
struct device *dev);
+static int domain_detach_iommu(struct dmar_domain *domain,
+ struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
@@ -1467,8 +1469,7 @@ static int iommu_init_domains(struct intel_iommu *iommu)
static void free_dmar_iommu(struct intel_iommu *iommu)
{
struct dmar_domain *domain;
- int i, count;
- unsigned long flags;
+ int i;

if ((iommu->domains) && (iommu->domain_ids)) {
for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
@@ -1481,11 +1482,7 @@ static void free_dmar_iommu(struct intel_iommu *iommu)

domain = iommu->domains[i];
clear_bit(i, iommu->domain_ids);
-
- spin_lock_irqsave(&domain->iommu_lock, flags);
- count = --domain->iommu_count;
- spin_unlock_irqrestore(&domain->iommu_lock, flags);
- if (count == 0)
+ if (domain_detach_iommu(domain, iommu) == 0)
domain_exit(domain);
}
}
--
1.7.10.4
Jiang Liu
2014-05-06 07:05:26 UTC
Introduce functions to support dynamic allocation and release of IOMMU
seq_ids, which will be used to support DMAR hotplug.

Also rename IOMMU_UNITS_SUPPORTED to DMAR_UNITS_SUPPORTED.
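
The allocator is just a find-first-zero-bit scan over a static bitmap;
unlike the old monotonically increasing counter, freed seq_ids become
reusable after hot-remove. A minimal user-space sketch of the same
pattern (all names invented):

/* Sketch of a bitmap-based id allocator (names invented). */
#include <stdio.h>

#define MAX_IDS 64

static unsigned long bitmap;            /* one bit per allocated id */

static int alloc_id(void)
{
    for (int id = 0; id < MAX_IDS; id++) {
        if (!(bitmap & (1UL << id))) {  /* find_first_zero_bit() */
            bitmap |= 1UL << id;
            return id;
        }
    }
    return -1;                          /* caller maps this to -ENOSPC */
}

static void free_id(int id)
{
    if (id >= 0 && id < MAX_IDS)
        bitmap &= ~(1UL << id);
}

int main(void)
{
    int a = alloc_id(), b = alloc_id();

    printf("a=%d b=%d\n", a, b);        /* a=0 b=1 */
    free_id(a);
    printf("next=%d\n", alloc_id());    /* next=0: freed id is recycled */
    return 0;
}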

Signed-off-by: Jiang Liu <***@linux.intel.com>
---
drivers/iommu/dmar.c | 40 ++++++++++++++++++++++++++++++++++------
drivers/iommu/intel-iommu.c | 13 +++----------
include/linux/dmar.h | 6 ++++++
3 files changed, 43 insertions(+), 16 deletions(-)

diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index acfceb25a2d2..7a871607cedb 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -69,6 +69,7 @@ LIST_HEAD(dmar_drhd_units);
struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1;
+static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);
@@ -928,11 +929,32 @@ out:
return err;
}

+static int dmar_alloc_seq_id(struct intel_iommu *iommu)
+{
+ iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
+ DMAR_UNITS_SUPPORTED);
+ if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
+ iommu->seq_id = -1;
+ } else {
+ set_bit(iommu->seq_id, dmar_seq_ids);
+ sprintf(iommu->name, "dmar%d", iommu->seq_id);
+ }
+
+ return iommu->seq_id;
+}
+
+static void dmar_free_seq_id(struct intel_iommu *iommu)
+{
+ if (iommu->seq_id >= 0) {
+ clear_bit(iommu->seq_id, dmar_seq_ids);
+ iommu->seq_id = -1;
+ }
+}
+
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
u32 ver, sts;
- static int iommu_allocated = 0;
int agaw = 0;
int msagaw = 0;
int err;
@@ -946,13 +968,16 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
if (!iommu)
return -ENOMEM;

- iommu->seq_id = iommu_allocated++;
- sprintf (iommu->name, "dmar%d", iommu->seq_id);
+ if (dmar_alloc_seq_id(iommu) < 0) {
+ pr_err("IOMMU: failed to allocate seq_id\n");
+ err = -ENOSPC;
+ goto error;
+ }

err = map_iommu(iommu, drhd->reg_base_addr);
if (err) {
pr_err("IOMMU: failed to map %s\n", iommu->name);
- goto error;
+ goto error_free_seq_id;
}

err = -EINVAL;
@@ -996,9 +1021,11 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
drhd->iommu = iommu;
return 0;

- err_unmap:
+err_unmap:
unmap_iommu(iommu);
- error:
+error_free_seq_id:
+ dmar_free_seq_id(iommu);
+error:
kfree(iommu);
return err;
}
@@ -1020,6 +1047,7 @@ static void free_iommu(struct intel_iommu *iommu)
if (iommu->reg)
unmap_iommu(iommu);

+ dmar_free_seq_id(iommu);
kfree(iommu);
}

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index b696bcd3f6e0..361de6697d8a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -328,17 +328,10 @@ static int hw_pass_through = 1;
/* si_domain contains mulitple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)

-/* define the limit of IOMMUs supported in each domain */
-#ifdef CONFIG_X86
-# define IOMMU_UNITS_SUPPORTED MAX_IO_APICS
-#else
-# define IOMMU_UNITS_SUPPORTED 64
-#endif
-
struct dmar_domain {
int id; /* domain id */
int nid; /* node id */
- DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
+ DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
/* bitmap of iommus this domain uses*/

struct list_head devices; /* all devices' list */
@@ -2719,12 +2712,12 @@ static int __init init_dmars(void)
* threaded kernel __init code path all other access are read
* only
*/
- if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
+ if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
g_num_of_iommus++;
continue;
}
printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
- IOMMU_UNITS_SUPPORTED);
+ DMAR_UNITS_SUPPORTED);
}

g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index fac8ca34f9a8..c8a576bc3a98 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -30,6 +30,12 @@

struct acpi_dmar_header;

+#ifdef CONFIG_X86
+# define DMAR_UNITS_SUPPORTED MAX_IO_APICS
+#else
+# define DMAR_UNITS_SUPPORTED 64
+#endif
+
/* DMAR Flags */
#define DMAR_INTR_REMAP 0x1
#define DMAR_X2APIC_OPT_OUT 0x2
--
1.7.10.4
Jiang Liu
2014-05-06 07:05:24 UTC
IOMMU units may be dynamically attached to and detached from domains,
so we should scan all active IOMMU units when computing the
iommu_snooping flag for a domain, instead of scanning only the IOMMU
units currently associated with the domain.

Also check the snooping and superpage capabilities when hot-adding DMAR
units.
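
The new "skip" argument lets the hot-add path ask what the flag would
be without the given unit, so it can reject a unit that would weaken
existing domains. A minimal user-space sketch of that check (all names
invented, not the kernel API):

/* Sketch of the "skip" capability check used for hot-add. */
#include <stdio.h>
#include <stdbool.h>

struct unit { bool snooping; };

/* common snooping capability over all units except 'skip' */
static bool common_snooping(struct unit *u, int n, struct unit *skip)
{
    for (int i = 0; i < n; i++)
        if (&u[i] != skip && !u[i].snooping)
            return false;
    return true;
}

int main(void)
{
    struct unit units[3] = { { true }, { true }, { false } };
    struct unit *added = &units[2];     /* unit being hot-added */

    /* reject the new unit if it lacks a capability everyone else has */
    if (!added->snooping && common_snooping(units, 3, added))
        puts("reject hot-add: would weaken iommu_snooping");
    return 0;
}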

Signed-off-by: Jiang Liu <***@linux.intel.com>
---
drivers/iommu/intel-iommu.c | 42 ++++++++++++++++++++++++------------------
1 file changed, 24 insertions(+), 18 deletions(-)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0ebea100108f..080b4f24bd7c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -633,50 +633,56 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
rcu_read_unlock();
}

-static void domain_update_iommu_snooping(struct dmar_domain *domain)
+static int domain_update_iommu_snooping(struct intel_iommu *skip)
{
- int i;
-
- domain->iommu_snooping = 1;
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ int ret = 1;

- for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
- if (!ecap_sc_support(g_iommus[i]->ecap)) {
- domain->iommu_snooping = 0;
- break;
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ if (iommu != skip) {
+ if (!ecap_sc_support(iommu->ecap)) {
+ ret = 0;
+ break;
+ }
}
}
+ rcu_read_unlock();
+
+ return ret;
}

-static void domain_update_iommu_superpage(struct dmar_domain *domain)
+static int domain_update_iommu_superpage(struct intel_iommu *skip)
{
struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu = NULL;
+ struct intel_iommu *iommu;
int mask = 0xf;

if (!intel_iommu_superpage) {
- domain->iommu_superpage = 0;
- return;
+ return 0;
}

/* set iommu_superpage to the smallest common denominator */
rcu_read_lock();
for_each_active_iommu(iommu, drhd) {
- mask &= cap_super_page_val(iommu->cap);
- if (!mask) {
- break;
+ if (iommu != skip) {
+ mask &= cap_super_page_val(iommu->cap);
+ if (!mask)
+ break;
}
}
rcu_read_unlock();

- domain->iommu_superpage = fls(mask);
+ return fls(mask);
}

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
domain_update_iommu_coherency(domain);
- domain_update_iommu_snooping(domain);
- domain_update_iommu_superpage(domain);
+ domain->iommu_snooping = domain_update_iommu_snooping(NULL);
+ domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}

static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
--
1.7.10.4
Jiang Liu
2014-05-06 07:05:19 UTC
Simplify include/linux/dmar.h a bit based on the fact that
both CONFIG_INTEL_IOMMU and CONFIG_IRQ_REMAP select CONFIG_DMAR_TABLE.

Signed-off-by: Jiang Liu <***@linux.intel.com>
---
include/linux/dmar.h | 50 ++++++++++++++++++--------------------------------
1 file changed, 18 insertions(+), 32 deletions(-)

diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 23c8db129560..1deece46a0ca 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -114,22 +114,30 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
/* Intel IOMMU detection */
extern int detect_intel_iommu(void);
extern int enable_drhd_fault_handling(void);
-#else
-struct dmar_pci_notify_info;
-static inline int detect_intel_iommu(void)
+
+#ifdef CONFIG_INTEL_IOMMU
+extern int iommu_detected, no_iommu;
+extern int intel_iommu_init(void);
+extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header);
+extern int dmar_parse_one_atsr(struct acpi_dmar_header *header);
+extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
+#else /* !CONFIG_INTEL_IOMMU: */
+static inline int intel_iommu_init(void) { return -ENODEV; }
+static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
- return -ENODEV;
+ return 0;
}
-
-static inline int dmar_table_init(void)
+static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header)
{
- return -ENODEV;
+ return 0;
}
-static inline int enable_drhd_fault_handling(void)
+static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
- return -1;
+ return 0;
}
-#endif /* !CONFIG_DMAR_TABLE */
+#endif /* CONFIG_INTEL_IOMMU */
+
+#endif /* CONFIG_DMAR_TABLE */

struct irte {
union {
@@ -177,26 +185,4 @@ extern int dmar_set_interrupt(struct intel_iommu *iommu);
extern irqreturn_t dmar_fault(int irq, void *dev_id);
extern int arch_setup_dmar_msi(unsigned int irq);

-#ifdef CONFIG_INTEL_IOMMU
-extern int iommu_detected, no_iommu;
-extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header);
-extern int dmar_parse_one_atsr(struct acpi_dmar_header *header);
-extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
-extern int intel_iommu_init(void);
-#else /* !CONFIG_INTEL_IOMMU: */
-static inline int intel_iommu_init(void) { return -ENODEV; }
-static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header)
-{
- return 0;
-}
-static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header)
-{
- return 0;
-}
-static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
-{
- return 0;
-}
-#endif /* CONFIG_INTEL_IOMMU */
-
#endif /* __DMAR_H__ */
--
1.7.10.4
Jiang Liu
2014-05-06 07:05:12 UTC
For virtual machine and static identity domains, devices from different
PCI segments may be associated with the same domain. So
iommu_support_dev_iotlb() should also match the PCI segment number
(i.e. the IOMMU unit) when searching for dev_iotlb-capable devices.

Signed-off-by: Jiang Liu <***@linux.intel.com>
---
drivers/iommu/intel-iommu.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f256ffc02e29..612fd441b225 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1274,7 +1274,8 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,

spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry(info, &domain->devices, link)
- if (info->bus == bus && info->devfn == devfn) {
+ if (info->iommu == iommu && info->bus == bus &&
+ info->devfn == devfn) {
found = 1;
break;
}
--
1.7.10.4
Jiang Liu
2014-05-06 07:05:31 UTC
Implement the callback functions required by the intel-iommu driver
to support DMAR unit hotplug.

Signed-off-by: Jiang Liu <***@linux.intel.com>
---
drivers/iommu/intel-iommu.c | 206 +++++++++++++++++++++++++++++++------------
1 file changed, 151 insertions(+), 55 deletions(-)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f8db3788d08a..fabafb7a0c9a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1127,8 +1127,11 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
unsigned long flags;

root = (struct root_entry *)alloc_pgtable_page(iommu->node);
- if (!root)
+ if (!root) {
+ pr_err("IOMMU: allocating root entry for %s failed\n",
+ iommu->name);
return -ENOMEM;
+ }

__iommu_flush_cache(iommu, root, ROOT_SIZE);

@@ -1468,7 +1471,7 @@ static int iommu_init_domains(struct intel_iommu *iommu)
return 0;
}

-static void free_dmar_iommu(struct intel_iommu *iommu)
+static void disable_dmar_iommu(struct intel_iommu *iommu)
{
struct dmar_domain *domain;
int i;
@@ -1492,11 +1495,16 @@ static void free_dmar_iommu(struct intel_iommu *iommu)

if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
+}

- kfree(iommu->domains);
- kfree(iommu->domain_ids);
- iommu->domains = NULL;
- iommu->domain_ids = NULL;
+static void free_dmar_iommu(struct intel_iommu *iommu)
+{
+ if ((iommu->domains) && (iommu->domain_ids)) {
+ kfree(iommu->domains);
+ kfree(iommu->domain_ids);
+ iommu->domains = NULL;
+ iommu->domain_ids = NULL;
+ }

g_iommus[iommu->seq_id] = NULL;

@@ -2692,6 +2700,41 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
return 0;
}

+static void intel_iommu_init_qi(struct intel_iommu *iommu)
+{
+ /*
+ * Start from the sane iommu hardware state.
+ * If the queued invalidation is already initialized by us
+ * (for example, while enabling interrupt-remapping) then
+ * we got the things already rolling from a sane state.
+ */
+ if (!iommu->qi) {
+ /*
+ * Clear any previous faults.
+ */
+ dmar_fault(-1, iommu);
+ /*
+ * Disable queued invalidation if supported and already enabled
+ * before OS handover.
+ */
+ dmar_disable_qi(iommu);
+ }
+
+ if (dmar_enable_qi(iommu)) {
+ /*
+ * Queued Invalidate not enabled, use Register Based Invalidate
+ */
+ iommu->flush.flush_context = __iommu_flush_context;
+ iommu->flush.flush_iotlb = __iommu_flush_iotlb;
+ pr_info("IOMMU: %s using Register based invalidation\n",
+ iommu->name);
+ } else {
+ iommu->flush.flush_context = qi_flush_context;
+ iommu->flush.flush_iotlb = qi_flush_iotlb;
+ pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
+ }
+}
+
static int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
@@ -2720,6 +2763,10 @@ static int __init init_dmars(void)
DMAR_UNITS_SUPPORTED);
}

+ /* Preallocate enough resources for IOMMU hot-addition */
+ if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
+ g_num_of_iommus = DMAR_UNITS_SUPPORTED;
+
g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
GFP_KERNEL);
if (!g_iommus) {
@@ -2748,58 +2795,14 @@ static int __init init_dmars(void)
* among all IOMMU's. Need to Split it later.
*/
ret = iommu_alloc_root_entry(iommu);
- if (ret) {
- printk(KERN_ERR "IOMMU: allocate root entry failed\n");
+ if (ret)
goto free_iommu;
- }
if (!ecap_pass_through(iommu->ecap))
hw_pass_through = 0;
}

- /*
- * Start from the sane iommu hardware state.
- */
- for_each_active_iommu(iommu, drhd) {
- /*
- * If the queued invalidation is already initialized by us
- * (for example, while enabling interrupt-remapping) then
- * we got the things already rolling from a sane state.
- */
- if (iommu->qi)
- continue;
-
- /*
- * Clear any previous faults.
- */
- dmar_fault(-1, iommu);
- /*
- * Disable queued invalidation if supported and already enabled
- * before OS handover.
- */
- dmar_disable_qi(iommu);
- }
-
- for_each_active_iommu(iommu, drhd) {
- if (dmar_enable_qi(iommu)) {
- /*
- * Queued Invalidate not enabled, use Register Based
- * Invalidate
- */
- iommu->flush.flush_context = __iommu_flush_context;
- iommu->flush.flush_iotlb = __iommu_flush_iotlb;
- printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
- "invalidation\n",
- iommu->seq_id,
- (unsigned long long)drhd->reg_base_addr);
- } else {
- iommu->flush.flush_context = qi_flush_context;
- iommu->flush.flush_iotlb = qi_flush_iotlb;
- printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
- "invalidation\n",
- iommu->seq_id,
- (unsigned long long)drhd->reg_base_addr);
- }
- }
+ for_each_active_iommu(iommu, drhd)
+ intel_iommu_init_qi(iommu);

if (iommu_pass_through)
iommu_identity_mapping |= IDENTMAP_ALL;
@@ -2885,8 +2888,10 @@ static int __init init_dmars(void)
return 0;

free_iommu:
- for_each_active_iommu(iommu, drhd)
+ for_each_active_iommu(iommu, drhd) {
+ disable_dmar_iommu(iommu);
free_dmar_iommu(iommu);
+ }
kfree(deferred_flush);
free_g_iommus:
kfree(g_iommus);
@@ -3771,9 +3776,100 @@ int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
return 0;
}

+static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
+{
+ int ret = 0;
+ struct intel_iommu *iommu = dmaru->iommu;
+
+ if (g_iommus[iommu->seq_id])
+ return 0;
+
+ if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
+ pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
+ iommu->name);
+ return -ENXIO;
+ }
+ if (!ecap_sc_support(iommu->ecap) &&
+ domain_update_iommu_snooping(iommu)) {
+ pr_warn("IOMMU: %s doesn't support snooping.\n",
+ iommu->name);
+ return -ENXIO;
+ }
+ if ((cap_super_page_val(iommu->cap) &
+ (1 << domain_update_iommu_superpage(iommu))) == 0) {
+ pr_warn("IOMMU: %s doesn't support large page.\n",
+ iommu->name);
+ return -ENXIO;
+ }
+
+ /*
+ * Disable translation if already enabled prior to OS handover.
+ */
+ if (iommu->gcmd & DMA_GCMD_TE)
+ iommu_disable_translation(iommu);
+
+ g_iommus[iommu->seq_id] = iommu;
+ ret = iommu_init_domains(iommu);
+ if (ret == 0)
+ ret = iommu_alloc_root_entry(iommu);
+ if (ret)
+ goto out;
+
+ if (dmaru->ignored) {
+ /*
+ * we always have to disable PMRs or DMA may fail on this device
+ */
+ if (force_on)
+ iommu_disable_protect_mem_regions(iommu);
+ return 0;
+ }
+
+ intel_iommu_init_qi(iommu);
+ iommu_flush_write_buffer(iommu);
+ ret = dmar_set_interrupt(iommu);
+ if (ret)
+ goto disable_iommu;
+
+ iommu_set_root_entry(iommu);
+ iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+ iommu_enable_translation(iommu);
+
+ if (si_domain) {
+ ret = iommu_attach_domain(si_domain, iommu);
+ if (ret < 0 || si_domain->id != ret)
+ goto disable_iommu;
+ domain_attach_iommu(si_domain, iommu);
+ }
+
+ iommu_disable_protect_mem_regions(iommu);
+ return 0;
+
+disable_iommu:
+ disable_dmar_iommu(iommu);
+out:
+ free_dmar_iommu(iommu);
+ return ret;
+}
+
int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
- return intel_iommu_enabled ? -ENOSYS : 0;
+ int ret = 0;
+ struct intel_iommu *iommu = dmaru->iommu;
+
+ if (!intel_iommu_enabled)
+ return 0;
+ if (iommu == NULL)
+ return -EINVAL;
+
+ if (insert) {
+ ret = intel_iommu_add(dmaru);
+ } else {
+ disable_dmar_iommu(iommu);
+ free_dmar_iommu(iommu);
+ }
+
+ return ret;
}

static void intel_iommu_free_dmars(void)
--
1.7.10.4
Jiang Liu
2014-05-06 07:05:21 UTC
Introduce intel_unmap() to reduce duplicated code in intel_unmap_sg()
and intel_unmap_page().

Also let dma_pte_free_pagetable() call dma_pte_clear_range() directly,
so callers only need to call dma_pte_free_pagetable().

Signed-off-by: Jiang Liu <***@linux.intel.com>
---
drivers/iommu/intel-iommu.c | 77 +++++++++++--------------------------------
1 file changed, 20 insertions(+), 57 deletions(-)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 75d7c5e3f77d..cf083c8a223e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -986,6 +986,8 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
BUG_ON(start_pfn > last_pfn);

+ dma_pte_clear_range(domain, start_pfn, last_pfn);
+
/* We don't need lock here; nobody else touches the iova range */
dma_pte_free_level(domain, agaw_to_level(domain->agaw),
domain->pgd, 0, start_pfn, last_pfn);
@@ -2030,12 +2032,14 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
/* It is large page*/
if (largepage_lvl > 1) {
pteval |= DMA_PTE_LARGE_PAGE;
- /* Ensure that old small page tables are removed to make room
- for superpage, if they exist. */
- dma_pte_clear_range(domain, iov_pfn,
- iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
+ lvl_pages = lvl_to_nr_pages(largepage_lvl);
+ /*
+ * Ensure that old small page tables are
+ * removed to make room for superpage,
+ * if they exist.
+ */
dma_pte_free_pagetable(domain, iov_pfn,
- iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
+ iov_pfn + lvl_pages - 1);
} else {
pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
}
@@ -3163,9 +3167,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *f
spin_unlock_irqrestore(&async_umap_flush_lock, flags);
}

-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
{
struct dmar_domain *domain;
unsigned long start_pfn, last_pfn;
@@ -3209,6 +3211,13 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
}
}

+static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ intel_unmap(dev, dev_addr);
+}
+
static void *intel_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
@@ -3245,56 +3254,15 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, struct dma_attrs *attrs)
{
- int order;
-
- size = PAGE_ALIGN(size);
- order = get_order(size);
-
- intel_unmap_page(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
- free_pages((unsigned long)vaddr, order);
+ intel_unmap(dev, dma_handle);
+ free_pages((unsigned long)vaddr, get_order(PAGE_ALIGN(size)));
}

static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- struct dmar_domain *domain;
- unsigned long start_pfn, last_pfn;
- struct iova *iova;
- struct intel_iommu *iommu;
- struct page *freelist;
-
- if (iommu_no_mapping(dev))
- return;
-
- domain = find_domain(dev);
- BUG_ON(!domain);
-
- iommu = domain_get_iommu(domain);
-
- iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
- if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
- (unsigned long long)sglist[0].dma_address))
- return;
-
- start_pfn = mm_to_dma_pfn(iova->pfn_lo);
- last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
-
- freelist = domain_unmap(domain, start_pfn, last_pfn);
-
- if (intel_iommu_strict) {
- iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
- last_pfn - start_pfn + 1, !freelist, 0);
- /* free iova */
- __free_iova(&domain->iovad, iova);
- dma_free_pagelist(freelist);
- } else {
- add_unmap(domain, iova, freelist);
- /*
- * queue up the release of the unmap to save the 1/6th of the
- * cpu used up by the iotlb flush operation...
- */
- }
+ intel_unmap(dev, sglist[0].dma_address);
}

static int intel_nontranslate_map_sg(struct device *hddev,
@@ -3358,13 +3326,8 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele

ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
if (unlikely(ret)) {
- /* clear the page */
- dma_pte_clear_range(domain, start_vpfn,
- start_vpfn + size - 1);
- /* free page tables */
dma_pte_free_pagetable(domain, start_vpfn,
start_vpfn + size - 1);
- /* free iova */
__free_iova(&domain->iovad, iova);
return 0;
}
--
1.7.10.4
Jiang Liu
2014-05-06 07:05:16 UTC
Permalink
Check that the same domain id is allocated for si_domain on each IOMMU;
otherwise IOTLB flushes for si_domain will fail, because the flush
request is tagged with si_domain->id, which must match the domain id
programmed into the context entries on every IOMMU.

The rules for allocating and managing domain ids are now:
1) For normal and static identity domains, the domain id is allocated
when the domain structure is created, and this id is written into
the context entry.
2) For virtual machine domains, a virtual id is allocated when the
domain is created. When a virtual machine domain is bound to an
iommu, a real domain id is allocated on demand, and that id is
written into the context entry. So domain->id for a virtual machine
domain may differ from the domain id written into the context entry
(the one used by the hardware).
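
As an illustrative sketch only (not part of the diff; it reuses the
helpers below, and hw_domain_id() is a hypothetical name), the id that
ends up in a context entry can be thought of as:

	/* Callers hold iommu->lock, as in domain_context_mapping_one(). */
	static int hw_domain_id(struct dmar_domain *domain,
				struct intel_iommu *iommu)
	{
		if (domain_type_is_vm(domain))
			/* look up, or allocate on demand, the real id */
			return iommu_attach_vm_domain(domain, iommu);
		/* normal/si domains: id fixed at domain creation */
		return domain->id;
	}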

Signed-off-by: Jiang Liu <***@linux.intel.com>
---
drivers/iommu/intel-iommu.c | 46 ++++++++++++++++++++++++-------------------
1 file changed, 26 insertions(+), 20 deletions(-)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 29f613e7eea3..0ab0cd236b16 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1551,15 +1551,27 @@ static int iommu_attach_domain(struct dmar_domain *domain,

spin_lock_irqsave(&iommu->lock, flags);
num = __iommu_attach_domain(domain, iommu);
+ spin_unlock_irqrestore(&iommu->lock, flags);
if (num < 0)
pr_err("IOMMU: no free domain ids\n");
- else
- domain->id = num;
- spin_unlock_irqrestore(&iommu->lock, flags);

return num;
}

+static int iommu_attach_vm_domain(struct dmar_domain *domain,
+ struct intel_iommu *iommu)
+{
+ int num;
+ unsigned long ndomains;
+
+ ndomains = cap_ndoms(iommu->cap);
+ for_each_set_bit(num, iommu->domain_ids, ndomains)
+ if (iommu->domains[num] == domain)
+ return num;
+
+ return __iommu_attach_domain(domain, iommu);
+}
+
static void iommu_detach_domain(struct dmar_domain *domain,
struct intel_iommu *iommu)
{
@@ -1766,8 +1778,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
struct context_entry *context;
unsigned long flags;
struct dma_pte *pgd;
- unsigned long num;
- unsigned long ndomains;
int id;
int agaw;
struct device_domain_info *info = NULL;
@@ -1792,20 +1802,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
pgd = domain->pgd;

if (domain_type_is_vm_or_si(domain)) {
- int found = 0;
-
- /* find an available domain id for this device in iommu */
- ndomains = cap_ndoms(iommu->cap);
- for_each_set_bit(num, iommu->domain_ids, ndomains) {
- if (iommu->domains[num] == domain) {
- id = num;
- found = 1;
- break;
- }
- }
-
- if (found == 0) {
- id = __iommu_attach_domain(domain, iommu);
+ if (domain_type_is_vm(domain)) {
+ id = iommu_attach_vm_domain(domain, iommu);
if (id < 0) {
spin_unlock_irqrestore(&iommu->lock, flags);
pr_err("IOMMU: no free domain ids\n");
@@ -2281,7 +2279,8 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
domain = alloc_domain(0);
if (!domain)
goto error;
- if (iommu_attach_domain(domain, iommu) < 0) {
+ domain->id = iommu_attach_domain(domain, iommu);
+ if (domain->id < 0) {
free_domain_mem(domain);
domain = NULL;
goto error;
@@ -2442,6 +2441,7 @@ static int __init si_domain_init(int hw)
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
int nid, ret = 0;
+ bool first = true;

si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
if (!si_domain)
@@ -2452,6 +2452,12 @@ static int __init si_domain_init(int hw)
if (ret < 0) {
domain_exit(si_domain);
return -EFAULT;
+ } else if (first) {
+ si_domain->id = ret;
+ first = false;
+ } else if (si_domain->id != ret) {
+ domain_exit(si_domain);
+ return -EFAULT;
}
domain_attach_iommu(si_domain, iommu);
}
--
1.7.10.4
Jiang Liu
2014-05-06 07:05:27 UTC
Permalink
On Intel platforms, an IO Hub (PCI/PCIe host bridge) may contain DMAR
units, so DMAR device hotplug must be supported in order to support
PCI host bridge hotplug.

According to Section 8.8 "Remapping Hardware Unit Hot Plug" of the
"Intel Virtualization Technology for Directed I/O Architecture
Specification, Rev 2.2", the ACPI BIOS should implement an ACPI _DSM
method under the ACPI object for the PCI host bridge to support DMAR
hotplug.
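
A minimal detection sketch, mirroring dmar_detect_dsm() introduced
below: a host bridge advertises DMAR hotplug support when its ACPI
object implements the _DSM with the DMAR hotplug UUID and the DRHD
query function.

	if (!acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
			    1 << DMAR_DSM_FUNC_DRHD))
		return 0;	/* no DMAR hotplug support under this bridge */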

This patch introduces interfaces to parse the ACPI _DSM method for
DMAR unit hotplug, and implements the state machines for DMAR unit
hot-addition and hot-removal, as sketched below.
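
Roughly, the hot-add state machine implemented by dmar_hotplug_insert()
below walks the _DSM buffer once per step; this condensed fragment
elides the rollback of completed steps on failure:

	int ret, drhd_count = 0;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_validate_one_drhd, (void *)1);
	if (!ret)
		ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
					     &dmar_parse_one_drhd,
					     (void *)&drhd_count);
	if (!ret)
		ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
					     &dmar_parse_one_rhsa, NULL);
	if (!ret)
		ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
					     &dmar_parse_one_atsr, NULL);
	if (!ret)
		ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
					     &dmar_hp_add_drhd, NULL);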

The PCI host bridge hotplug driver should call dmar_device_add()
before scanning the PCI devices connected under the host bridge on
hot-addition, and dmar_device_remove() after destroying all those PCI
devices on hot-removal.
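
A hedged sketch of the expected call sites; the host_bridge_* helpers
here are hypothetical stand-ins for the real pci_root integration in a
later patch of this series:

	static int host_bridge_hot_add(acpi_handle handle)
	{
		if (dmar_device_add(handle))	/* before scanning PCI devices */
			return -ENXIO;
		/* ... enumerate PCI devices behind the bridge ... */
		return 0;
	}

	static void host_bridge_hot_remove(acpi_handle handle)
	{
		/* ... all PCI devices behind the bridge destroyed ... */
		dmar_device_remove(handle);	/* after PCI teardown */
	}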

Signed-off-by: Jiang Liu <***@linux.intel.com>
---
drivers/iommu/dmar.c | 268 +++++++++++++++++++++++++++++++++--
drivers/iommu/intel-iommu.c | 78 +++++++++-
drivers/iommu/intel_irq_remapping.c | 5 +
include/linux/dmar.h | 33 +++++
4 files changed, 370 insertions(+), 14 deletions(-)

diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 7a871607cedb..2006e1cd8c0f 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -74,7 +74,7 @@ static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

-static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
+static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
/*
* add INCLUDE_ALL at the tail, so scan the list will find it at
@@ -335,24 +335,45 @@ static struct notifier_block dmar_pci_bus_nb = {
.priority = INT_MIN,
};

+static struct dmar_drhd_unit *
+dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
+{
+ struct dmar_drhd_unit *dmaru;
+
+ list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
+ if (dmaru->segment == drhd->segment &&
+ dmaru->reg_base_addr == drhd->address)
+ return dmaru;
+
+ return NULL;
+}
+
/**
* dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
* structure which uniquely represent one DMA remapping hardware unit
* present in the platform
*/
-static int __init
-dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
+static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_hardware_unit *drhd;
struct dmar_drhd_unit *dmaru;
int ret = 0;

drhd = (struct acpi_dmar_hardware_unit *)header;
- dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
+ dmaru = dmar_find_dmaru(drhd);
+ if (dmaru)
+ goto out;
+
+ dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
if (!dmaru)
return -ENOMEM;

- dmaru->hdr = header;
+ /*
+ * If header is allocated from slab by ACPI _DSM method, we need to
+ * copy the content because the memory buffer will be freed on return.
+ */
+ dmaru->hdr = (void *)(dmaru + 1);
+ memcpy(dmaru->hdr, header, header->length);
dmaru->reg_base_addr = drhd->address;
dmaru->segment = drhd->segment;
dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
@@ -373,6 +394,7 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
}
dmar_register_drhd_unit(dmaru);

+out:
if (arg)
(*(int *)arg)++;

@@ -410,8 +432,7 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
}

#ifdef CONFIG_ACPI_NUMA
-static int __init
-dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
+static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_rhsa *rhsa;
struct dmar_drhd_unit *drhd;
@@ -805,14 +826,22 @@ dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
return -EINVAL;
}

- addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
+ if (arg)
+ addr = ioremap(drhd->address, VTD_PAGE_SIZE);
+ else
+ addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
if (!addr) {
pr_warn("IOMMU: can't validate: %llx\n", drhd->address);
return -EINVAL;
}
+
cap = dmar_readq(addr + DMAR_CAP_REG);
ecap = dmar_readq(addr + DMAR_ECAP_REG);
- early_iounmap(addr, VTD_PAGE_SIZE);
+
+ if (arg)
+ iounmap(addr);
+ else
+ early_iounmap(addr, VTD_PAGE_SIZE);

if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
warn_invalid_dmar(drhd->address, " returns all ones");
@@ -1681,12 +1710,17 @@ int __init dmar_ir_support(void)
return dmar->flags & 0x1;
}

+/* Check whether DMAR units are in use */
+static inline bool dmar_in_use(void)
+{
+ return irq_remapping_enabled || intel_iommu_enabled;
+}
+
static int __init dmar_free_unused_resources(void)
{
struct dmar_drhd_unit *dmaru, *dmaru_n;

- /* DMAR units are in use */
- if (irq_remapping_enabled || intel_iommu_enabled)
+ if (dmar_in_use())
return 0;

if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
@@ -1704,3 +1738,215 @@ static int __init dmar_free_unused_resources(void)

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);
+
+/*
+ * DMAR Hotplug Support
+ * For more details, please refer to Intel(R) Virtualization Technology
+ * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
+ * "Remapping Hardware Unit Hot Plug".
+ */
+static u8 dmar_hp_uuid[] = {
+ /* 0000 */ 0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
+ /* 0008 */ 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
+};
+
+/*
+ * Currently there's only one revision and BIOS will not check the revision id,
+ * so use 0 for safety.
+ */
+#define DMAR_DSM_REV_ID 0
+#define DMAR_DSM_FUNC_DRHD 1
+#define DMAR_DSM_FUNC_ATSR 2
+#define DMAR_DSM_FUNC_RHSA 3
+
+static inline bool dmar_detect_dsm(acpi_handle handle, int func)
+{
+ return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
+}
+
+static int dmar_walk_dsm_resource(acpi_handle handle, int func,
+ dmar_res_handler_t handler, void *arg)
+{
+ int ret = -ENODEV;
+ union acpi_object *obj;
+ struct acpi_dmar_header *start;
+ struct dmar_res_callback callback;
+ static int res_type[] = {
+ [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
+ [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ATSR,
+ [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_HARDWARE_AFFINITY,
+ };
+
+ if (!dmar_detect_dsm(handle, func))
+ return 0;
+
+ obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
+ func, NULL, ACPI_TYPE_BUFFER);
+ if (!obj)
+ return -ENODEV;
+
+ memset(&callback, 0, sizeof(callback));
+ callback.cb[res_type[func]] = handler;
+ callback.arg[res_type[func]] = arg;
+ start = (struct acpi_dmar_header *)obj->buffer.pointer;
+ ret = dmar_walk_resources(start, obj->buffer.length, &callback);
+
+ ACPI_FREE(obj);
+
+ return ret;
+}
+
+static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
+{
+ int ret;
+ struct dmar_drhd_unit *dmaru;
+
+ dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
+ if (!dmaru)
+ return -ENODEV;
+
+ ret = dmar_ir_hotplug(dmaru, true);
+ if (ret == 0)
+ ret = dmar_iommu_hotplug(dmaru, true);
+
+ return ret;
+}
+
+static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
+{
+ int i, ret;
+ struct device *dev;
+ struct dmar_drhd_unit *dmaru;
+
+ dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
+ if (!dmaru)
+ return 0;
+
+ /*
+ * All PCI devices managed by this unit should have been destroyed.
+ */
+ if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt)
+ for_each_active_dev_scope(dmaru->devices,
+ dmaru->devices_cnt, i, dev)
+ return -EBUSY;
+
+ ret = dmar_ir_hotplug(dmaru, false);
+ if (ret == 0)
+ ret = dmar_iommu_hotplug(dmaru, false);
+
+ return ret;
+}
+
+static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
+{
+ struct dmar_drhd_unit *dmaru;
+
+ dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
+ if (dmaru) {
+ list_del_rcu(&dmaru->list);
+ synchronize_rcu();
+ dmar_free_drhd(dmaru);
+ }
+
+ return 0;
+}
+
+static int dmar_hotplug_insert(acpi_handle handle)
+{
+ int ret;
+ int drhd_count = 0;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_validate_one_drhd, (void *)1);
+ if (ret)
+ goto out;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_parse_one_drhd, (void *)&drhd_count);
+ if (ret == 0 && drhd_count == 0) {
+ pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
+ goto out;
+ } else if (ret) {
+ goto release_drhd;
+ }
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
+ &dmar_parse_one_rhsa, NULL);
+ if (ret)
+ goto release_drhd;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
+ &dmar_parse_one_atsr, NULL);
+ if (ret)
+ goto release_atsr;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_add_drhd, NULL);
+ if (!ret)
+ return 0;
+
+ dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_remove_drhd, NULL);
+release_atsr:
+ dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
+ &dmar_release_one_atsr, NULL);
+release_drhd:
+ dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_release_drhd, NULL);
+out:
+ return ret;
+}
+
+static int dmar_hotplug_remove(acpi_handle handle)
+{
+ int ret;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
+ &dmar_check_one_atsr, NULL);
+ if (ret)
+ return ret;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_remove_drhd, NULL);
+ if (ret == 0) {
+ WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
+ &dmar_release_one_atsr, NULL));
+ WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_release_drhd, NULL));
+ } else {
+ dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_add_drhd, NULL);
+ }
+
+ return ret;
+}
+
+static int dmar_device_hotplug(acpi_handle handle, bool insert)
+{
+ int ret;
+
+ if (!dmar_in_use())
+ return 0;
+
+ if (!dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD))
+ return 0;
+
+ down_write(&dmar_global_lock);
+ if (insert)
+ ret = dmar_hotplug_insert(handle);
+ else
+ ret = dmar_hotplug_remove(handle);
+ up_write(&dmar_global_lock);
+
+ return ret;
+}
+
+int dmar_device_add(acpi_handle handle)
+{
+ return dmar_device_hotplug(handle, true);
+}
+
+int dmar_device_remove(acpi_handle handle)
+{
+ return dmar_device_hotplug(handle, false);
+}
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 361de6697d8a..f8db3788d08a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3671,17 +3671,48 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
return 0;
}

-int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
+static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
+{
+ struct dmar_atsr_unit *atsru;
+ struct acpi_dmar_atsr *tmp;
+
+ list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
+ tmp = (struct acpi_dmar_atsr *)atsru->hdr;
+ if (atsr->segment != tmp->segment)
+ continue;
+ if (atsr->header.length != tmp->header.length)
+ continue;
+ if (memcmp(atsr, tmp, atsr->header.length) == 0)
+ return atsru;
+ }
+
+ return NULL;
+}
+
+int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;

+ if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
+ return 0;
+
atsr = container_of(hdr, struct acpi_dmar_atsr, header);
- atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
+ atsru = dmar_find_atsr(atsr);
+ if (atsru)
+ return 0;
+
+ atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
if (!atsru)
return -ENOMEM;

- atsru->hdr = hdr;
+ /*
+ * If memory is allocated from slab by ACPI _DSM method, we need to
+ * copy the memory content because the memory buffer will be freed
+ * on return.
+ */
+ atsru->hdr = (void *)(atsru + 1);
+ memcpy(atsru->hdr, hdr, hdr->length);
atsru->include_all = atsr->flags & 0x1;
if (!atsru->include_all) {
atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
@@ -3704,6 +3735,47 @@ static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
kfree(atsru);
}

+int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
+{
+ struct acpi_dmar_atsr *atsr;
+ struct dmar_atsr_unit *atsru;
+
+ atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+ atsru = dmar_find_atsr(atsr);
+ if (atsru) {
+ list_del_rcu(&atsru->list);
+ synchronize_rcu();
+ intel_iommu_free_atsr(atsru);
+ }
+
+ return 0;
+}
+
+int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
+{
+ int i;
+ struct device *dev;
+ struct acpi_dmar_atsr *atsr;
+ struct dmar_atsr_unit *atsru;
+
+ atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+ atsru = dmar_find_atsr(atsr);
+ if (!atsru)
+ return 0;
+
+ if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
+ for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
+ i, dev)
+ return -EBUSY;
+
+ return 0;
+}
+
+int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
+{
+ return intel_iommu_enabled ? -ENOSYS : 0;
+}
+
static void intel_iommu_free_dmars(void)
{
struct dmar_rmrr_unit *rmrru, *rmrr_n;
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 9b174893f0f5..aa309a8e4d3a 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -1144,3 +1144,8 @@ struct irq_remap_ops intel_irq_remap_ops = {
.msi_setup_irq = intel_msi_setup_irq,
.setup_hpet_msi = intel_setup_hpet_msi,
};
+
+int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
+{
+ return irq_remapping_enabled ? -ENOSYS : 0;
+}
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index c8a576bc3a98..9c06bb4b5b14 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -120,6 +120,8 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
/* Intel IOMMU detection */
extern int detect_intel_iommu(void);
extern int enable_drhd_fault_handling(void);
+extern int dmar_device_add(acpi_handle handle);
+extern int dmar_device_remove(acpi_handle handle);

static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
{
@@ -131,17 +133,48 @@ extern int iommu_detected, no_iommu;
extern int intel_iommu_init(void);
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
+extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
+extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg);
+extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
#else /* !CONFIG_INTEL_IOMMU: */
static inline int intel_iommu_init(void) { return -ENODEV; }
+
#define dmar_parse_one_rmrr dmar_res_noop
#define dmar_parse_one_atsr dmar_res_noop
+#define dmar_check_one_atsr dmar_res_noop
+#define dmar_release_one_atsr dmar_res_noop
+
static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
return 0;
}
+
+static inline int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
+{
+ return 0;
+}
#endif /* CONFIG_INTEL_IOMMU */

+#ifdef CONFIG_IRQ_REMAP
+extern int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
+#else /* CONFIG_IRQ_REMAP */
+static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
+{ return 0; }
+#endif /* CONFIG_IRQ_REMAP */
+
+#else /* CONFIG_DMAR_TABLE */
+
+static inline int dmar_device_add(acpi_handle handle)
+{
+ return 0;
+}
+
+static inline int dmar_device_remove(acpi_handle handle)
+{
+ return 0;
+}
+
#endif /* CONFIG_DMAR_TABLE */

struct irte {
--
1.7.10.4

Jiang Liu
2014-05-06 07:05:29 UTC
Permalink
Implement the callback functions required for the intel_irq_remapping
driver to support DMAR unit hotplug.
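
For context, the generic DMAR hotplug code (dmar_hp_add_drhd() in
dmar.c) drives the new callback roughly like this on hot-add, so a
failure in the interrupt remapping step prevents the IOMMU step:

	ret = dmar_ir_hotplug(dmaru, true);	/* set up IR on the new unit */
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, true);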

Signed-off-by: Jiang Liu <***@linux.intel.com>
---
drivers/iommu/intel_irq_remapping.c | 222 ++++++++++++++++++++++++++---------
1 file changed, 169 insertions(+), 53 deletions(-)

diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index aa309a8e4d3a..9f1c86566452 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -36,7 +36,6 @@ struct hpet_scope {

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
-static int ir_ioapic_num, ir_hpet_num;

/*
* Lock ordering:
@@ -320,7 +319,7 @@ static int set_ioapic_sid(struct irte *irte, int apic)

down_read(&dmar_global_lock);
for (i = 0; i < MAX_IO_APICS; i++) {
- if (ir_ioapic[i].id == apic) {
+ if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
break;
}
@@ -347,7 +346,7 @@ static int set_hpet_sid(struct irte *irte, u8 id)

down_read(&dmar_global_lock);
for (i = 0; i < MAX_HPET_TBS; i++) {
- if (ir_hpet[i].id == id) {
+ if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
break;
}
@@ -446,17 +445,17 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

-
-static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
+static int intel_setup_irq_remapping(struct intel_iommu *iommu)
{
struct ir_table *ir_table;
struct page *pages;
unsigned long *bitmap;

- ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
- GFP_ATOMIC);
+ if (iommu->ir_table)
+ return 0;

- if (!iommu->ir_table)
+ ir_table = kzalloc(sizeof(struct ir_table), GFP_ATOMIC);
+ if (!ir_table)
return -ENOMEM;

pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
@@ -465,7 +464,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
if (!pages) {
pr_err("IR%d: failed to allocate pages of order %d\n",
iommu->seq_id, INTR_REMAP_PAGE_ORDER);
- kfree(iommu->ir_table);
+ kfree(ir_table);
return -ENOMEM;
}

@@ -480,11 +479,22 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)

ir_table->base = page_address(pages);
ir_table->bitmap = bitmap;
+ iommu->ir_table = ir_table;

- iommu_set_irq_remapping(iommu, mode);
return 0;
}

+static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
+{
+ if (iommu && iommu->ir_table) {
+ free_pages((unsigned long)iommu->ir_table->base,
+ INTR_REMAP_PAGE_ORDER);
+ kfree(iommu->ir_table->bitmap);
+ kfree(iommu->ir_table);
+ iommu->ir_table = NULL;
+ }
+}
+
/*
* Disable Interrupt Remapping.
*/
@@ -639,9 +649,10 @@ static int __init intel_enable_irq_remapping(void)
if (!ecap_ir_support(iommu->ecap))
continue;

- if (intel_setup_irq_remapping(iommu, eim))
+ if (intel_setup_irq_remapping(iommu))
goto error;

+ iommu_set_irq_remapping(iommu, eim);
setup = 1;
}

@@ -672,12 +683,13 @@ error:
return -1;
}

-static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
- struct intel_iommu *iommu)
+static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
+ struct intel_iommu *iommu,
+ struct acpi_dmar_hardware_unit *drhd)
{
struct acpi_dmar_pci_path *path;
u8 bus;
- int count;
+ int count, free = -1;

bus = scope->bus;
path = (struct acpi_dmar_pci_path *)(scope + 1);
@@ -693,19 +705,36 @@ static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
PCI_SECONDARY_BUS);
path++;
}
- ir_hpet[ir_hpet_num].bus = bus;
- ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function);
- ir_hpet[ir_hpet_num].iommu = iommu;
- ir_hpet[ir_hpet_num].id = scope->enumeration_id;
- ir_hpet_num++;
+
+ for (count = 0; count < MAX_HPET_TBS; count++) {
+ if (ir_hpet[count].iommu == iommu &&
+ ir_hpet[count].id == scope->enumeration_id)
+ return 0;
+ else if (ir_hpet[count].iommu == NULL && free == -1)
+ free = count;
+ }
+ if (free == -1) {
+ pr_warn("Exceeded Max HPET blocks\n");
+ return -ENOSPC;
+ }
+
+ ir_hpet[free].iommu = iommu;
+ ir_hpet[free].id = scope->enumeration_id;
+ ir_hpet[free].bus = bus;
+ ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
+ pr_info("HPET id %d under DRHD base 0x%Lx\n",
+ scope->enumeration_id, drhd->address);
+
+ return 0;
}

-static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
- struct intel_iommu *iommu)
+static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
+ struct intel_iommu *iommu,
+ struct acpi_dmar_hardware_unit *drhd)
{
struct acpi_dmar_pci_path *path;
u8 bus;
- int count;
+ int count, free = -1;

bus = scope->bus;
path = (struct acpi_dmar_pci_path *)(scope + 1);
@@ -722,54 +751,63 @@ static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
path++;
}

- ir_ioapic[ir_ioapic_num].bus = bus;
- ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function);
- ir_ioapic[ir_ioapic_num].iommu = iommu;
- ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
- ir_ioapic_num++;
+ for (count = 0; count < MAX_IO_APICS; count++) {
+ if (ir_ioapic[count].iommu == iommu &&
+ ir_ioapic[count].id == scope->enumeration_id)
+ return 0;
+ else if (ir_ioapic[count].iommu == NULL && free == -1)
+ free = count;
+ }
+ if (free == -1) {
+ pr_warn("Exceeded Max IO APICS\n");
+ return -ENOSPC;
+ }
+
+ ir_ioapic[free].bus = bus;
+ ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
+ ir_ioapic[free].iommu = iommu;
+ ir_ioapic[free].id = scope->enumeration_id;
+ pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n",
+ scope->enumeration_id, drhd->address, iommu->seq_id);
+
+ return 0;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
struct intel_iommu *iommu)
{
+ int ret = 0;
struct acpi_dmar_hardware_unit *drhd;
struct acpi_dmar_device_scope *scope;
void *start, *end;

drhd = (struct acpi_dmar_hardware_unit *)header;
-
start = (void *)(drhd + 1);
end = ((void *)drhd) + header->length;

- while (start < end) {
+ while (start < end && ret == 0) {
scope = start;
- if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
- if (ir_ioapic_num == MAX_IO_APICS) {
- printk(KERN_WARNING "Exceeded Max IO APICS\n");
- return -1;
- }
-
- printk(KERN_INFO "IOAPIC id %d under DRHD base "
- " 0x%Lx IOMMU %d\n", scope->enumeration_id,
- drhd->address, iommu->seq_id);
+ if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
+ ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
+ else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
+ ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
+ start += scope->length;
+ }

- ir_parse_one_ioapic_scope(scope, iommu);
- } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
- if (ir_hpet_num == MAX_HPET_TBS) {
- printk(KERN_WARNING "Exceeded Max HPET blocks\n");
- return -1;
- }
+ return ret;
+}

- printk(KERN_INFO "HPET id %d under DRHD base"
- " 0x%Lx\n", scope->enumeration_id,
- drhd->address);
+static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
+{
+ int i;

- ir_parse_one_hpet_scope(scope, iommu);
- }
- start += scope->length;
- }
+ for (i = 0; i < MAX_HPET_TBS; i++)
+ if (ir_hpet[i].iommu == iommu)
+ ir_hpet[i].iommu = NULL;

- return 0;
+ for (i = 0; i < MAX_IO_APICS; i++)
+ if (ir_ioapic[i].iommu == iommu)
+ ir_ioapic[i].iommu = NULL;
}

/*
@@ -1145,7 +1183,85 @@ struct irq_remap_ops intel_irq_remap_ops = {
.setup_hpet_msi = intel_setup_hpet_msi,
};

+/*
+ * Support of Interrupt Remapping Unit Hotplug
+ */
+static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
+{
+ int ret;
+ int eim = x2apic_enabled();
+
+ if (eim && !ecap_eim_support(iommu->ecap)) {
+ pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
+ iommu->reg_phys, iommu->ecap);
+ return -ENODEV;
+ }
+
+ if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
+ pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
+ iommu->reg_phys);
+ return -ENODEV;
+ }
+
+ /* TODO: check all IOAPICs are covered by IOMMU */
+
+ /* Setup Interrupt-remapping now. */
+ ret = intel_setup_irq_remapping(iommu);
+ if (ret) {
+ pr_err("DRHD %Lx: failed to allocate resource\n",
+ iommu->reg_phys);
+ ir_remove_ioapic_hpet_scope(iommu);
+ return ret;
+ }
+
+ if (!iommu->qi) {
+ /* Clear previous faults. */
+ dmar_fault(-1, iommu);
+ iommu_disable_irq_remapping(iommu);
+ dmar_disable_qi(iommu);
+ }
+
+ /* Enable queued invalidation */
+ ret = dmar_enable_qi(iommu);
+ if (!ret) {
+ iommu_set_irq_remapping(iommu, eim);
+ } else {
+ pr_err("DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n",
+ iommu->reg_phys, iommu->ecap, ret);
+ intel_teardown_irq_remapping(iommu);
+ ir_remove_ioapic_hpet_scope(iommu);
+ }
+
+ return ret;
+}
+
int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
- return irq_remapping_enabled ? -ENOSYS : 0;
+ int ret = 0;
+ struct intel_iommu *iommu = dmaru->iommu;
+
+ if (!irq_remapping_enabled)
+ return 0;
+ if (iommu == NULL)
+ return -EINVAL;
+ if (!ecap_ir_support(iommu->ecap))
+ return 0;
+
+ if (insert) {
+ if (!iommu->ir_table)
+ ret = dmar_ir_add(dmaru, iommu);
+ } else {
+ if (iommu->ir_table) {
+ if (!bitmap_empty(iommu->ir_table->bitmap,
+ INTR_REMAP_TABLE_ENTRIES)) {
+ ret = -EBUSY;
+ } else {
+ iommu_disable_irq_remapping(iommu);
+ intel_teardown_irq_remapping(iommu);
+ ir_remove_ioapic_hpet_scope(iommu);
+ }
+ }
+ }
+
+ return ret;
}
--
1.7.10.4

Jiang Liu
2014-05-06 07:05:15 UTC
Permalink
Introduce domain_attach_iommu()/domain_detach_iommu() and refine
iommu_attach_domain()/iommu_detach_domain() to make the code symmetric
and improve readability, as sketched below.
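
A sketch of the now-symmetric pairing (names from the patch below,
error handling elided):

	/* attach: claim a per-IOMMU domain id, then account the IOMMU */
	if (iommu_attach_domain(domain, iommu) < 0)
		goto error;
	domain_attach_iommu(domain, iommu);

	/* detach: drop the accounting, then release the domain id */
	domain_detach_iommu(domain, iommu);
	if (!domain_type_is_vm_or_si(domain))
		iommu_detach_domain(domain, iommu);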

Signed-off-by: Jiang Liu <***@linux.intel.com>
---
drivers/iommu/intel-iommu.c | 146 ++++++++++++++++++++++++-------------------
1 file changed, 80 insertions(+), 66 deletions(-)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 63f351a39c63..29f613e7eea3 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1525,32 +1525,39 @@ static struct dmar_domain *alloc_domain(int flags)
return domain;
}

-static int iommu_attach_domain(struct dmar_domain *domain,
- struct intel_iommu *iommu)
+static int __iommu_attach_domain(struct dmar_domain *domain,
+ struct intel_iommu *iommu)
{
int num;
unsigned long ndomains;
- unsigned long flags;

ndomains = cap_ndoms(iommu->cap);
-
- spin_lock_irqsave(&iommu->lock, flags);
-
num = find_first_zero_bit(iommu->domain_ids, ndomains);
- if (num >= ndomains) {
- spin_unlock_irqrestore(&iommu->lock, flags);
- printk(KERN_ERR "IOMMU: no free domain ids\n");
- return -ENOMEM;
+ if (num < ndomains) {
+ set_bit(num, iommu->domain_ids);
+ iommu->domains[num] = domain;
+ } else {
+ num = -ENOSPC;
}

- domain->id = num;
- domain->iommu_count++;
- set_bit(num, iommu->domain_ids);
- set_bit(iommu->seq_id, domain->iommu_bmp);
- iommu->domains[num] = domain;
+ return num;
+}
+
+static int iommu_attach_domain(struct dmar_domain *domain,
+ struct intel_iommu *iommu)
+{
+ int num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ num = __iommu_attach_domain(domain, iommu);
+ if (num < 0)
+ pr_err("IOMMU: no free domain ids\n");
+ else
+ domain->id = num;
spin_unlock_irqrestore(&iommu->lock, flags);

- return 0;
+ return num;
}

static void iommu_detach_domain(struct dmar_domain *domain,
@@ -1560,17 +1567,53 @@ static void iommu_detach_domain(struct dmar_domain *domain,
int num, ndomains;

spin_lock_irqsave(&iommu->lock, flags);
- ndomains = cap_ndoms(iommu->cap);
- for_each_set_bit(num, iommu->domain_ids, ndomains) {
- if (iommu->domains[num] == domain) {
- clear_bit(num, iommu->domain_ids);
- iommu->domains[num] = NULL;
- break;
+ if (domain_type_is_vm_or_si(domain)) {
+ ndomains = cap_ndoms(iommu->cap);
+ for_each_set_bit(num, iommu->domain_ids, ndomains) {
+ if (iommu->domains[num] == domain) {
+ clear_bit(num, iommu->domain_ids);
+ iommu->domains[num] = NULL;
+ break;
+ }
}
+ } else {
+ clear_bit(domain->id, iommu->domain_ids);
+ iommu->domains[domain->id] = NULL;
}
spin_unlock_irqrestore(&iommu->lock, flags);
}

+static void domain_attach_iommu(struct dmar_domain *domain,
+ struct intel_iommu *iommu)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&domain->iommu_lock, flags);
+ if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
+ domain->iommu_count++;
+ if (domain->iommu_count == 1)
+ domain->nid = iommu->node;
+ domain_update_iommu_cap(domain);
+ }
+ spin_unlock_irqrestore(&domain->iommu_lock, flags);
+}
+
+static int domain_detach_iommu(struct dmar_domain *domain,
+ struct intel_iommu *iommu)
+{
+ unsigned long flags;
+ int count = INT_MAX;
+
+ spin_lock_irqsave(&domain->iommu_lock, flags);
+ if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
+ count = --domain->iommu_count;
+ domain_update_iommu_cap(domain);
+ }
+ spin_unlock_irqrestore(&domain->iommu_lock, flags);
+
+ return count;
+}
+
static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;

@@ -1708,9 +1751,7 @@ static void domain_exit(struct dmar_domain *domain)
/* clear attached or cached domains */
rcu_read_lock();
for_each_active_iommu(iommu, drhd)
- if (domain_type_is_vm(domain) ||
- test_bit(iommu->seq_id, domain->iommu_bmp))
- iommu_detach_domain(domain, iommu);
+ iommu_detach_domain(domain, iommu);
rcu_read_unlock();

dma_free_pagelist(freelist);
@@ -1764,16 +1805,12 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
}

if (found == 0) {
- num = find_first_zero_bit(iommu->domain_ids, ndomains);
- if (num >= ndomains) {
+ id = __iommu_attach_domain(domain, iommu);
+ if (id < 0) {
spin_unlock_irqrestore(&iommu->lock, flags);
- printk(KERN_ERR "IOMMU: no free domain ids\n");
+ pr_err("IOMMU: no free domain ids\n");
return -EFAULT;
}
-
- set_bit(num, iommu->domain_ids);
- iommu->domains[num] = domain;
- id = num;
}

/* Skip top levels of page tables for
@@ -1832,14 +1869,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
iommu_enable_dev_iotlb(info);
spin_unlock_irqrestore(&iommu->lock, flags);

- spin_lock_irqsave(&domain->iommu_lock, flags);
- if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
- domain->iommu_count++;
- if (domain->iommu_count == 1)
- domain->nid = iommu->node;
- domain_update_iommu_cap(domain);
- }
- spin_unlock_irqrestore(&domain->iommu_lock, flags);
+ domain_attach_iommu(domain, iommu);
+
return 0;
}

@@ -2104,7 +2135,7 @@ static inline void unlink_domain_info(struct device_domain_info *info)
static void domain_remove_dev_info(struct dmar_domain *domain)
{
struct device_domain_info *info;
- unsigned long flags, flags2;
+ unsigned long flags;

spin_lock_irqsave(&device_domain_lock, flags);
while (!list_empty(&domain->devices)) {
@@ -2118,16 +2149,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)

if (domain_type_is_vm(domain)) {
iommu_detach_dependent_devices(info->iommu, info->dev);
- /* clear this iommu in iommu_bmp, update iommu count
- * and capabilities
- */
- spin_lock_irqsave(&domain->iommu_lock, flags2);
- if (test_and_clear_bit(info->iommu->seq_id,
- domain->iommu_bmp)) {
- domain->iommu_count--;
- domain_update_iommu_cap(domain);
- }
- spin_unlock_irqrestore(&domain->iommu_lock, flags2);
+ domain_detach_iommu(domain, info->iommu);
}

free_devinfo_mem(info);
@@ -2259,11 +2281,12 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
domain = alloc_domain(0);
if (!domain)
goto error;
- if (iommu_attach_domain(domain, iommu)) {
+ if (iommu_attach_domain(domain, iommu) < 0) {
free_domain_mem(domain);
domain = NULL;
goto error;
}
+ domain_attach_iommu(domain, iommu);
free = domain;
if (domain_init(domain, gaw))
goto error;
@@ -2426,10 +2449,11 @@ static int __init si_domain_init(int hw)

for_each_active_iommu(iommu, drhd) {
ret = iommu_attach_domain(si_domain, iommu);
- if (ret) {
+ if (ret < 0) {
domain_exit(si_domain);
return -EFAULT;
}
+ domain_attach_iommu(si_domain, iommu);
}

if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
@@ -4099,19 +4123,9 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
spin_unlock_irqrestore(&device_domain_lock, flags);

if (found == 0) {
- unsigned long tmp_flags;
- spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
- clear_bit(iommu->seq_id, domain->iommu_bmp);
- domain->iommu_count--;
- domain_update_iommu_cap(domain);
- spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
-
- if (!domain_type_is_vm_or_si(domain)) {
- spin_lock_irqsave(&iommu->lock, tmp_flags);
- clear_bit(domain->id, iommu->domain_ids);
- iommu->domains[domain->id] = NULL;
- spin_unlock_irqrestore(&iommu->lock, tmp_flags);
- }
+ domain_detach_iommu(domain, iommu);
+ if (!domain_type_is_vm_or_si(domain))
+ iommu_detach_domain(domain, iommu);
}
}
--
1.7.10.4
Joerg Roedel
2014-05-19 09:23:58 UTC
Permalink
Hey David,
Patches 1-13 are bugfixes and code improvements for the current drivers.
Patches 14-17 enhance the DMAR framework to support hotplug.
Patch 18 enhances the Intel interrupt remapping driver to support hotplug.
Patch 19 enhances error handling in the Intel IR driver.
Patch 20 enhances the Intel IOMMU driver to support hotplug.
Patch 21 enhances the ACPI pci_root driver to handle DMAR units.
Have you had a chance to look into this patch-set? I wonder whether
it should be merged for 3.16.


Joerg

