|
11 | 11 | #include <linux/pci.h> |
12 | 12 | #include <linux/irq.h> |
13 | 13 | #include <linux/export.h> |
| 14 | +#include <linux/irqchip/irq-msi-lib.h> |
14 | 15 | #include <asm/mshyperv.h> |
15 | 16 |
|
16 | 17 | static int hv_map_interrupt(union hv_device_id device_id, bool level, |
@@ -289,59 +290,99 @@ static void hv_teardown_msi_irq(struct pci_dev *dev, struct irq_data *irqd) |
289 | 290 | (void)hv_unmap_msi_interrupt(dev, &old_entry); |
290 | 291 | } |
291 | 292 |
|
292 | | -static void hv_msi_free_irq(struct irq_domain *domain, |
293 | | - struct msi_domain_info *info, unsigned int virq) |
294 | | -{ |
295 | | - struct irq_data *irqd = irq_get_irq_data(virq); |
296 | | - struct msi_desc *desc; |
297 | | - |
298 | | - if (!irqd) |
299 | | - return; |
300 | | - |
301 | | - desc = irq_data_get_msi_desc(irqd); |
302 | | - if (!desc || !desc->irq || WARN_ON_ONCE(!dev_is_pci(desc->dev))) |
303 | | - return; |
304 | | - |
305 | | - hv_teardown_msi_irq(to_pci_dev(desc->dev), irqd); |
306 | | -} |
307 | | - |
/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI or MSI-X Capability Structure.
 *
 * Only the Hyper-V specific message composition lives here; ack and
 * affinity are forwarded to the parent (x86 vector) domain, and the
 * default PCI-MSI mask/unmask handling is supplied by the generic MSI
 * library via MSI_FLAG_USE_DEF_CHIP_OPS (see hv_msi_parent_ops).
 */
static struct irq_chip hv_pci_msi_controller = {
	.name			= "HV-PCI-MSI",
	.irq_ack		= irq_chip_ack_parent,
	.irq_compose_msi_msg	= hv_irq_compose_msi_msg,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
};
322 | 303 |
|
/*
 * hv_init_dev_msi_info - per-device MSI domain initialization callback
 * @dev:		Device for which the MSI domain is created
 * @domain:		The per-device MSI domain being initialized
 * @real_parent:	The real parent domain of @domain
 * @info:		The domain info to fill in
 *
 * Let the generic MSI library do the common setup first, then apply the
 * Hyper-V specifics: the irqchip flags the old msi_domain_info based
 * setup carried (IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MOVE_DEFERRED) and the
 * x86 PCI-MSI prepare callback.
 *
 * Returns: true on success, false if the library initialization failed.
 */
static bool hv_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
				 struct irq_domain *real_parent, struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
		return false;

	/* Preserve the flags that the previous static irq_chip carried */
	chip->flags |= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MOVE_DEFERRED;

	info->ops->msi_prepare = pci_msi_prepare;

	return true;
}
| 318 | + |
/* Feature flags the Hyper-V MSI parent domain supports / requires */
#define HV_MSI_FLAGS_SUPPORTED	(MSI_GENERIC_FLAGS_MASK | MSI_FLAG_PCI_MSIX)
#define HV_MSI_FLAGS_REQUIRED	(MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS)

/*
 * Parent ops consumed by the generic MSI library when it creates
 * per-device MSI domains on top of this parent domain.
 */
static struct msi_parent_ops hv_msi_parent_ops = {
	.supported_flags	= HV_MSI_FLAGS_SUPPORTED,
	.required_flags		= HV_MSI_FLAGS_REQUIRED,
	.bus_select_token	= DOMAIN_BUS_NEXUS,
	.bus_select_mask	= MATCH_PCI_MSI,
	.chip_flags		= MSI_CHIP_FLAG_SET_ACK,
	.prefix			= "HV-",
	.init_dev_msi_info	= hv_init_dev_msi_info,
};
327 | 331 |
|
328 | | -static struct msi_domain_info hv_pci_msi_domain_info = { |
329 | | - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | |
330 | | - MSI_FLAG_PCI_MSIX, |
331 | | - .ops = &pci_msi_domain_ops, |
332 | | - .chip = &hv_pci_msi_controller, |
333 | | - .handler = handle_edge_irq, |
334 | | - .handler_name = "edge", |
| 332 | +static int hv_msi_domain_alloc(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs, |
| 333 | + void *arg) |
| 334 | +{ |
| 335 | + /* |
| 336 | + * TODO: The allocation bits of hv_irq_compose_msi_msg(), i.e. everything except |
| 337 | + * entry_to_msi_msg() should be in here. |
| 338 | + */ |
| 339 | + |
| 340 | + int ret; |
| 341 | + |
| 342 | + ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg); |
| 343 | + if (ret) |
| 344 | + return ret; |
| 345 | + |
| 346 | + for (int i = 0; i < nr_irqs; ++i) { |
| 347 | + irq_domain_set_info(d, virq + i, 0, &hv_pci_msi_controller, NULL, |
| 348 | + handle_edge_irq, NULL, "edge"); |
| 349 | + } |
| 350 | + return 0; |
| 351 | +} |
| 352 | + |
| 353 | +static void hv_msi_domain_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs) |
| 354 | +{ |
| 355 | + for (int i = 0; i < nr_irqs; ++i) { |
| 356 | + struct irq_data *irqd = irq_domain_get_irq_data(d, virq); |
| 357 | + struct msi_desc *desc; |
| 358 | + |
| 359 | + desc = irq_data_get_msi_desc(irqd); |
| 360 | + if (!desc || !desc->irq || WARN_ON_ONCE(!dev_is_pci(desc->dev))) |
| 361 | + continue; |
| 362 | + |
| 363 | + hv_teardown_msi_irq(to_pci_dev(desc->dev), irqd); |
| 364 | + } |
| 365 | + irq_domain_free_irqs_top(d, virq, nr_irqs); |
| 366 | +} |
| 367 | + |
/*
 * Domain ops for the Hyper-V MSI parent domain. Selection is handled by
 * the generic MSI library; alloc/free wrap the parent vector domain and
 * add the Hyper-V specific setup/teardown.
 */
static const struct irq_domain_ops hv_msi_domain_ops = {
	.select	= msi_lib_irq_domain_select,
	.alloc	= hv_msi_domain_alloc,
	.free	= hv_msi_domain_free,
};
336 | 373 |
|
337 | 374 | struct irq_domain * __init hv_create_pci_msi_domain(void) |
338 | 375 | { |
339 | 376 | struct irq_domain *d = NULL; |
340 | | - struct fwnode_handle *fn; |
341 | 377 |
|
342 | | - fn = irq_domain_alloc_named_fwnode("HV-PCI-MSI"); |
343 | | - if (fn) |
344 | | - d = pci_msi_create_irq_domain(fn, &hv_pci_msi_domain_info, x86_vector_domain); |
| 378 | + struct irq_domain_info info = { |
| 379 | + .fwnode = irq_domain_alloc_named_fwnode("HV-PCI-MSI"), |
| 380 | + .ops = &hv_msi_domain_ops, |
| 381 | + .parent = x86_vector_domain, |
| 382 | + }; |
| 383 | + |
| 384 | + if (info.fwnode) |
| 385 | + d = msi_create_parent_irq_domain(&info, &hv_msi_parent_ops); |
345 | 386 |
|
346 | 387 | /* No point in going further if we can't get an irq domain */ |
347 | 388 | BUG_ON(!d); |
|
0 commit comments