	return 0;
}
-int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq)
-{
-	if (*ctx == NULL || *afu_irq == 0) {
-		*afu_irq = 1;
-		*ctx = cxl_get_context(pdev);
-	} else {
-		(*afu_irq)++;
-		if (*afu_irq > cxl_get_max_irqs_per_process(pdev)) {
-			*ctx = list_next_entry(*ctx, extra_irq_contexts);
-			*afu_irq = 1;
-		}
-	}
-	return cxl_find_afu_irq(*ctx, *afu_irq);
-}
-/* Exported via cxl_base */
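For reference, callers were expected to drive the function removed above as a
stateful iterator. The following is a minimal sketch of that calling pattern,
assuming a driver that has already enabled nvec MSI-X vectors; the loop body
and error handling are illustrative, not a verbatim copy of the old powernv
consumer:

	struct cxl_context *ctx = NULL;	/* NULL + 0 start the iteration */
	int afu_irq = 0;
	int i, hwirq;

	for (i = 0; i < nvec; i++) {
		hwirq = cxl_next_msi_hwirq(pdev, &ctx, &afu_irq);
		if (hwirq <= 0)
			return hwirq ? hwirq : -ENOSPC;
		/* map hwirq and wire it to MSI-X vector i here */
	}

Each call bumps the AFU IRQ number within the current context; once the
per-process limit is exceeded it advances to the next context on the
extra_irq_contexts list and restarts the AFU IRQ numbering at 1.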
int cxl_set_priv(struct cxl_context *ctx, void *priv)
{
	if (!ctx)
		return -EINVAL;
	ctx->priv = priv;
	return 0;
}
EXPORT_SYMBOL_GPL(cxl_pci_disable_device);
-int cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq)
-{
-	int ret;
-	struct cxl_calls *calls;
-
-	calls = cxl_calls_get();
-	if (!calls)
-		return -EBUSY;
-
-	ret = calls->cxl_next_msi_hwirq(pdev, ctx, afu_irq);
-
-	cxl_calls_put(calls);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(cxl_next_msi_hwirq);
-
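The wrapper removed above pins the cxl module across the indirect call. A
sketch of how base.c implements the get/put pair, assuming the RCU-published
cxl_calls pointer that the struct cxl_calls hunk below belongs to (details
approximate, not verbatim):

	static struct cxl_calls *cxl_calls;

	static inline struct cxl_calls *cxl_calls_get(void)
	{
		struct cxl_calls *calls = NULL;

		rcu_read_lock();
		calls = rcu_dereference(cxl_calls);
		/* Pin the providing module so it cannot unload mid-call */
		if (calls && !try_module_get(calls->owner))
			calls = NULL;
		rcu_read_unlock();

		return calls;
	}

	static inline void cxl_calls_put(struct cxl_calls *calls)
	{
		/* The module reference taken above keeps calls valid here */
		module_put(calls->owner);
	}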
static int __init cxl_base_init(void)
{
	struct device_node *np;
	ctx->pending_afu_err = false;
	INIT_LIST_HEAD(&ctx->irq_names);
-	INIT_LIST_HEAD(&ctx->extra_irq_contexts);
/*
* When we have to destroy all contexts in cxl_context_detach_all() we
	struct rcu_head rcu;
-	/*
-	 * Only used when more interrupts are allocated via
-	 * pci_enable_msix_range than are supported in the default context, to
-	 * use additional contexts to overcome the limitation. i.e. Mellanox
-	 * CX4 only:
-	 */
-	struct list_head extra_irq_contexts;
-
	struct mm_struct *mm;
	u16 tidr;
/* Internal functions wrapped in cxl_base to allow PHB to call them */
bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu);
void _cxl_pci_disable_device(struct pci_dev *dev);
-int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq);
struct cxl_calls {
	void (*cxl_slbia)(struct mm_struct *mm);
	bool (*cxl_pci_associate_default_context)(struct pci_dev *dev, struct cxl_afu *afu);
	void (*cxl_pci_disable_device)(struct pci_dev *dev);
-	int (*cxl_next_msi_hwirq)(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq);
	struct module *owner;
};
	.cxl_slbia = cxl_slbia_core,
	.cxl_pci_associate_default_context = _cxl_pci_associate_default_context,
	.cxl_pci_disable_device = _cxl_pci_disable_device,
-	.cxl_next_msi_hwirq = _cxl_next_msi_hwirq,
	.owner = THIS_MODULE,
};
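For context, this table is handed to the built-in base code at module init. A
hedged sketch of both halves of that handshake, with guards and error paths
simplified relative to the real sources:

	/* base.c side (sketch): publish the active ops table via RCU */
	int register_cxl_calls(struct cxl_calls *calls)
	{
		if (cxl_calls)
			return -EBUSY;

		rcu_assign_pointer(cxl_calls, calls);
		return 0;
	}

	void unregister_cxl_calls(struct cxl_calls *calls)
	{
		RCU_INIT_POINTER(cxl_calls, NULL);
		synchronize_rcu();	/* wait out in-flight cxl_calls_get() */
	}

	/* main.c side (sketch): hand the table over when the module loads */
	static int __init init_cxl(void)
	{
		return register_cxl_calls(&cxl_calls);
	}

Because base.c is always built in while the rest of cxl is a module, this
indirection is what lets PHB code call into cxl without a hard symbol
dependency.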
/* Get the process element for this context */
int cxl_process_element(struct cxl_context *ctx);
-/*
- * Limit the number of interrupts that a single context can allocate via
- * cxl_start_work. If using the api with a real phb, this may be used to
- * request that additional default contexts be created when allocating
- * interrupts via pci_enable_msix_range. These will be set to the same running
- * state as the default context, and if that is running it will reuse the
- * parameters previously passed to cxl_start_context for the default context.
- */
-int cxl_set_max_irqs_per_process(struct pci_dev *dev, int irqs);
-int cxl_get_max_irqs_per_process(struct pci_dev *dev);
-
-/*
- * Use to simultaneously iterate over hardware interrupt numbers, contexts and
- * afu interrupt numbers allocated for the device via pci_enable_msix_range and
- * is a useful convenience function when working with hardware that has
- * limitations on the number of interrupts per process. *ctx and *afu_irq
- * should be NULL and 0 to start the iteration.
- */
-int cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq);
-
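Putting the two removed interfaces together, the intended driver flow was: cap
the number of interrupts per context, allocate the MSI-X vectors (cxl creates
the extra default contexts behind the scenes if the vector count exceeds the
cap), then walk the vectors with the iterator. A hedged sketch; the function
name, the cap of 4, and the error handling are illustrative:

	static int afu_setup_msix(struct pci_dev *pdev, struct msix_entry *entries,
				  int nvec)
	{
		int ret;

		/* Limit each context, forcing extra contexts past the cap */
		ret = cxl_set_max_irqs_per_process(pdev, 4);
		if (ret < 0)
			return ret;

		ret = pci_enable_msix_range(pdev, entries, 1, nvec);
		if (ret < 0)
			return ret;

		/*
		 * Walk the hwirqs with cxl_next_msi_hwirq(), starting from
		 * ctx == NULL and afu_irq == 0 as described above.
		 */
		return 0;
	}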
/*
* These calls allow drivers to create their own file descriptors and make them
* identical to the cxl file descriptor user API. An example use case: