        iounmap(priv->clr_base);
}
int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int ret;

        /*
         * We assume that mapping one page is enough for the whole EQ
         * context table.  This is fine with all current HCAs, because
         * we only use 32 EQs and each EQ uses 64 bytes of context
         * memory, or 2 KB total.
         */
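        /*
         * 32 EQs * 64 bytes = 2048 bytes, comfortably below the 4 KB
         * minimum PAGE_SIZE.  An illustrative compile-time guard for
         * this assumption (the constant names here are hypothetical,
         * not from this driver) would be:
         *
         *      BUILD_BUG_ON(NUM_EQS * EQ_CONTEXT_SIZE > PAGE_SIZE);
         */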
        priv->eq_table.icm_virt = icm_virt;
        priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
        if (!priv->eq_table.icm_page)
                return -ENOMEM;
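        /*
         * The CPU never dereferences this page; it is handed to the HCA
         * only through the DMA mapping set up below, which is why a
         * highmem (GFP_HIGHUSER) allocation is acceptable here.
         */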
        priv->eq_table.icm_dma  = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
                                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
                __free_page(priv->eq_table.icm_page);
                return -ENOMEM;
        }
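        /*
         * Ask the firmware to map this single page into ICM space at
         * icm_virt; mlx4_MAP_ICM_page() wraps the MAP_ICM firmware
         * command for the one-page case.
         */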
        ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
        if (ret) {
                pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
                               PCI_DMA_BIDIRECTIONAL);
                __free_page(priv->eq_table.icm_page);
        }

        return ret;
}
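/*
 * Sketch of the expected call order from driver init/teardown (caller and
 * field names recalled from the surrounding driver, not confirmed here):
 *
 *      err = mlx4_map_eq_icm(dev, init_hca->eqc_base);  -- in mlx4_init_icm()
 *      ...
 *      mlx4_unmap_eq_icm(dev);                          -- on cleanup
 */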
void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
        pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
                       PCI_DMA_BIDIRECTIONAL);
        __free_page(priv->eq_table.icm_page);
}
int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);