diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index afd1e0a228955c60240f20480f0affd5cbeb18bc..3269c3b33295f58d85893dcf2cafe25ac12f76eb 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -195,7 +195,7 @@ static int patch_do_basic_check(struct device *dev,
 {
 	u64 dma_mask = *dev->dma_mask;
 
-	if (dir != DMA_FROM_DEVICE || dir != DMA_BIDIRECTIONAL)
+	if (dir != DMA_FROM_DEVICE && dir != DMA_BIDIRECTIONAL)
 		return false;
 
 	if (dma_mask <= DMA_BIT_MASK(32))
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index fbe2601ba3e11d40365eee0dd852f04a4e546fd3..0d1352b7a7a10666ba82392c57eaf1754fdb0c6b 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -1234,7 +1234,10 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
 	page = dma_alloc_contiguous(dev, alloc_size, gfp);
 	if (!page) {
 		if (is_zhaoxin_kh40000()) {
-			if (!(gfp & (GFP_DMA | GFP_DMA32))) {
+			if (node == NUMA_NO_NODE) {
+				page = __alloc_pages_nodemask(gfp, get_order(alloc_size),
+						numa_mem_id(), NULL);
+			} else if (!(gfp & (GFP_DMA | GFP_DMA32))) {
 				nodemask_t nodemask;
 
 				nodes_clear(nodemask);
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index ea0044a1f2dc143a23ab82b17237d95d67012b21..70d18bcc73cbefbe53616ecfd4601bbfa8cc2929 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -113,7 +113,10 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 again:
 	if (!page) {
 		if (is_zhaoxin_kh40000()) {
-			if (!(gfp & (GFP_DMA | GFP_DMA32))) {
+			if (node == NUMA_NO_NODE) {
+				page = __alloc_pages_nodemask(gfp, get_order(size),
+						numa_mem_id(), NULL);
+			} else if (!(gfp & (GFP_DMA | GFP_DMA32))) {
 				nodemask_t nodemask;
 
 				nodes_clear(nodemask);