
Commit aad7e7a

Author: Maxim Levitsky (committed)
net: mana: Add support for page sizes other than 4KB on ARM64
JIRA: https://issues.redhat.com/browse/RHEL-54330

commit 382d174
Author: Haiyang Zhang <haiyangz@microsoft.com>
Date:   Mon Jun 17 13:17:26 2024 -0700

    net: mana: Add support for page sizes other than 4KB on ARM64

    As defined by the MANA hardware spec, the queue size for DMA is a
    minimum of 4KB and a power of 2, and the HWC queue size has to be
    exactly 4KB.

    To support page sizes other than 4KB on ARM64, define the minimal
    queue size as a macro separate from PAGE_SIZE, which we always
    assumed to be 4KB before supporting ARM64. Also add MANA-specific
    macros and update code related to size alignment, DMA region
    calculations, etc.

    Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
    Reviewed-by: Michael Kelley <mhklinux@outlook.com>
    Link: https://lore.kernel.org/r/1718655446-6576-1-git-send-email-haiyangz@microsoft.com
    Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
1 parent: 995166a

7 files changed: 35 additions (+) and 25 deletions (−)
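The gist of the change: the device's DMA granule and minimum queue size stay at 4KB no matter what the kernel's PAGE_SIZE is, so the driver now uses its own MANA_PAGE_* macros instead of the page-size-dependent PAGE_* ones. Below is a rough standalone illustration of that idea, not kernel code: BIT()/ALIGN()/IS_ALIGNED() are re-implemented for a userspace build, and the 64KB page size is just an example configuration.

#include <stdio.h>

/* Userspace stand-ins for the kernel helpers (assumed close enough for
 * illustration; in the kernel they come from <linux/bits.h> / <linux/align.h>).
 */
#define BIT(n)           (1UL << (n))
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

/* The new MANA macros: the DMA unit is always 4KB. */
#define MANA_PAGE_SHIFT      12
#define MANA_PAGE_SIZE       BIT(MANA_PAGE_SHIFT)
#define MANA_PAGE_ALIGN(x)   ALIGN((x), MANA_PAGE_SIZE)
#define MANA_PAGE_ALIGNED(x) IS_ALIGNED((unsigned long)(x), MANA_PAGE_SIZE)
#define MANA_PFN(a)          ((a) >> MANA_PAGE_SHIFT)

int main(void)
{
	unsigned long cpu_page_size = 65536;      /* e.g. an ARM64 64KB-page kernel */
	unsigned long length = 8 * cpu_page_size; /* a 512KB, power-of-2 DMA buffer */

	/* The device is described in 4KB frames, not CPU pages. */
	printf("CPU pages: %lu, MANA pages: %lu\n",
	       length / cpu_page_size, length / MANA_PAGE_SIZE);
	printf("MANA-page aligned: %d\n", MANA_PAGE_ALIGNED(length));
	return 0;
}

On a 4KB-page kernel the two counts coincide, which is why the old PAGE_SIZE-based code worked there.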

drivers/net/ethernet/microsoft/Kconfig

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@ if NET_VENDOR_MICROSOFT
 config MICROSOFT_MANA
 	tristate "Microsoft Azure Network Adapter (MANA) support"
 	depends on PCI_MSI
-	depends on X86_64 || (ARM64 && !CPU_BIG_ENDIAN && ARM64_4K_PAGES)
+	depends on X86_64 || (ARM64 && !CPU_BIG_ENDIAN)
 	depends on PCI_HYPERV
 	select AUXILIARY_BUS
 	select PAGE_POOL

drivers/net/ethernet/microsoft/mana/gdma_main.c

Lines changed: 5 additions & 5 deletions
@@ -184,7 +184,7 @@ int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
 	dma_addr_t dma_handle;
 	void *buf;
 
-	if (length < PAGE_SIZE || !is_power_of_2(length))
+	if (length < MANA_PAGE_SIZE || !is_power_of_2(length))
 		return -EINVAL;
 
 	gmi->dev = gc->dev;
@@ -720,7 +720,7 @@ EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, NET_MANA);
 static int mana_gd_create_dma_region(struct gdma_dev *gd,
 				     struct gdma_mem_info *gmi)
 {
-	unsigned int num_page = gmi->length / PAGE_SIZE;
+	unsigned int num_page = gmi->length / MANA_PAGE_SIZE;
 	struct gdma_create_dma_region_req *req = NULL;
 	struct gdma_create_dma_region_resp resp = {};
 	struct gdma_context *gc = gd->gdma_context;
@@ -730,10 +730,10 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
 	int err;
 	int i;
 
-	if (length < PAGE_SIZE || !is_power_of_2(length))
+	if (length < MANA_PAGE_SIZE || !is_power_of_2(length))
 		return -EINVAL;
 
-	if (offset_in_page(gmi->virt_addr) != 0)
+	if (!MANA_PAGE_ALIGNED(gmi->virt_addr))
 		return -EINVAL;
 
 	hwc = gc->hwc.driver_data;
@@ -754,7 +754,7 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
 	req->page_addr_list_len = num_page;
 
 	for (i = 0; i < num_page; i++)
-		req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE;
+		req->page_addr_list[i] = gmi->dma_handle + i * MANA_PAGE_SIZE;
 
 	err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp);
 	if (err)
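With num_page computed from MANA_PAGE_SIZE, the DMA region request always enumerates 4KB frames regardless of how the buffer was allocated. A minimal userspace sketch of the resulting page_addr_list layout, assuming a hypothetical dma_handle value:

#include <stdio.h>

#define MANA_PAGE_SHIFT 12
#define MANA_PAGE_SIZE  (1UL << MANA_PAGE_SHIFT)

int main(void)
{
	unsigned long long dma_handle = 0x1f0000000ULL; /* hypothetical bus address */
	unsigned long length = 16384;                   /* 16KB: power of 2, >= 4KB */
	unsigned int num_page = length / MANA_PAGE_SIZE;
	unsigned int i;

	/* Mirrors the loop in mana_gd_create_dma_region(): one entry per 4KB frame. */
	for (i = 0; i < num_page; i++)
		printf("page_addr_list[%u] = 0x%llx\n",
		       i, dma_handle + i * MANA_PAGE_SIZE);
	return 0;
}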

drivers/net/ethernet/microsoft/mana/hw_channel.c

Lines changed: 7 additions & 7 deletions
@@ -361,12 +361,12 @@ static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
 	int err;
 
 	eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
-	if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
-		eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+	if (eq_size < MANA_MIN_QSIZE)
+		eq_size = MANA_MIN_QSIZE;
 
 	cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
-	if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
-		cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+	if (cq_size < MANA_MIN_QSIZE)
+		cq_size = MANA_MIN_QSIZE;
 
 	hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
 	if (!hwc_cq)
@@ -428,7 +428,7 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
 
 	dma_buf->num_reqs = q_depth;
 
-	buf_size = PAGE_ALIGN(q_depth * max_msg_size);
+	buf_size = MANA_PAGE_ALIGN(q_depth * max_msg_size);
 
 	gmi = &dma_buf->mem_info;
 	err = mana_gd_alloc_memory(gc, buf_size, gmi);
@@ -496,8 +496,8 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc,
 	else
 		queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);
 
-	if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
-		queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+	if (queue_size < MANA_MIN_QSIZE)
+		queue_size = MANA_MIN_QSIZE;
 
 	hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
 	if (!hwc_wq)
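For the hardware channel, the computed queue sizes are rounded up to a power of two and then clamped to MANA_MIN_QSIZE (4KB); with the old MINIMUM_SUPPORTED_PAGE_SIZE the floor would silently have become 64KB on a 64KB-page kernel. A userspace sketch of the sizing logic, using a hypothetical q_depth; roundup_pow_of_two() is re-implemented here, and the GDMA_EQE_SIZE/GDMA_CQE_SIZE values are the ones from gdma.h:

#include <stdio.h>

#define GDMA_EQE_SIZE   16
#define GDMA_CQE_SIZE   64
#define MANA_MIN_QSIZE  4096UL   /* MANA_PAGE_SIZE */

static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int q_depth = 128;  /* hypothetical depth */
	unsigned long eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
	unsigned long cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);

	if (eq_size < MANA_MIN_QSIZE)
		eq_size = MANA_MIN_QSIZE;
	if (cq_size < MANA_MIN_QSIZE)
		cq_size = MANA_MIN_QSIZE;

	/* 128 EQEs of 16B = 2KB -> clamped to 4KB; 128 CQEs of 64B = 8KB stays 8KB. */
	printf("eq_size=%lu cq_size=%lu\n", eq_size, cq_size);
	return 0;
}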

drivers/net/ethernet/microsoft/mana/mana_en.c

Lines changed: 4 additions & 4 deletions
@@ -1903,10 +1903,10 @@ static int mana_create_txq(struct mana_port_context *apc,
 	 * to prevent overflow.
 	 */
 	txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
-	BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));
+	BUILD_BUG_ON(!MANA_PAGE_ALIGNED(txq_size));
 
 	cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
-	cq_size = PAGE_ALIGN(cq_size);
+	cq_size = MANA_PAGE_ALIGN(cq_size);
 
 	gc = gd->gdma_context;
 
@@ -2203,8 +2203,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
 	if (err)
 		goto out;
 
-	rq_size = PAGE_ALIGN(rq_size);
-	cq_size = PAGE_ALIGN(cq_size);
+	rq_size = MANA_PAGE_ALIGN(rq_size);
+	cq_size = MANA_PAGE_ALIGN(cq_size);
 
 	/* Create RQ */
 	memset(&spec, 0, sizeof(spec));
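The BUILD_BUG_ON() switch is what keeps 64KB-page kernels building: txq_size is MAX_SEND_BUFFERS_PER_QUEUE * 32 = 8192 bytes, a multiple of the 4KB MANA page but not of a 64KB kernel page. A compile-time sketch of the same check, with constants taken from the headers in this commit and _Static_assert standing in for BUILD_BUG_ON:

#define MAX_SEND_BUFFERS_PER_QUEUE 256
#define MANA_PAGE_SIZE             4096UL

#define TXQ_SIZE (MAX_SEND_BUFFERS_PER_QUEUE * 32)   /* 8192 bytes */

/* Holds: 8192 is a multiple of the 4KB MANA page size. */
_Static_assert(TXQ_SIZE % MANA_PAGE_SIZE == 0, "txq not MANA-page aligned");

/* The old check against the kernel page size would not build on a 64KB-page
 * kernel, since 8192 % 65536 != 0:
 *
 *	_Static_assert(TXQ_SIZE % 65536UL == 0, "txq not kernel-page aligned");
 */

int main(void) { return 0; }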

drivers/net/ethernet/microsoft/mana/shm_channel.c

Lines changed: 7 additions & 6 deletions
@@ -6,6 +6,7 @@
 #include <linux/io.h>
 #include <linux/mm.h>
 
+#include <net/mana/gdma.h>
 #include <net/mana/shm_channel.h>
 
 #define PAGE_FRAME_L48_WIDTH_BYTES 6
@@ -155,8 +156,8 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
 		return err;
 	}
 
-	if (!PAGE_ALIGNED(eq_addr) || !PAGE_ALIGNED(cq_addr) ||
-	    !PAGE_ALIGNED(rq_addr) || !PAGE_ALIGNED(sq_addr))
+	if (!MANA_PAGE_ALIGNED(eq_addr) || !MANA_PAGE_ALIGNED(cq_addr) ||
+	    !MANA_PAGE_ALIGNED(rq_addr) || !MANA_PAGE_ALIGNED(sq_addr))
 		return -EINVAL;
 
 	if ((eq_msix_index & VECTOR_MASK) != eq_msix_index)
@@ -183,31 +184,31 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
 
 	/* EQ addr: low 48 bits of frame address */
 	shmem = (u64 *)ptr;
-	frame_addr = PHYS_PFN(eq_addr);
+	frame_addr = MANA_PFN(eq_addr);
 	*shmem = frame_addr & PAGE_FRAME_L48_MASK;
 	all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
 		(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
 	ptr += PAGE_FRAME_L48_WIDTH_BYTES;
 
 	/* CQ addr: low 48 bits of frame address */
 	shmem = (u64 *)ptr;
-	frame_addr = PHYS_PFN(cq_addr);
+	frame_addr = MANA_PFN(cq_addr);
 	*shmem = frame_addr & PAGE_FRAME_L48_MASK;
 	all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
 		(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
 	ptr += PAGE_FRAME_L48_WIDTH_BYTES;
 
 	/* RQ addr: low 48 bits of frame address */
 	shmem = (u64 *)ptr;
-	frame_addr = PHYS_PFN(rq_addr);
+	frame_addr = MANA_PFN(rq_addr);
 	*shmem = frame_addr & PAGE_FRAME_L48_MASK;
 	all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
 		(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
 	ptr += PAGE_FRAME_L48_WIDTH_BYTES;
 
 	/* SQ addr: low 48 bits of frame address */
 	shmem = (u64 *)ptr;
-	frame_addr = PHYS_PFN(sq_addr);
+	frame_addr = MANA_PFN(sq_addr);
 	*shmem = frame_addr & PAGE_FRAME_L48_MASK;
 	all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
 		(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
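PHYS_PFN() shifts by the kernel's PAGE_SHIFT, so on a 64KB-page kernel it would hand the shared-memory channel 64KB frame numbers, while the frame fields are meant to carry the 4KB DMA unit this commit introduces; MANA_PFN() always shifts by 12. A small sketch of the difference, using a hypothetical DMA address and PAGE_SHIFT = 16 to model a 64KB ARM64 kernel (PHYS_PFN is re-defined here for a userspace build):

#include <stdio.h>

#define PAGE_SHIFT      16   /* models a 64KB-page ARM64 kernel */
#define MANA_PAGE_SHIFT 12   /* the MANA DMA unit is always 4KB */

#define PHYS_PFN(a) ((a) >> PAGE_SHIFT)
#define MANA_PFN(a) ((a) >> MANA_PAGE_SHIFT)

int main(void)
{
	unsigned long long eq_addr = 0x12340000ULL;  /* hypothetical queue DMA address */

	/* With 64KB pages PHYS_PFN() yields 0x1234, but the 4KB frame number
	 * written into the shared-memory channel should be 0x12340.
	 */
	printf("PHYS_PFN=0x%llx MANA_PFN=0x%llx\n",
	       PHYS_PFN(eq_addr), MANA_PFN(eq_addr));
	return 0;
}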

include/net/mana/gdma.h

Lines changed: 9 additions & 1 deletion
@@ -225,7 +225,15 @@ struct gdma_dev {
 	struct auxiliary_device *adev;
 };
 
-#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE
+/* MANA_PAGE_SIZE is the DMA unit */
+#define MANA_PAGE_SHIFT 12
+#define MANA_PAGE_SIZE BIT(MANA_PAGE_SHIFT)
+#define MANA_PAGE_ALIGN(x) ALIGN((x), MANA_PAGE_SIZE)
+#define MANA_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), MANA_PAGE_SIZE)
+#define MANA_PFN(a) ((a) >> MANA_PAGE_SHIFT)
+
+/* Required by HW */
+#define MANA_MIN_QSIZE MANA_PAGE_SIZE
 
 #define GDMA_CQE_SIZE 64
 #define GDMA_EQE_SIZE 16

include/net/mana/mana.h

Lines changed: 2 additions & 1 deletion
@@ -40,7 +40,8 @@ enum TRI_STATE {
 
 #define MAX_SEND_BUFFERS_PER_QUEUE 256
 
-#define EQ_SIZE (8 * PAGE_SIZE)
+#define EQ_SIZE (8 * MANA_PAGE_SIZE)
+
 #define LOG2_EQ_THROTTLE 3
 
 #define MAX_PORTS_IN_MANA_DEV 256
