wwan: quectel_QMI: Bump 1.3.6 to 1.3.8

Also clean up the Makefile logic to a minimal set

Signed-off-by: Sean Khan <datapronix@protonmail.com>
Sean Khan 2025-06-03 14:57:32 -04:00
parent ca6ffaff13
commit ed919beb16
6 changed files with 563 additions and 161 deletions


@@ -1,15 +1,8 @@
#
# Copyright (C) 2015 OpenWrt.org
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk
PKG_NAME:=pcie_mhi
PKG_VERSION:=1.3.6
PKG_RELEASE:=2
PKG_VERSION:=1.3.8
PKG_RELEASE:=1
include $(INCLUDE_DIR)/kernel.mk
include $(INCLUDE_DIR)/package.mk
@@ -18,10 +11,10 @@ PKG_BUILD_PARALLEL:=1
PKG_BUILD_FLAGS:=gc-sections lto
define KernelPackage/pcie_mhi
SUBMENU:=WWAN Support
TITLE:=Kernel pcie driver for MHI device
SUBMENU:=Network Devices
TITLE:=Kernel PCIe driver for MHI device
DEPENDS:=@(TARGET_qualcommax_ipq807x||TARGET_qualcommax_ipq50xx) \
+pciids +pciutils +quectel-cm +kmod-qmi_wwan_q
+pciids +pciutils +quectel-cm
FILES:=$(PKG_BUILD_DIR)/pcie_mhi.ko
AUTOLOAD:=$(call AutoLoad,90,pcie_mhi)
endef
@@ -36,18 +29,8 @@ EXTRA_CFLAGS+= \
-Wno-missing-prototypes \
-Wno-missing-declarations
MAKE_OPTS:= \
ARCH="$(LINUX_KARCH)" \
CROSS_COMPILE="$(TARGET_CROSS)" \
EXTRA_CFLAGS="$(EXTRA_CFLAGS)" \
CXXFLAGS="$(TARGET_CXXFLAGS)" \
M="$(PKG_BUILD_DIR)" \
$(EXTRA_KCONFIG)
define Build/Compile
+$(MAKE) -C "$(LINUX_DIR)" $(strip $(MAKE_OPTS)) \
$(KERNEL_MAKE_FLAGS) \
$(PKG_JOBS) \
+$(KERNEL_MAKE) EXTRA_CFLAGS="$(EXTRA_CFLAGS)" M="$(PKG_BUILD_DIR)" \
modules
endef


@@ -1,4 +1,33 @@
Release Notes
[V1.3.8]
Date: 27/06/2024
enhancement:
1. support SDX7x's PID/VID
Release Notes
[V1.3.7]
Date: 27/03/2024
enhancement:
1. support SDX35's PID/VID
2. support IPQ QSDK MHI used with rmnetdata driver
Release Notes
[V1.3.6]
Date: 01/08/2023
enhancement:
1. support Linux Kernel V6.4
2. support change mtu
fix:
1. fix compile error on ipq's spf12.x
Release Notes
[V1.3.5]
Date: 25/02/2023
enhancement:
1. support efuse SDX sleep
2. support IPQ9574 SFE
fix:
1. cannot find the node when dialing (nodes in the /sys/bus/mhi_q/devices directory are named in hex)
[V1.3.4]
Date: 12/8/2022


@@ -1066,9 +1066,12 @@ static struct pci_device_id mhi_pcie_device_id[] = {
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0305)}, //SDX24
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0306)}, //SDX55
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0308)}, //SDX62
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x011a)}, //SDX35
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0309)}, //SDX7X
{PCI_DEVICE(0x1eac, 0x1001)}, //EM120
{PCI_DEVICE(0x1eac, 0x1002)}, //EM160
{PCI_DEVICE(0x1eac, 0x1004)}, //RM520
{PCI_DEVICE(0x1eac, 0x100b)}, //RM255
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, MHI_PCIE_DEBUG_ID)},
{0},
};
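For reference, the three new IDs are SDX35 (0x011a), SDX7X (0x0309) and RM255 (0x100b). PCI_DEVICE() from <linux/pci.h> fills the remaining pci_device_id fields with wildcards, so the SDX7X entry expands to roughly the sketch below (0x17cb is MHI_PCIE_VENDOR_ID, matching the vendor checks in the probe hunk later in this commit):

    static const struct pci_device_id sdx7x_id = {
        .vendor    = 0x17cb,      /* MHI_PCIE_VENDOR_ID (Qualcomm) */
        .device    = 0x0309,      /* SDX7X */
        .subvendor = PCI_ANY_ID,  /* match any subsystem vendor */
        .subdevice = PCI_ANY_ID,  /* match any subsystem device */
    };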


@@ -4,7 +4,7 @@
#ifndef _MHI_H_
#define _MHI_H_
#define PCIE_MHI_DRIVER_VERSION "V1.3.6"
#define PCIE_MHI_DRIVER_VERSION "V1.3.8"
#define ENABLE_MHI_MON
//#define ENABLE_IP_SW0


@@ -1642,7 +1642,7 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
root@OpenWrt:/# cat /proc/interrupts | grep mhi
root@OpenWrt:/# cat /sys/kernel/debug/mhi_q/mhi_netdev/pcie_mhi_0306_00.01.00_0/rx_int
*/
if (i == IPA_IN_EVENT_RING)
if (i == IPA_IN_EVENT_RING || i == IPA_OUT_EVENT_RING)
mhi_event->intmod = 5;
#ifdef ENABLE_IP_SW0
@@ -1828,6 +1828,7 @@ static struct chan_cfg_t chan_cfg[] = {
};
extern int mhi_netdev_mbin_enabled(void);
extern int mhi_netdev_use_xfer_type_dma(unsigned chan);
static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl,
struct device_node *of_node)
{
@@ -1942,7 +1943,9 @@ static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl,
if (chan == MHI_CLIENT_IP_HW_0_OUT || chan == MHI_CLIENT_IP_SW_0_OUT)
mhi_chan->xfer_type = MHI_XFER_SKB;
else if (chan == MHI_CLIENT_IP_HW_0_IN || chan == MHI_CLIENT_IP_SW_0_IN)
else if (chan == MHI_CLIENT_IP_HW_0_IN)
mhi_chan->xfer_type = mhi_netdev_use_xfer_type_dma(chan) ? MHI_XFER_DMA: MHI_XFER_SKB;
else if (chan == MHI_CLIENT_IP_SW_0_IN)
mhi_chan->xfer_type = MHI_XFER_SKB; //MHI_XFER_DMA;
#ifdef ENABLE_ADPL
else if (chan == MHI_CLIENT_ADPL)
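The of_parse_ch_cfg() hunk above splits the old combined ingress case: hardware ingress (MHI_CLIENT_IP_HW_0_IN) now switches to MHI_XFER_DMA whenever mhi_netdev_use_xfer_type_dma() reports that the rmnet_data skip-memcpy path is compiled in, while software ingress stays on skbs. A condensed sketch of the resulting selection (names as in the diff; not a drop-in excerpt):

    if (chan == MHI_CLIENT_IP_HW_0_OUT || chan == MHI_CLIENT_IP_SW_0_OUT)
        mhi_chan->xfer_type = MHI_XFER_SKB;      /* egress: always skb */
    else if (chan == MHI_CLIENT_IP_HW_0_IN)      /* hardware ingress */
        mhi_chan->xfer_type = mhi_netdev_use_xfer_type_dma(chan)
                              ? MHI_XFER_DMA     /* page pool, no memcpy */
                              : MHI_XFER_SKB;
    else if (chan == MHI_CLIENT_IP_SW_0_IN)      /* software ingress */
        mhi_chan->xfer_type = MHI_XFER_SKB;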


@@ -31,6 +31,8 @@
#include <net/tcp.h>
#include <linux/usb/cdc.h>
#include <rmnet_nss.h>
//#define CONFIG_IPQ5018_RATE_CONTROL //Only used with spf11.5 for IPQ5018
#if defined(CONFIG_IPQ5018_RATE_CONTROL)
//#include <linux/jiffies.h>
@@ -45,11 +47,6 @@
#define ETH_P_MAP 0xDA1A
#endif
#if (ETH_P_MAP == 0x00F9)
#undef ETH_P_MAP
#define ETH_P_MAP 0xDA1A
#endif
#ifndef ARPHRD_RAWIP
#define ARPHRD_RAWIP ARPHRD_NONE
#endif
@@ -62,16 +59,12 @@ static bool netdev_is_rx_handler_busy(struct net_device *dev)
}
#endif
struct rmnet_nss_cb {
int (*nss_create)(struct net_device *dev);
int (*nss_free)(struct net_device *dev);
int (*nss_tx)(struct sk_buff *skb);
};
static struct rmnet_nss_cb __read_mostly *nss_cb = NULL;
#if defined(CONFIG_PINCTRL_IPQ807x) || defined(CONFIG_PINCTRL_IPQ5018) || defined(CONFIG_PINCTRL_IPQ8074)
//#ifdef CONFIG_RMNET_DATA //spf12.x have no macro defined, just for spf11.x
#define CONFIG_QCA_NSS_DRV
/* define at qsdk/qca/src/linux-4.4/net/rmnet_data/rmnet_data_main.c */ //for spf11.x
#define CONFIG_USE_RMNET_DATA_FOR_SKIP_MEMCPY
/* define at qca/src/linux-4.4/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c */ //for spf11.x
/* define at qsdk/qca/src/datarmnet/core/rmnet_config.c */ //for spf12.x
/* set at qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c */
/* need add DEPENDS:= kmod-rmnet-core in feeds/makefile */
@@ -79,6 +72,17 @@ extern struct rmnet_nss_cb *rmnet_nss_callbacks __rcu __read_mostly;
//#endif
#endif
int mhi_netdev_use_xfer_type_dma(unsigned chan)
{
(void)chan;
#ifdef CONFIG_USE_RMNET_DATA_FOR_SKIP_MEMCPY
return 1;
#endif
return 0;
}
static const unsigned char node_id[ETH_ALEN] = {0x02, 0x50, 0xf4, 0x00, 0x00, 0x00};
static const unsigned char default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3, 0x00, 0x00, 0x00};
@@ -199,6 +203,8 @@ static void qmap_hex_dump(const char *tag, unsigned char *data, unsigned len) {
}
#endif
#define MBIM_MUX_ID_SDX7X 112 //sdx7x is 112-126, others is 0-14
static uint __read_mostly mhi_mbim_enabled = 0;
module_param(mhi_mbim_enabled, uint, S_IRUGO);
int mhi_netdev_mbin_enabled(void) { return mhi_mbim_enabled; }
@@ -295,6 +301,22 @@ enum mhi_net_type {
MHI_NET_ETHER
};
#ifdef CONFIG_USE_RMNET_DATA_FOR_SKIP_MEMCPY
/* Try not to make this structure bigger than 128 bytes, since this take space
* in payload packet.
* Example: If MRU = 16K, effective MRU = 16K - sizeof(mhi_netbuf)
*/
struct mhi_netbuf {
struct mhi_buf mhi_buf; /* this must be first element */
void (*unmap)(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir);
};
struct mhi_net_chain {
struct sk_buff *head, *tail; /* chained skb */
};
#endif
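The mhi_netbuf descriptor above is stored at the tail of each DMA receive page block (see mhi_netdev_alloc() later in this diff), so the usable payload is the block size minus the descriptor, as the size comment notes. A minimal userspace sketch of that arithmetic, assuming 4 KiB pages and order 2 (the real values come from mhi_netdev->order and the MRU):

    #include <stdio.h>

    /* stand-ins for struct mhi_buf / struct mhi_netbuf */
    struct mhi_buf_stub { void *page, *buf; unsigned long dma_addr, len; };
    struct mhi_netbuf_stub {
        struct mhi_buf_stub mhi_buf;   /* must be the first element */
        void (*unmap)(void);
    };

    int main(void)
    {
        const unsigned long page_size = 4096, order = 2;
        unsigned long block = page_size << order;   /* 16 KiB block */
        unsigned long payload = block - sizeof(struct mhi_netbuf_stub);

        /* effective MRU = block minus the descriptor kept at its end */
        printf("block=%lu descriptor=%zu payload=%lu\n",
               block, sizeof(struct mhi_netbuf_stub), payload);
        return 0;
    }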
//#define TS_DEBUG
struct mhi_netdev {
int alias;
@@ -318,6 +340,7 @@ struct mhi_netdev {
#endif
MHI_MBIM_CTX mbim_ctx;
u32 mbim_mux_id;
u32 mru;
u32 max_mtu;
@@ -328,6 +351,14 @@
struct sk_buff *frag_skb;
bool recycle_buf;
#ifdef CONFIG_USE_RMNET_DATA_FOR_SKIP_MEMCPY
u32 order;
struct mhi_netbuf **netbuf_pool;
int pool_size; /* must be power of 2 */
int current_index;
struct mhi_net_chain chain;
#endif
#if defined(MHI_NETDEV_STATUS64)
struct pcpu_sw_netstats __percpu *stats64;
#endif
@@ -621,7 +652,7 @@ static struct sk_buff * add_mbim_hdr(struct sk_buff *skb, u8 mux_id) {
struct mhi_mbim_hdr *mhdr;
__le32 sign;
u8 *c;
u16 tci = mux_id - QUECTEL_QMAP_MUX_ID;
u16 tci = mux_id;
unsigned int skb_len = skb->len;
if (qmap_mode > 1)
@@ -1274,12 +1305,12 @@ static void rmnet_mbim_rx_handler(void *dev, struct sk_buff *skb_in)
goto error;
}
if ((qmap_mode == 1 && tci != 0) || (qmap_mode > 1 && tci > qmap_mode)) {
if ((qmap_mode == 1 && tci != mhi_netdev->mbim_mux_id) || (qmap_mode > 1 && (tci - mhi_netdev->mbim_mux_id) > qmap_mode)){
MSG_ERR("unsupported tci %d by now\n", tci);
goto error;
}
tci = abs(tci);
qmap_net = pQmapDev->mpQmapNetDev[qmap_mode == 1 ? 0 : tci - 1];
qmap_net = pQmapDev->mpQmapNetDev[qmap_mode == 1 ? 0 : tci - 1 - mhi_netdev->mbim_mux_id];
dpe16 = ndp16->dpe16;
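rmnet_mbim_rx_handler() now validates the NDP TCI against the per-device mbim_mux_id base (112 on SDX7X, 0 on other modems, per the MBIM_MUX_ID_SDX7X comment earlier); with a base of 0 the new predicate reduces to the old tci != 0 / tci > qmap_mode test from 1.3.6. A sketch of the check in isolation:

    /* mux_id is the per-device TCI base: 112 on SDX7X, 0 elsewhere.
     * With mux_id == 0 this is exactly the previous behavior. */
    static int tci_unsupported(unsigned tci, unsigned mux_id, unsigned qmap_mode)
    {
        if (qmap_mode == 1)
            return tci != mux_id;
        return (tci - mux_id) > qmap_mode;
    }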
@@ -1963,6 +1994,253 @@ static void mhi_netdev_dealloc(struct mhi_netdev *mhi_netdev)
}
}
#ifdef CONFIG_USE_RMNET_DATA_FOR_SKIP_MEMCPY
static struct mhi_netbuf *mhi_netdev_alloc(struct device *dev,
gfp_t gfp,
unsigned int order)
{
struct page *page;
struct mhi_netbuf *netbuf;
struct mhi_buf *mhi_buf;
void *vaddr;
page = __dev_alloc_pages(gfp, order);
if (!page)
return NULL;
vaddr = page_address(page);
/* we going to use the end of page to store cached data */
netbuf = vaddr + (PAGE_SIZE << order) - sizeof(*netbuf);
mhi_buf = (struct mhi_buf *)netbuf;
mhi_buf->page = page;
mhi_buf->buf = vaddr;
mhi_buf->len = (void *)netbuf - vaddr;
mhi_buf->dma_addr = dma_map_page(dev, page, 0, mhi_buf->len,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, mhi_buf->dma_addr)) {
__free_pages(mhi_buf->page, order);
return NULL;
}
return netbuf;
}
static void mhi_netdev_unmap_page(struct device *dev,
dma_addr_t dma_addr,
size_t len,
enum dma_data_direction dir)
{
dma_unmap_page(dev, dma_addr, len, dir);
}
static int mhi_netdev_tmp_alloc(struct mhi_netdev *mhi_netdev, int nr_tre)
{
struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
struct device *dev = mhi_dev->dev.parent;
const u32 order = mhi_netdev->order;
int i, ret;
for (i = 0; i < nr_tre; i++) {
struct mhi_buf *mhi_buf;
struct mhi_netbuf *netbuf = mhi_netdev_alloc(dev, GFP_ATOMIC,
order);
if (!netbuf)
return -ENOMEM;
mhi_buf = (struct mhi_buf *)netbuf;
netbuf->unmap = mhi_netdev_unmap_page;
ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, mhi_buf,
mhi_buf->len, MHI_EOT);
if (unlikely(ret)) {
MSG_ERR("Failed to queue transfer, ret:%d\n", ret);
mhi_netdev_unmap_page(dev, mhi_buf->dma_addr,
mhi_buf->len, DMA_FROM_DEVICE);
__free_pages(mhi_buf->page, order);
return ret;
}
}
return 0;
}
static void mhi_netdev_queue(struct mhi_netdev *mhi_netdev)
{
struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
struct device *dev = mhi_dev->dev.parent;
struct mhi_netbuf *netbuf;
struct mhi_buf *mhi_buf;
struct mhi_netbuf **netbuf_pool = mhi_netdev->netbuf_pool;
int nr_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
int i, peak, cur_index, ret;
const int pool_size = mhi_netdev->pool_size - 1, max_peak = 4;
MSG_VERB("Enter free_desc:%d\n", nr_tre);
if (!nr_tre)
return;
/* try going thru reclaim pool first */
for (i = 0; i < nr_tre; i++) {
/* peak for the next buffer, we going to peak several times,
* and we going to give up if buffers are not yet free
*/
cur_index = mhi_netdev->current_index;
netbuf = NULL;
for (peak = 0; peak < max_peak; peak++) {
struct mhi_netbuf *tmp = netbuf_pool[cur_index];
mhi_buf = &tmp->mhi_buf;
cur_index = (cur_index + 1) & pool_size;
/* page == 1 idle, buffer is free to reclaim */
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 5,4,0 ))
if (atomic_read(&mhi_buf->page->_count) == 1)
#else
if (atomic_read(&mhi_buf->page->_refcount) == 1)
#endif
{
netbuf = tmp;
break;
}
}
/* could not find a free buffer */
if (!netbuf)
break;
/* increment reference count so when network stack is done
* with buffer, the buffer won't be freed
*/
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 5,4,0 ))
atomic_inc(&mhi_buf->page->_count);
#else
atomic_inc(&mhi_buf->page->_refcount);
#endif
dma_sync_single_for_device(dev, mhi_buf->dma_addr, mhi_buf->len,
DMA_FROM_DEVICE);
ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, mhi_buf,
mhi_buf->len, MHI_EOT);
if (unlikely(ret)) {
MSG_ERR("Failed to queue buffer, ret:%d\n", ret);
netbuf->unmap(dev, mhi_buf->dma_addr, mhi_buf->len,
DMA_FROM_DEVICE);
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 5,4,0 ))
atomic_dec(&mhi_buf->page->_count);
#else
atomic_dec(&mhi_buf->page->_refcount);
#endif
return;
}
mhi_netdev->current_index = cur_index;
}
/* recyling did not work, buffers are still busy allocate temp pkts */
if (i < nr_tre)
mhi_netdev_tmp_alloc(mhi_netdev, nr_tre - i);
}
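A note on the reclaim loop above: the local pool_size is actually the pool length minus one and serves as an index mask, and a page whose refcount has dropped back to 1 is idle and safe to requeue. A tiny standalone sketch of the power-of-two wraparound:

    #include <stdio.h>

    int main(void)
    {
        const int size = 8, mask = size - 1;   /* "pool_size" local = size - 1 */
        int idx = 6;

        for (int i = 0; i < 4; i++) {
            printf("%d ", idx);
            idx = (idx + 1) & mask;            /* wraps: 6 7 0 1 */
        }
        printf("\n");
        return 0;
    }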
/* allocating pool of memory */
static int mhi_netdev_alloc_pool(struct mhi_netdev *mhi_netdev)
{
int i;
struct mhi_netbuf *netbuf, **netbuf_pool;
struct mhi_buf *mhi_buf;
const u32 order = mhi_netdev->order;
struct device *dev = mhi_netdev->mhi_dev->dev.parent;
netbuf_pool = kmalloc_array(mhi_netdev->pool_size, sizeof(*netbuf_pool),
GFP_KERNEL);
if (!netbuf_pool)
return -ENOMEM;
for (i = 0; i < mhi_netdev->pool_size; i++) {
/* allocate paged data */
netbuf = mhi_netdev_alloc(dev, GFP_KERNEL, order);
if (!netbuf)
goto error_alloc_page;
netbuf->unmap = dma_sync_single_for_cpu;
netbuf_pool[i] = netbuf;
}
mhi_netdev->netbuf_pool = netbuf_pool;
return 0;
error_alloc_page:
for (--i; i >= 0; i--) {
netbuf = netbuf_pool[i];
mhi_buf = &netbuf->mhi_buf;
dma_unmap_page(dev, mhi_buf->dma_addr, mhi_buf->len,
DMA_FROM_DEVICE);
__free_pages(mhi_buf->page, order);
}
kfree(netbuf_pool);
return -ENOMEM;
}
static void mhi_netdev_free_pool(struct mhi_netdev *mhi_netdev)
{
int i;
struct mhi_netbuf *netbuf, **netbuf_pool = mhi_netdev->netbuf_pool;
struct device *dev = mhi_netdev->mhi_dev->dev.parent;
struct mhi_buf *mhi_buf;
for (i = 0; i < mhi_netdev->pool_size; i++) {
netbuf = netbuf_pool[i];
mhi_buf = &netbuf->mhi_buf;
dma_unmap_page(dev, mhi_buf->dma_addr, mhi_buf->len,
DMA_FROM_DEVICE);
__free_pages(mhi_buf->page, mhi_netdev->order);
}
kfree(mhi_netdev->netbuf_pool);
mhi_netdev->netbuf_pool = NULL;
}
static int mhi_netdev_poll(struct napi_struct *napi, int budget)
{
struct net_device *dev = napi->dev;
struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev);
struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev;
struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
struct mhi_net_chain *chain = &mhi_netdev->chain;
int rx_work = 0;
MSG_VERB("Entered\n");
rx_work = mhi_poll(mhi_dev, budget);
/* chained skb, push it to stack */
if (chain && chain->head) {
netif_receive_skb(chain->head);
chain->head = NULL;
}
if (rx_work < 0) {
MSG_ERR("Error polling ret:%d\n", rx_work);
napi_complete(napi);
return 0;
}
/* queue new buffers */
mhi_netdev_queue(mhi_netdev);
/* complete work if # of packet processed less than allocated budget */
if (rx_work < budget)
napi_complete(napi);
MSG_VERB("polled %d pkts\n", rx_work);
return rx_work;
}
#else
static int mhi_netdev_poll(struct napi_struct *napi, int budget)
{
struct net_device *dev = napi->dev;
@@ -2040,6 +2318,7 @@ static int mhi_netdev_poll(struct napi_struct *napi, int budget)
return rx_work;
}
#endif
static int mhi_netdev_open(struct net_device *ndev)
{
@@ -2110,7 +2389,7 @@ static netdev_tx_t mhi_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
}
if (mhi_netdev->net_type == MHI_NET_MBIM) {
if (add_mbim_hdr(skb, QUECTEL_QMAP_MUX_ID) == NULL) {
if (add_mbim_hdr(skb, mhi_netdev->mbim_mux_id) == NULL) {
dev_kfree_skb_any (skb);
return NETDEV_TX_OK;
}
@@ -2268,7 +2547,7 @@ static int qmap_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case 0x89F3: //SIOCDEVPRIVATE
if (mhi_netdev->use_rmnet_usb) {
rc = copy_to_user(ifr->ifr_ifru.ifru_data, &mhi_netdev->rmnet_info, sizeof(RMNET_INFO));
rc = copy_to_user(ifr->ifr_ifru.ifru_data, &mhi_netdev->rmnet_info, sizeof(mhi_netdev->rmnet_info));
}
break;
@@ -2436,6 +2715,34 @@ static int mhi_netdev_enable_iface(struct mhi_netdev *mhi_netdev)
mhi_netdev->enabled = true;
write_unlock_irq(&mhi_netdev->pm_lock);
#ifdef CONFIG_USE_RMNET_DATA_FOR_SKIP_MEMCPY
/* MRU must be multiplication of page size */
mhi_netdev->order = 1;
while ((PAGE_SIZE << mhi_netdev->order) < mhi_netdev->mru)
mhi_netdev->order += 1;
/* setup pool size ~2x ring length*/
no_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
mhi_netdev->pool_size = 1 << __ilog2_u32(no_tre);
if (no_tre > mhi_netdev->pool_size)
mhi_netdev->pool_size <<= 1;
mhi_netdev->pool_size <<= 1;
/* allocate memory pool */
ret = mhi_netdev_alloc_pool(mhi_netdev);
if (ret) {
MSG_ERR("mhi_netdev_alloc_pool Fail!\n");
goto error_start;
}
napi_enable(&mhi_netdev->napi);
/* now we have a pool of buffers allocated, queue to hardware
* by triggering a napi_poll
*/
napi_schedule(&mhi_netdev->napi);
error_start:
#else
/* queue buffer for rx path */
no_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
ret = mhi_netdev_alloc_skb(mhi_netdev, GFP_KERNEL);
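The pool sizing in the DMA branch above rounds the free-descriptor count up to a power of two (1 << __ilog2_u32() gives the largest power of two at or below no_tre, doubled if no_tre was not already a power of two) and then doubles it once more for the "~2x ring length" slack. A standalone sketch with worked values:

    #include <stdio.h>

    static unsigned pool_size_for(unsigned no_tre)
    {
        unsigned p = 1;
        while (p * 2 <= no_tre)   /* largest power of two <= no_tre,      */
            p *= 2;               /* i.e. what 1 << __ilog2_u32() yields  */
        if (no_tre > p)           /* not a power of two: round up         */
            p <<= 1;
        return p << 1;            /* double again for slack               */
    }

    int main(void)
    {
        /* 100 -> 64 -> 128 -> 256; 128 -> 128 -> 256 */
        printf("%u %u\n", pool_size_for(100), pool_size_for(128));
        return 0;
    }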
@@ -2443,6 +2750,7 @@ static int mhi_netdev_enable_iface(struct mhi_netdev *mhi_netdev)
schedule_delayed_work(&mhi_netdev->alloc_work, msecs_to_jiffies(20));
napi_enable(&mhi_netdev->napi);
#endif
MSG_LOG("Exited.\n");
@@ -2499,6 +2807,49 @@ static void mhi_netdev_xfer_ul_cb(struct mhi_device *mhi_dev,
dev_kfree_skb(skb);
}
#ifdef CONFIG_USE_RMNET_DATA_FOR_SKIP_MEMCPY
static void mhi_netdev_xfer_dl_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev);
struct mhi_netbuf *netbuf = mhi_result->buf_addr;
struct mhi_buf *mhi_buf = &netbuf->mhi_buf;
struct sk_buff *skb;
struct net_device *ndev = mhi_netdev->ndev;
struct device *dev = mhi_dev->dev.parent;
struct mhi_net_chain *chain = &mhi_netdev->chain;
netbuf->unmap(dev, mhi_buf->dma_addr, mhi_buf->len, DMA_FROM_DEVICE);
/* modem is down, drop the buffer */
if (mhi_result->transaction_status == -ENOTCONN) {
__free_pages(mhi_buf->page, mhi_netdev->order);
return;
}
mhi_netdev_upate_rx_stats(mhi_netdev, 1, mhi_result->bytes_xferd);
/* we support chaining */
skb = alloc_skb(0, GFP_ATOMIC);
if (likely(skb)) {
skb_add_rx_frag(skb, 0, mhi_buf->page, 0,
mhi_result->bytes_xferd, mhi_netdev->mru);
/* this is first on list */
if (!chain->head) {
skb->dev = ndev;
skb->protocol = htons(ETH_P_MAP);
chain->head = skb;
} else {
skb_shinfo(chain->tail)->frag_list = skb;
}
chain->tail = skb;
} else {
__free_pages(mhi_buf->page, mhi_netdev->order);
}
}
#else
static void mhi_netdev_xfer_dl_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
@@ -2604,6 +2955,7 @@ static void mhi_netdev_xfer_dl_cb(struct mhi_device *mhi_dev,
skb_priv->bind_netdev = NULL;
skb_queue_tail(&mhi_netdev->qmap_chain, skb);
}
#endif
static void mhi_netdev_status_cb(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb)
{
@@ -2812,33 +3164,29 @@ static void mhi_netdev_remove(struct mhi_device *mhi_dev)
struct sk_buff *skb;
MSG_LOG("Remove notification received\n");
#ifndef MHI_NETDEV_ONE_CARD_MODE
#ifndef CONFIG_USE_RMNET_DATA_FOR_SKIP_MEMCPY
unsigned i;
write_lock_irq(&mhi_netdev->pm_lock);
mhi_netdev->enabled = false;
write_unlock_irq(&mhi_netdev->pm_lock);
if (mhi_netdev->use_rmnet_usb) {
#ifndef MHI_NETDEV_ONE_CARD_MODE
unsigned i;
for (i = 0; i < mhi_netdev->qmap_mode; i++) {
if (mhi_netdev->mpQmapNetDev[i]) {
if (mhi_netdev->mpQmapNetDev[i]
&& mhi_netdev->mpQmapNetDev[i] != mhi_netdev->ndev) {
rmnet_vnd_unregister_device(mhi_netdev->mpQmapNetDev[i]);
mhi_netdev->mpQmapNetDev[i] = NULL;
}
mhi_netdev->mpQmapNetDev[i] = NULL;
}
rtnl_lock();
#ifdef ANDROID_gki
if (mhi_netdev->ndev && rtnl_dereference(mhi_netdev->ndev->rx_handler))
#else
if (netdev_is_rx_handler_busy(mhi_netdev->ndev))
#endif
if (mhi_netdev->ndev
&& rtnl_dereference(mhi_netdev->ndev->rx_handler) == rmnet_rx_handler)
netdev_rx_handler_unregister(mhi_netdev->ndev);
rtnl_unlock();
#endif
}
#endif
while ((skb = skb_dequeue (&mhi_netdev->skb_chain)))
dev_kfree_skb_any(skb);
while ((skb = skb_dequeue (&mhi_netdev->qmap_chain)))
@@ -2857,6 +3205,9 @@ static void mhi_netdev_remove(struct mhi_device *mhi_dev)
#endif
free_netdev(mhi_netdev->ndev);
flush_delayed_work(&mhi_netdev->alloc_work);
#ifdef CONFIG_USE_RMNET_DATA_FOR_SKIP_MEMCPY
mhi_netdev_free_pool(mhi_netdev);
#endif
if (!IS_ERR_OR_NULL(mhi_netdev->dentry))
debugfs_remove_recursive(mhi_netdev->dentry);
@@ -2867,6 +3218,7 @@ static int mhi_netdev_probe(struct mhi_device *mhi_dev,
{
int ret;
struct mhi_netdev *mhi_netdev;
unsigned i;
mhi_netdev = devm_kzalloc(&mhi_dev->dev, sizeof(*mhi_netdev),
GFP_KERNEL);
@@ -2923,6 +3275,9 @@ static int mhi_netdev_probe(struct mhi_device *mhi_dev,
if ((mhi_dev->vendor == 0x17cb && mhi_dev->dev_id == 0x0306)
|| (mhi_dev->vendor == 0x17cb && mhi_dev->dev_id == 0x0308)
|| (mhi_dev->vendor == 0x1eac && mhi_dev->dev_id == 0x1004)
|| (mhi_dev->vendor == 0x17cb && mhi_dev->dev_id == 0x011a)
|| (mhi_dev->vendor == 0x1eac && mhi_dev->dev_id == 0x100b)
|| (mhi_dev->vendor == 0x17cb && mhi_dev->dev_id == 0x0309)
) {
mhi_netdev->qmap_version = 9;
}
@@ -2931,6 +3286,11 @@ static int mhi_netdev_probe(struct mhi_device *mhi_dev,
mhi_netdev->qmap_version = 0;
mhi_netdev->use_rmnet_usb = 0;
}
mhi_netdev->mbim_mux_id = 0;
if (mhi_dev->vendor == 0x17cb && mhi_dev->dev_id == 0x0309) {
mhi_netdev->mbim_mux_id = MBIM_MUX_ID_SDX7X;
}
rmnet_info_set(mhi_netdev, &mhi_netdev->rmnet_info);
mhi_netdev->rx_queue = mhi_netdev_alloc_skb;
@@ -2958,16 +3318,32 @@ static int mhi_netdev_probe(struct mhi_device *mhi_dev,
mhi_netdev->mpQmapNetDev[0] = mhi_netdev->ndev;
netif_carrier_on(mhi_netdev->ndev);
}
else if (mhi_netdev->use_rmnet_usb) {
#ifdef MHI_NETDEV_ONE_CARD_MODE
else if (1) {
mhi_netdev->mpQmapNetDev[0] = mhi_netdev->ndev;
strcpy(mhi_netdev->rmnet_info.ifname[0], mhi_netdev->mpQmapNetDev[0]->name);
mhi_netdev->rmnet_info.mux_id[0] = QUECTEL_QMAP_MUX_ID;
if (mhi_mbim_enabled) {
mhi_netdev->rmnet_info.mux_id[0] = mhi_netdev->mbim_mux_id;
}
}
#else
unsigned i;
#ifdef CONFIG_USE_RMNET_DATA_FOR_SKIP_MEMCPY
else if (1) {
BUG_ON(mhi_netdev->net_type != MHI_NET_RMNET);
for (i = 0; i < mhi_netdev->qmap_mode; i++) {
mhi_netdev->rmnet_info.mux_id[i] = QUECTEL_QMAP_MUX_ID + i;
strcpy(mhi_netdev->rmnet_info.ifname[i], "use_rmnet_data");
}
}
#endif
else if (mhi_netdev->use_rmnet_usb) {
for (i = 0; i < mhi_netdev->qmap_mode; i++) {
u8 mux_id = QUECTEL_QMAP_MUX_ID+i;
if (mhi_mbim_enabled) {
mux_id = mhi_netdev->mbim_mux_id + i;
}
mhi_netdev->mpQmapNetDev[i] = rmnet_vnd_register_device(mhi_netdev, i, mux_id);
if (mhi_netdev->mpQmapNetDev[i]) {
strcpy(mhi_netdev->rmnet_info.ifname[i], mhi_netdev->mpQmapNetDev[i]->name);
@@ -2980,11 +3356,11 @@ static int mhi_netdev_probe(struct mhi_device *mhi_dev,
//netdev_rx_handler_register(mhi_netdev->ndev, rmnet_rx_handler, mhi_netdev);
netdev_rx_handler_register(mhi_netdev->ndev, rmnet_rx_handler, NULL);
rtnl_unlock();
#endif
}
#if defined(CONFIG_IPQ5018_RATE_CONTROL)
mhi_netdev->mhi_rate_control = 1;
#endif
#endif
return 0;
@@ -3032,3 +3408,11 @@ void mhi_device_netdev_exit(void)
#endif
mhi_driver_unregister(&mhi_netdev_driver);
}
void mhi_netdev_quectel_avoid_unused_function(void) {
#ifdef CONFIG_USE_RMNET_DATA_FOR_SKIP_MEMCPY
qmap_hex_dump(NULL, NULL, 0);
mhi_netdev_ip_type_trans(0);
#else
#endif
}
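The new mhi_netdev_quectel_avoid_unused_function() appears to exist only to keep functions that the skip-memcpy path no longer calls (qmap_hex_dump, mhi_netdev_ip_type_trans) referenced, so -Wunused-function stays quiet across configurations. A minimal illustration of the pattern, with hypothetical names:

    /* hypothetical stand-ins, not driver symbols */
    static void sometimes_unused(void) { }   /* only called in some configs */

    void keep_referenced(void)
    {
        sometimes_unused();                   /* artificial call site */
    }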