wwan: Initial attempt at NSS offload

Initial attempt at getting NSS offload working with 4G/5G modems on
IPQ807x/IPQ50xx platforms.

I don't have a unit to test this on, but everything seems to compile
after applying a few patches.

Ensure you are using the `NSS-12.5-K6.x-wwan` branch in your `feeds.conf`:

```
src-git nss_packages https://github.com/qosmio/nss-packages.git;NSS-12.5-K6.x-wwan
```
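
If the feed entry was just added or changed, update it first so the branch is actually fetched (standard OpenWrt feeds workflow):
```
./scripts/feeds update nss_packages
```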

Then install all packages from the feed:
```
./scripts/feeds install -p nss_packages -a
```

Ensure you select the following kernel modules:
```
kmod-pcie_mhi
kmod-qmi_wwan_q
```
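
If you edit `.config` directly instead of going through `make menuconfig`, the corresponding symbols should look roughly like this (a sketch, assuming the usual `CONFIG_PACKAGE_<pkg>` naming):
```
CONFIG_PACKAGE_kmod-pcie_mhi=y
CONFIG_PACKAGE_kmod-qmi_wwan_q=y
```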

Work was derived from `https://github.com/coolsnowwolf/lede` and
`https://git.codelinaro.org/clo/qsdk/platform/vendor/qcom/opensource/qsdk/datarmnet`

Signed-off-by: Sean Khan <datapronix@protonmail.com>
Sean Khan 2024-06-23 17:24:46 -04:00
parent 1af951126c
commit 4b1cb1d940
111 changed files with 53601 additions and 0 deletions

View File

@ -160,6 +160,10 @@ ifneq ($(CONFIG_PACKAGE_kmod-bonding),)
ECM_MAKE_OPTS+=ECM_INTERFACE_BOND_ENABLE=y
endif
ifneq ($(CONFIG_PACKAGE_kmod-qmi_wwan_q),)
ECM_MAKE_OPTS+=ECM_INTERFACE_RAWIP_ENABLE=y
endif
define Build/InstallDev
mkdir -p $(1)/usr/include/qca-nss-ecm
$(CP) $(PKG_BUILD_DIR)/exports/* $(1)/usr/include/qca-nss-ecm

View File

@ -0,0 +1,15 @@
#
# Copyright (C) 2015 OpenWrt.org
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk
LUCI_TITLE:=PCI Modem Server
LUCI_DEPENDS:=+kmod-pcie_mhi +pciutils +quectel-CM-5G
include $(TOPDIR)/feeds/luci/luci.mk
# call BuildPackage - OpenWrt buildroot signature

View File

@ -0,0 +1,9 @@
module("luci.controller.pcimodem", package.seeall)
function index()
if not nixio.fs.access("/etc/config/pcimodem") then
return
end
entry({"admin", "network", "pcimodem"}, cbi("pcimodem"), _("PCI Modem Server"), 80).dependent = false
end

View File

@ -0,0 +1,39 @@
-- Copyright 2016 David Thornley <david.thornley@touchstargroup.com>
-- Licensed to the public under the Apache License 2.0.
mp = Map("pcimodem")
mp.title = translate("PCI Modem Server")
mp.description = translate("Modem Server For OpenWrt")
s = mp:section(TypedSection, "service", "Base Setting")
s.anonymous = true
enabled = s:option(Flag, "enabled", translate("Enable"))
enabled.default = 0
enabled.rmempty = false
apn = s:option(Value, "apn", translate("APN"))
apn.rmempty = true
pincode = s:option(Value, "pincode", translate("PIN"))
pincode.rmempty = true
username = s:option(Value, "username", translate("PAP/CHAP username"))
username.rmempty = true
password = s:option(Value, "password", translate("PAP/CHAP password"))
password.rmempty = true
auth = s:option(Value, "auth", translate("Authentication Type"))
auth.rmempty = true
auth:value("", translate("-- Please choose --"))
auth:value("both", "PAP/CHAP (both)")
auth:value("pap", "PAP")
auth:value("chap", "CHAP")
auth:value("none", "NONE")
tool = s:option(Value, "tool", translate("Tools"))
tool:value("quectel-CM", "quectel-CM")
tool.rmempty = true
return mp

View File

@ -0,0 +1,4 @@
config service
option tool 'quectel-CM'
option enabled '0'

View File

@ -0,0 +1,75 @@
#!/bin/sh /etc/rc.common
# Copyright (C) 2006-2014 OpenWrt.org
START=99
STOP=16
USE_PROCD=1
#Use procd to start the service
run_5g()
{
local enabled
config_get_bool enabled $1 enabled
echo "run 4G" >> /tmp/log4g
if [ "$enabled" = "1" ]; then
local user
local password
local apn
local auth
local pincode
local tool
# echo "enable 5G" >> /tmp/log5g
config_get user $1 user
config_get password $1 password
config_get apn $1 apn
config_get auth $1 auth
config_get pincode $1 pincode
config_get tool $1 tool
config_get tty $1 tty
config_get atcmd $1 atcmd
if [ "$tool" = "at" ];then
at_tool "$atcmd" -d $tty
else
procd_open_instance
#Create an instance; from procd's point of view, one application can have multiple instances
#Instances can be listed with: ubus call service list
procd_set_param command $tool -i rmnet_mhi0 -s $apn
if [ "$password" != "" ];then
procd_append_param command $user $password $auth
fi
if [ "$pincode" != "" ]; then
procd_append_param command -p $pincode
fi
# procd_append_param command -f /tmp/4g.log
procd_set_param respawn
echo "quectel-CM has started."
procd_close_instance
#Close the instance
fi
fi
}
service_triggers()
{
procd_add_reload_trigger "pcimodem"
}
start_service() {
config_load pcimodem
config_foreach run_5g service
}
stop_service()
{
echo "5G stop" >> /tmp/log5g
killall quectel-CM
echo "quectel-CM has stoped."
}
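
For reference, a minimal `/etc/config/pcimodem` that actually starts the connection manager might look like this (the APN is a placeholder; use your carrier's value):
```
config service
	option enabled '1'
	option tool 'quectel-CM'
	option apn 'internet'
```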

View File

@ -0,0 +1,11 @@
#!/bin/sh
uci -q batch <<-EOF >/dev/null
delete ucitrack.@pcimodem[-1]
add ucitrack pcimodem
set ucitrack.@pcimodem[-1].init=pcimodem
commit ucitrack
EOF
rm -f /tmp/luci-indexcache
exit 0

View File

@ -0,0 +1,19 @@
#
# Copyright (C) 2015 OpenWrt.org
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk
LUCI_TITLE:=Modem Server
LUCI_DEPENDS:=+luci-compat +quectel-CM-5G +kmod-usb-acm \
+kmod-usb-net-cdc-ether +kmod-usb-net-cdc-mbim \
+kmod-usb-net-qmi-wwan +kmod-usb-net-rndis \
+kmod-usb-serial-option +kmod-usb-wdm \
+kmod-qmi_wwan_f +kmod-qmi_wwan_q
include $(TOPDIR)/feeds/luci/luci.mk
# call BuildPackage - OpenWrt buildroot signature

View File

@ -0,0 +1,9 @@
module("luci.controller.usbmodem", package.seeall)
function index()
if not nixio.fs.access("/etc/config/usbmodem") then
return
end
entry({"admin", "network", "usbmodem"}, cbi("usbmodem"), _("USB Modem Server"), 80).dependent = false
end

View File

@ -0,0 +1,51 @@
-- Copyright 2016 David Thornley <david.thornley@touchstargroup.com>
-- Licensed to the public under the Apache License 2.0.
mp = Map("usbmodem")
mp.title = translate("USB Modem Server")
mp.description = translate("Modem Server For OpenWrt")
s = mp:section(TypedSection, "service", "Base Setting")
s.anonymous = true
enabled = s:option(Flag, "enabled", translate("Enable"))
enabled.default = 0
enabled.rmempty = false
device = s:option(Value, "device", translate("Modem device"))
device.rmempty = false
local device_suggestions = nixio.fs.glob("/dev/cdc-wdm*")
if device_suggestions then
local node
for node in device_suggestions do
device:value(node)
end
end
apn = s:option(Value, "apn", translate("APN"))
apn.rmempty = true
pincode = s:option(Value, "pincode", translate("PIN"))
pincode.rmempty = true
username = s:option(Value, "username", translate("PAP/CHAP username"))
username.rmempty = true
password = s:option(Value, "password", translate("PAP/CHAP password"))
password.rmempty = true
auth = s:option(Value, "auth", translate("Authentication Type"))
auth.rmempty = true
auth:value("", translate("-- Please choose --"))
auth:value("both", "PAP/CHAP (both)")
auth:value("pap", "PAP")
auth:value("chap", "CHAP")
auth:value("none", "NONE")
tool = s:option(Value, "tool", translate("Tools"))
tool:value("quectel-CM", "quectel-CM")
tool.rmempty = true
return mp

View File

@ -0,0 +1,5 @@
config service
option tool 'quectel-CM'
option device '/dev/cdc-wdm0'
option enabled '0'

View File

@ -0,0 +1,80 @@
#!/bin/sh /etc/rc.common
# Copyright (C) 2006-2014 OpenWrt.org
START=99
STOP=16
USE_PROCD=1
#Use procd to start the service
run_4g()
{
local enabled
config_get_bool enabled $1 enabled
echo "run 4G" >> /tmp/log4g
if [ "$enabled" = "1" ]; then
local user
local password
local apn
local auth
local pincode
local device
local tool
# echo "enable 4G" >> /tmp/log4g
config_get user $1 user
config_get password $1 password
config_get apn $1 apn
config_get auth $1 auth
config_get pincode $1 pincode
config_get device $1 device
config_get tool $1 tool
config_get tty $1 tty
config_get atcmd $1 atcmd
devname="$(basename "$device")"
devpath="$(readlink -f /sys/class/usbmisc/$devname/device/)"
ifname="$( ls "$devpath"/net )"
if [ "$tool" = "at" ];then
at_tool "$atcmd" -d $tty
else
procd_open_instance
#Create an instance; from procd's point of view, one application can have multiple instances
#Instances can be listed with: ubus call service list
procd_set_param command $tool -i $ifname -s $apn
if [ "$password" != "" ];then
procd_append_param command $user $password $auth
fi
if [ "$pincode" != "" ]; then
procd_append_param command -p $pincode
fi
# procd_append_param command -f /tmp/4g.log
procd_set_param respawn
echo "quectel-CM has started."
procd_close_instance
#Close the instance
fi
fi
}
service_triggers()
{
procd_add_reload_trigger "usbmodem"
}
start_service() {
config_load usbmodem
config_foreach run_4g service
}
stop_service()
{
echo "4G stop" >> /tmp/log4g
killall quectel-CM
echo "quectel-CM has stoped."
}

View File

@ -0,0 +1,11 @@
#!/bin/sh
uci -q batch <<-EOF >/dev/null
delete ucitrack.@usbmodem[-1]
add ucitrack usbmodem
set ucitrack.@usbmodem[-1].init=usbmodem
commit ucitrack
EOF
rm -f /tmp/luci-indexcache
exit 0

View File

@ -0,0 +1,39 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=quectel-CM-5G
PKG_VERSION:=1.6.5
PKG_RELEASE:=1
include $(INCLUDE_DIR)/package.mk
define Package/quectel-CM-5G
SECTION:=utils
CATEGORY:=Utilities
TITLE:=quectel-CM-5G app
endef
define Build/Prepare
mkdir -p $(PKG_BUILD_DIR)
$(CP) ./src/* $(PKG_BUILD_DIR)/
endef
define Build/Compile
$(MAKE) -C "$(PKG_BUILD_DIR)" \
EXTRA_CFLAGS="$(EXTRA_CFLAGS)" \
CROSS_COMPILE="$(TARGET_CROSS)" \
ARCH="$(LINUX_KARCH)" \
M="$(PKG_BUILD_DIR)" \
CC="$(TARGET_CC)"
endef
define Package/quectel-CM-5G/install
$(INSTALL_DIR) $(1)/usr/bin $(1)/lib/netifd/proto
$(INSTALL_BIN) $(PKG_BUILD_DIR)/quectel-CM $(1)/usr/bin
$(INSTALL_BIN) ./files/rmnet_init.sh $(1)/lib/netifd
$(INSTALL_BIN) ./files/rmnet.script $(1)/lib/netifd
$(INSTALL_BIN) ./files/rmnet.sh $(1)/lib/netifd/proto
$(INSTALL_BIN) ./files/rmnet6.sh $(1)/lib/netifd/proto
$(INSTALL_BIN) ./files/rmnet6.script $(1)/lib/netifd
endef
$(eval $(call BuildPackage,quectel-CM-5G))

View File

@ -0,0 +1,48 @@
config dnsmasq
option domainneeded '1'
option boguspriv '1'
option filterwin2k '0'
option localise_queries '1'
option rebind_protection '1'
option rebind_localhost '1'
option local '/lan/'
option domain 'lan'
option expandhosts '1'
option nonegcache '0'
option authoritative '1'
option readethers '1'
option leasefile '/tmp/dhcp.leases'
option resolvfile '/tmp/resolv.conf.auto'
option nonwildcard '1'
option localservice '1'
config dhcp 'lan'
option interface 'lan'
option start '100'
option limit '150'
option leasetime '12h'
option ra 'relay'
option dhcpv6 'disabled'
option ndp 'relay'
config dhcp 'wan'
option interface 'wan'
option ignore '1'
option ra 'relay'
option dhcpv6 'disabled'
option ndp 'relay'
option ndproxy_routing '0'
option master '1'
config dhcp 'wan6'
option ra 'relay'
option dhcpv6 'disabled'
option ndp 'relay'
option ndproxy_routing '0'
option master '1'
option interface 'wan6'
config odhcpd 'odhcpd'
option loglevel '7'

View File

@ -0,0 +1,66 @@
#!/bin/sh
# Copyright (c) 2019 Qualcomm Technologies, Inc.
# All Rights Reserved.
# Confidential and Proprietary - Qualcomm Technologies, Inc.
[ -z "$1" ] && echo "Error: should be run by rmnet" && exit 1
[ -z "$2" ] && echo "Error: should be run by rmnet" && exit 1
. /lib/functions.sh
. /lib/functions/network.sh
. /lib/netifd/netifd-proto.sh
setup_interface() {
INTERFACE=$1
CONFIG=/tmp/rmnet_$2_ipv4config
logger "rmnet setup_interface $1 $2 here"
#Fetch information from the lower layer.
[ -f ${CONFIG} ] || {
proto_notify_error "$INTERFACE" "RMNET data call Not ready"
proto_block_restart "$INTERFACE"
return
}
. ${CONFIG}
ip=$PUBLIC_IP
DNS=$DNSSERVERS
router=$GATEWAY
subnet=$NETMASK
interface=$IFNAME
#Send the information to the netifd
proto_init_update "$interface" 1 1
#ip and subnet
proto_add_ipv4_address "$ip" "${subnet:-255.255.255.0}"
#Any routers? If not, remove the loop below.
#Router entries must be space-separated.
for i in $router; do
proto_add_ipv4_route "$i" 32 "" "$ip"
proto_add_ipv4_route 0.0.0.0 0 "$i" "$ip"
done
#Pass the DNS information to netifd.
for dns in $DNS; do
proto_add_dns_server "$dns"
done
#Pass the domain information to netifd
for domain in $domain; do
proto_add_dns_search "$domain"
done
#proto_add_data
[ -n "$ZONE" ] && json_add_string zone "$ZONE"
proto_close_data
proto_send_update "$INTERFACE"
}
case "$1" in
renew)
setup_interface $2 $3
;;
esac
exit 0

View File

@ -0,0 +1,32 @@
#!/bin/sh
# Copyright (c) 2019 Qualcomm Technologies, Inc.
# All Rights Reserved.
# Confidential and Proprietary - Qualcomm Technologies, Inc.
. /lib/functions.sh
. /lib/functions/network.sh
. ../netifd-proto.sh
init_proto "$@"
proto_rmnet_setup() {
local cfg="$1"
local iface="$2"
logger "rmnet started"
#Call rmnet management script below!!
logger "rmnet updated ${cfg} ${iface}"
/lib/netifd/rmnet.script renew $cfg $iface
}
proto_rmnet_teardown() {
local cfg="$1"
#Tear down the rmnet manager script here.
}
proto_rmnet_init_config() {
#no_device=1
available=1
}
add_protocol rmnet

View File

@ -0,0 +1,61 @@
#!/bin/sh
# Copyright (c) 2019 Qualcomm Technologies, Inc.
# All Rights Reserved.
# Confidential and Proprietary - Qualcomm Technologies, Inc.
[ -z "$1" ] && echo "Error: should be run by rmnet" && exit 1
[ -z "$2" ] && echo "Error: should be run by rmnet" && exit 1
. /lib/functions.sh
. /lib/functions/network.sh
. /lib/netifd/netifd-proto.sh
setup_interface() {
INTERFACE=$1
CONFIG=/tmp/rmnet_$2_ipv6config
logger "rmnet setup_interface $1 $2 here"
#Fetch information from the lower layer.
[ -f ${CONFIG} ] || {
proto_notify_error "$INTERFACE" "RMNET data call NOT ready"
proto_block_restart "$INTERFACE"
return
}
. ${CONFIG}
ADDRESSES=$PUBLIC_IP
interface=$IFNAME
#Send the information to the netifd
proto_init_update "$interface" 1 1
#ip and subnet
proto_add_ipv6_address "${PUBLIC_IP}" "128"
proto_add_ipv6_prefix "${PUBLIC_IP}/${PrefixLength}"
#Router entries must be space-separated
proto_add_ipv6_route "$GATEWAY" 128
proto_add_ipv6_route "::0" 0 "$GATEWAY" "" "" "${PUBLIC_IP}/${PrefixLength}"
#Pass the DNS information to netifd.
for dns in $DNSSERVERS; do
proto_add_dns_server "$dns"
done
#Pass the domain information to netifd
for domain in $domain; do
proto_add_dns_search "$domain"
done
#proto_add_data
[ -n "$ZONE" ] && json_add_string zone "$ZONE"
proto_close_data
proto_send_update "$INTERFACE"
}
case "$1" in
renew|bound)
setup_interface $2 $3
;;
esac
exit 0

View File

@ -0,0 +1,32 @@
#!/bin/sh
# Copyright (c) 2019 Qualcomm Technologies, Inc.
# All Rights Reserved.
# Confidential and Proprietary - Qualcomm Technologies, Inc.
. /lib/functions.sh
. /lib/functions/network.sh
. ../netifd-proto.sh
init_proto "$@"
proto_rmnet6_setup() {
local cfg="$1"
local iface="$2"
logger "rmnet6 started"
#Call rmnet management script below!!
/lib/netifd/rmnet6.script renew $cfg $iface
logger "rmnet6 updated"
}
proto_rmnet6_teardown() {
local cfg="$1"
#Tear down the rmnet manager script here.
}
proto_rmnet6_init_config() {
#no_device=1
available=1
}
add_protocol rmnet6

View File

@ -0,0 +1,31 @@
#!/bin/sh
uci set network.wan='interface'
uci set network.wan.ifname='wwan0'
uci set network.wan.proto='rmnet'
uci set network.wan6='interface'
uci set network.wan6.ifname='wwan0'
uci set network.wan6.proto='rmnet6'
uci set dhcp.lan.ra='relay'
uci set dhcp.lan.dhcpv6='disabled'
uci set dhcp.lan.ndp='relay'
uci set dhcp.wan.ra='relay'
uci set dhcp.wan.dhcpv6='disabled'
uci set dhcp.wan.ndp='relay'
uci set dhcp.wan.ndproxy_routing='0'
uci set dhcp.wan6=dhcp
uci set dhcp.wan6.interface='wan6'
uci set dhcp.wan6.ra='relay'
uci set dhcp.wan6.dhcpv6='disabled'
uci set dhcp.wan6.ndp='relay'
uci set dhcp.wan6.ndproxy_routing='0'
uci set dhcp.wan6.master='1'
uci set dhcp.odhcpd=odhcpd
uci set dhcp.odhcpd.loglevel='7'
uci commit

View File

@ -0,0 +1,36 @@
cmake_minimum_required(VERSION 2.4)
project(quectel-CM)
add_definitions(-Wall -Wextra -Werror -O1)
option(USE_QRTR "Enable QRTR" OFF)
set( QL_CM_SRC
QmiWwanCM.c GobiNetCM.c main.c MPQMUX.c QMIThread.c util.c qmap_bridge_mode.c mbim-cm.c device.c
atc.c atchannel.c at_tok.c
udhcpc.c
)
if(USE_QRTR)
add_definitions(-DCONFIG_QRTR)
set( QRTR_SRC qrtr.c rmnetctl.c)
endif()
add_executable(quectel-CM ${QL_CM_SRC} ${QRTR_SRC})
target_link_libraries(quectel-CM PUBLIC pthread)
install (TARGETS quectel-CM DESTINATION bin)
add_executable(quectel-qmi-proxy quectel-qmi-proxy.c)
target_link_libraries(quectel-qmi-proxy PUBLIC pthread)
install (TARGETS quectel-qmi-proxy DESTINATION bin)
add_executable(quectel-mbim-proxy quectel-mbim-proxy.c)
target_link_libraries(quectel-mbim-proxy PUBLIC pthread)
install (TARGETS quectel-mbim-proxy DESTINATION bin)
add_executable(quectel-atc-proxy quectel-atc-proxy.c atchannel.c at_tok.c util.c)
target_link_libraries(quectel-atc-proxy PUBLIC pthread)
install (TARGETS quectel-atc-proxy DESTINATION bin)
add_executable(quectel-qrtr-proxy quectel-qrtr-proxy.c)
target_link_libraries(quectel-qrtr-proxy PUBLIC pthread)
install (TARGETS quectel-qrtr-proxy DESTINATION bin)

View File

@ -0,0 +1,246 @@
/******************************************************************************
@file GobiNetCM.c
@brief GobiNet driver.
DESCRIPTION
Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules.
INITIALIZATION AND SEQUENCING REQUIREMENTS
None.
---------------------------------------------------------------------------
Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved.
Quectel Wireless Solution Proprietary and Confidential.
---------------------------------------------------------------------------
******************************************************************************/
#include <stdio.h>
#include <string.h>
#include <termios.h>
#include <stdio.h>
#include <ctype.h>
#include "QMIThread.h"
#ifdef CONFIG_GOBINET
static int qmiclientId[QMUX_TYPE_ALL];
// IOCTL to generate a client ID for this service type
#define IOCTL_QMI_GET_SERVICE_FILE 0x8BE0 + 1
// IOCTL to get the VIDPID of the device
#define IOCTL_QMI_GET_DEVICE_VIDPID 0x8BE0 + 2
// IOCTL to get the MEID of the device
#define IOCTL_QMI_GET_DEVICE_MEID 0x8BE0 + 3
static int GobiNetSendQMI(PQCQMIMSG pRequest) {
int ret, fd;
fd = qmiclientId[pRequest->QMIHdr.QMIType];
pRequest->QMIHdr.ClientId = (fd&0xFF) ? fd&0xFF : pRequest->QMIHdr.QMIType;
if (fd <= 0) {
dbg_time("%s QMIType: %d has no clientID", __func__, pRequest->QMIHdr.QMIType);
return -ENODEV;
}
// Always ready to write
if (1 == 1) {
ssize_t nwrites = le16_to_cpu(pRequest->QMIHdr.Length) + 1 - sizeof(QCQMI_HDR);
ret = write(fd, &pRequest->MUXMsg, nwrites);
if (ret == nwrites) {
ret = 0;
} else {
dbg_time("%s write=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno));
}
} else {
dbg_time("%s poll=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno));
}
return ret;
}
static int GobiNetGetClientID(const char *qcqmi, UCHAR QMIType) {
int ClientId;
ClientId = cm_open_dev(qcqmi);
if (ClientId == -1) {
dbg_time("failed to open %s, errno: %d (%s)", qcqmi, errno, strerror(errno));
return -1;
}
if (ioctl(ClientId, IOCTL_QMI_GET_SERVICE_FILE, QMIType) != 0) {
dbg_time("failed to get ClientID for 0x%02x errno: %d (%s)", QMIType, errno, strerror(errno));
close(ClientId);
ClientId = 0;
}
switch (QMIType) {
case QMUX_TYPE_WDS: dbg_time("Get clientWDS = %d", ClientId); break;
case QMUX_TYPE_DMS: dbg_time("Get clientDMS = %d", ClientId); break;
case QMUX_TYPE_NAS: dbg_time("Get clientNAS = %d", ClientId); break;
case QMUX_TYPE_QOS: dbg_time("Get clientQOS = %d", ClientId); break;
case QMUX_TYPE_WMS: dbg_time("Get clientWMS = %d", ClientId); break;
case QMUX_TYPE_PDS: dbg_time("Get clientPDS = %d", ClientId); break;
case QMUX_TYPE_UIM: dbg_time("Get clientUIM = %d", ClientId); break;
case QMUX_TYPE_COEX: dbg_time("Get clientCOEX = %d", ClientId); break;
case QMUX_TYPE_WDS_ADMIN: dbg_time("Get clientWDA = %d", ClientId);
break;
default: break;
}
return ClientId;
}
static int GobiNetDeInit(void) {
unsigned int i;
for (i = 0; i < sizeof(qmiclientId)/sizeof(qmiclientId[0]); i++)
{
if (qmiclientId[i] != 0)
{
close(qmiclientId[i]);
qmiclientId[i] = 0;
}
}
return 0;
}
static void * GobiNetThread(void *pData) {
PROFILE_T *profile = (PROFILE_T *)pData;
const char *qcqmi = (const char *)profile->qmichannel;
int wait_for_request_quit = 0;
qmiclientId[QMUX_TYPE_WDS] = GobiNetGetClientID(qcqmi, QMUX_TYPE_WDS);
if (profile->enable_ipv6)
qmiclientId[QMUX_TYPE_WDS_IPV6] = GobiNetGetClientID(qcqmi, QMUX_TYPE_WDS);
qmiclientId[QMUX_TYPE_DMS] = GobiNetGetClientID(qcqmi, QMUX_TYPE_DMS);
qmiclientId[QMUX_TYPE_NAS] = GobiNetGetClientID(qcqmi, QMUX_TYPE_NAS);
qmiclientId[QMUX_TYPE_UIM] = GobiNetGetClientID(qcqmi, QMUX_TYPE_UIM);
#ifdef CONFIG_COEX_WWAN_STATE
qmiclientId[QMUX_TYPE_COEX] = GobiNetGetClientID(qcqmi, QMUX_TYPE_COEX);
#endif
if (profile->qmap_mode == 0 || profile->loopback_state) {//when QMAP enabled, set data format in GobiNet Driver
qmiclientId[QMUX_TYPE_WDS_ADMIN] = GobiNetGetClientID(qcqmi, QMUX_TYPE_WDS_ADMIN);
profile->wda_client = qmiclientId[QMUX_TYPE_WDS_ADMIN];
}
//Do not check clientWDA; there is only one client for WDA. If quectel-CM is killed by SIGKILL, the client ID for WDA cannot be obtained again!
if (qmiclientId[QMUX_TYPE_WDS] == 0) /*|| (clientWDA == -1)*/ {
GobiNetDeInit();
dbg_time("%s Failed to open %s, errno: %d (%s)", __func__, qcqmi, errno, strerror(errno));
qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_DISCONNECTED);
pthread_exit(NULL);
return NULL;
}
qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_CONNECTED);
while (1) {
struct pollfd pollfds[16] = {{qmidevice_control_fd[1], POLLIN, 0}};
int ne, ret, nevents = 1;
unsigned int i;
for (i = 0; i < sizeof(qmiclientId)/sizeof(qmiclientId[0]); i++)
{
if (qmiclientId[i] != 0)
{
pollfds[nevents].fd = qmiclientId[i];
pollfds[nevents].events = POLLIN;
pollfds[nevents].revents = 0;
nevents++;
}
}
do {
ret = poll(pollfds, nevents, wait_for_request_quit ? 1000: -1);
} while ((ret < 0) && (errno == EINTR));
if (ret == 0 && wait_for_request_quit) {
QmiThreadRecvQMI(NULL); //main thread may be pending on QmiThreadSendQMI()
continue;
}
if (ret <= 0) {
dbg_time("%s poll=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno));
break;
}
for (ne = 0; ne < nevents; ne++) {
int fd = pollfds[ne].fd;
short revents = pollfds[ne].revents;
if (revents & (POLLERR | POLLHUP | POLLNVAL)) {
dbg_time("%s poll err/hup/inval", __func__);
dbg_time("epoll fd = %d, events = 0x%04x", fd, revents);
if (fd == qmidevice_control_fd[1]) {
} else {
}
if (revents & (POLLERR | POLLHUP | POLLNVAL))
goto __GobiNetThread_quit;
}
if ((revents & POLLIN) == 0)
continue;
if (fd == qmidevice_control_fd[1]) {
int triger_event;
if (read(fd, &triger_event, sizeof(triger_event)) == sizeof(triger_event)) {
//DBG("triger_event = 0x%x", triger_event);
switch (triger_event) {
case RIL_REQUEST_QUIT:
goto __GobiNetThread_quit;
break;
case SIG_EVENT_STOP:
wait_for_request_quit = 1;
break;
default:
break;
}
}
continue;
}
{
ssize_t nreads;
PQCQMIMSG pResponse = (PQCQMIMSG)cm_recv_buf;
nreads = read(fd, &pResponse->MUXMsg, sizeof(cm_recv_buf) - sizeof(QCQMI_HDR));
if (nreads <= 0)
{
dbg_time("%s read=%d errno: %d (%s)", __func__, (int)nreads, errno, strerror(errno));
break;
}
for (i = 0; i < sizeof(qmiclientId)/sizeof(qmiclientId[0]); i++)
{
if (qmiclientId[i] == fd)
{
pResponse->QMIHdr.QMIType = i;
}
}
pResponse->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI;
pResponse->QMIHdr.Length = cpu_to_le16(nreads + sizeof(QCQMI_HDR) - 1);
pResponse->QMIHdr.CtlFlags = 0x00;
pResponse->QMIHdr.ClientId = (fd&0xFF) ? fd&0xFF : pResponse->QMIHdr.QMIType;
QmiThreadRecvQMI(pResponse);
}
}
}
__GobiNetThread_quit:
GobiNetDeInit();
qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_DISCONNECTED);
QmiThreadRecvQMI(NULL); //main thread may be pending on QmiThreadSendQMI()
dbg_time("%s exit", __func__);
pthread_exit(NULL);
return NULL;
}
const struct qmi_device_ops gobi_qmidev_ops = {
.deinit = GobiNetDeInit,
.send = GobiNetSendQMI,
.read = GobiNetThread,
};
#endif

View File

@ -0,0 +1,46 @@
ifneq ($(CROSS_COMPILE),)
CROSS-COMPILE:=$(CROSS_COMPILE)
endif
#CROSS-COMPILE:=/workspace/buildroot/buildroot-qemu_mips_malta_defconfig/output/host/usr/bin/mips-buildroot-linux-uclibc-
#CROSS-COMPILE:=/workspace/buildroot/buildroot-qemu_arm_vexpress_defconfig/output/host/usr/bin/arm-buildroot-linux-uclibcgnueabi-
#CROSS-COMPILE:=/workspace/buildroot-git/qemu_mips64_malta/output/host/usr/bin/mips-gnu-linux-
ifeq ($(CC),cc)
CC:=$(CROSS-COMPILE)gcc
endif
LD:=$(CROSS-COMPILE)ld
QL_CM_SRC=QmiWwanCM.c GobiNetCM.c main.c QCQMUX.c QMIThread.c util.c qmap_bridge_mode.c mbim-cm.c device.c
QL_CM_SRC+=atc.c atchannel.c at_tok.c
#QL_CM_SRC+=qrtr.c rmnetctl.c
ifeq (1,1)
QL_CM_DHCP=udhcpc.c
else
LIBMNL=libmnl/ifutils.c libmnl/attr.c libmnl/callback.c libmnl/nlmsg.c libmnl/socket.c
DHCP=libmnl/dhcp/dhcpclient.c libmnl/dhcp/dhcpmsg.c libmnl/dhcp/packet.c
QL_CM_DHCP=udhcpc_netlink.c
QL_CM_DHCP+=${LIBMNL}
endif
CFLAGS += -Wall -Wextra -Werror -O1 #-s
LDFLAGS += -lpthread -ldl -lrt
release: clean qmi-proxy mbim-proxy atc-proxy #qrtr-proxy
$(CC) ${CFLAGS} ${QL_CM_SRC} ${QL_CM_DHCP} -o quectel-CM ${LDFLAGS}
debug: clean
$(CC) ${CFLAGS} -g -DCM_DEBUG ${QL_CM_SRC} ${QL_CM_DHCP} -o quectel-CM -lpthread -ldl -lrt
qmi-proxy:
$(CC) ${CFLAGS} quectel-qmi-proxy.c -o quectel-qmi-proxy ${LDFLAGS}
mbim-proxy:
$(CC) ${CFLAGS} quectel-mbim-proxy.c -o quectel-mbim-proxy ${LDFLAGS}
qrtr-proxy:
$(CC) ${CFLAGS} quectel-qrtr-proxy.c -o quectel-qrtr-proxy ${LDFLAGS}
atc-proxy:
$(CC) ${CFLAGS} quectel-atc-proxy.c atchannel.c at_tok.c util.c -o quectel-atc-proxy ${LDFLAGS}
clean:
rm -rf *.o libmnl/*.o quectel-CM quectel-qmi-proxy quectel-mbim-proxy quectel-atc-proxy

View File

@ -0,0 +1,22 @@
bin_PROGRAMS = quectel-CM
QL_CM_SRC=QmiWwanCM.c GobiNetCM.c main.c MPQMUX.c QMIThread.c util.c qmap_bridge_mode.c mbim-cm.c device.c
QL_CM_SRC+=atc.c atchannel.c at_tok.c
#QL_CM_SRC+=qrtr.c rmnetctl.c
QL_CM_DHCP=udhcpc.c
if USE_QRTR
quectel_CM_CFLAGS = -DCONFIG_QRTR
QL_CM_SRC += qrtr.c rmnetctl.c
if USE_MSM_IPC
quectel_CM_CFLAGS += -DUSE_LINUX_MSM_IPC
endif
endif
quectel_CM_SOURCES = ${QL_CM_SRC} ${QL_CM_DHCP}
bin_PROGRAMS += quectel-qmi-proxy
quectel_qmi_proxy_SOURCES = quectel-qmi-proxy.c
bin_PROGRAMS += quectel-mbim-proxy
quectel_mbim_proxy_SOURCES = quectel-mbim-proxy.c
LIBS = -l pthread
CFLAGS = -Wall -Wextra -Werror -O1

View File

@ -0,0 +1,7 @@
This program is fully open source and is public domain software for customers of Quectel.
The APIs of the QMI WWAN interfaces are defined by Qualcomm, and this program complies with the Qualcomm QMI WWAN interface specification.
Customers are free to modify the source code and redistribute it.
For those who are not Quectel customers, all rights are reserved, and any copying or commercial development of this program is not allowed.

View File

@ -0,0 +1,394 @@
/******************************************************************************
@file QCQCTL.h
DESCRIPTION
This module contains QMI QCTL module.
INITIALIZATION AND SEQUENCING REQUIREMENTS
None.
---------------------------------------------------------------------------
Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved.
Quectel Wireless Solution Proprietary and Confidential.
---------------------------------------------------------------------------
******************************************************************************/
#ifndef QCQCTL_H
#define QCQCTL_H
#include "QCQMI.h"
#pragma pack(push, 1)
// ================= QMICTL ==================
// QMICTL Control Flags
#define QMICTL_CTL_FLAG_CMD 0x00
#define QMICTL_CTL_FLAG_RSP 0x01
#define QMICTL_CTL_FLAG_IND 0x02
#if 0
typedef struct _QMICTL_TRANSACTION_ITEM
{
LIST_ENTRY List;
UCHAR TransactionId; // QMICTL transaction id
PVOID Context; // Adapter or IocDev
PIRP Irp;
} QMICTL_TRANSACTION_ITEM, *PQMICTL_TRANSACTION_ITEM;
#endif
typedef struct _QCQMICTL_MSG_HDR
{
UCHAR CtlFlags; // 00-cmd, 01-rsp, 10-ind
UCHAR TransactionId;
USHORT QMICTLType;
USHORT Length;
} __attribute__ ((packed)) QCQMICTL_MSG_HDR, *PQCQMICTL_MSG_HDR;
#define QCQMICTL_MSG_HDR_SIZE sizeof(QCQMICTL_MSG_HDR)
typedef struct _QCQMICTL_MSG_HDR_RESP
{
UCHAR CtlFlags; // 00-cmd, 01-rsp, 10-ind
UCHAR TransactionId;
USHORT QMICTLType;
USHORT Length;
UCHAR TLVType; // 0x02 - result code
USHORT TLVLength; // 4
USHORT QMUXResult; // QMI_RESULT_SUCCESS
// QMI_RESULT_FAILURE
USHORT QMUXError; // QMI_ERR_INVALID_ARG
// QMI_ERR_NO_MEMORY
// QMI_ERR_INTERNAL
// QMI_ERR_FAULT
} __attribute__ ((packed)) QCQMICTL_MSG_HDR_RESP, *PQCQMICTL_MSG_HDR_RESP;
typedef struct _QCQMICTL_MSG
{
UCHAR CtlFlags; // 00-cmd, 01-rsp, 10-ind
UCHAR TransactionId;
USHORT QMICTLType;
USHORT Length;
UCHAR Payload;
} __attribute__ ((packed)) QCQMICTL_MSG, *PQCQMICTL_MSG;
// TLV Header
typedef struct _QCQMICTL_TLV_HDR
{
UCHAR TLVType;
USHORT TLVLength;
} __attribute__ ((packed)) QCQMICTL_TLV_HDR, *PQCQMICTL_TLV_HDR;
#define QCQMICTL_TLV_HDR_SIZE sizeof(QCQMICTL_TLV_HDR)
// QMICTL Type
#define QMICTL_SET_INSTANCE_ID_REQ 0x0020
#define QMICTL_SET_INSTANCE_ID_RESP 0x0020
#define QMICTL_GET_VERSION_REQ 0x0021
#define QMICTL_GET_VERSION_RESP 0x0021
#define QMICTL_GET_CLIENT_ID_REQ 0x0022
#define QMICTL_GET_CLIENT_ID_RESP 0x0022
#define QMICTL_RELEASE_CLIENT_ID_REQ 0x0023
#define QMICTL_RELEASE_CLIENT_ID_RESP 0x0023
#define QMICTL_REVOKE_CLIENT_ID_IND 0x0024
#define QMICTL_INVALID_CLIENT_ID_IND 0x0025
#define QMICTL_SET_DATA_FORMAT_REQ 0x0026
#define QMICTL_SET_DATA_FORMAT_RESP 0x0026
#define QMICTL_SYNC_REQ 0x0027
#define QMICTL_SYNC_RESP 0x0027
#define QMICTL_SYNC_IND 0x0027
#define QMI_MESSAGE_CTL_INTERNAL_PROXY_OPEN 0xFF00
#define QMICTL_FLAG_REQUEST 0x00
#define QMICTL_FLAG_RESPONSE 0x01
#define QMICTL_FLAG_INDICATION 0x02
// QMICTL Message Definitions
typedef struct _QMICTL_SET_INSTANCE_ID_REQ_MSG
{
UCHAR CtlFlags; // QMICTL_FLAG_REQUEST
UCHAR TransactionId;
USHORT QMICTLType; // QMICTL_SET_INSTANCE_ID_REQ
USHORT Length; // 4
UCHAR TLVType; // QCTLV_TYPE_REQUIRED_PARAMETER
USHORT TLVLength; // 1
UCHAR Value; // Host-unique QMI instance for this device driver
} __attribute__ ((packed)) QMICTL_SET_INSTANCE_ID_REQ_MSG, *PQMICTL_SET_INSTANCE_ID_REQ_MSG;
typedef struct _QMICTL_SET_INSTANCE_ID_RESP_MSG
{
UCHAR CtlFlags; // QMICTL_FLAG_RESPONSE
UCHAR TransactionId;
USHORT QMICTLType; // QMICTL_SET_INSTANCE_ID_RESP
USHORT Length;
UCHAR TLVType; // QCTLV_TYPE_RESULT_CODE
USHORT TLVLength; // 0x0004
USHORT QMIResult;
USHORT QMIError;
UCHAR TLV2Type; // QCTLV_TYPE_REQUIRED_PARAMETER
USHORT TLV2Length; // 0x0002
USHORT QMI_ID; // Upper byte is assigned by MSM,
// lower assigned by host
} __attribute__ ((packed)) QMICTL_SET_INSTANCE_ID_RESP_MSG, *PQMICTL_SET_INSTANCE_ID_RESP_MSG;
typedef struct _QMICTL_GET_VERSION_REQ_MSG
{
UCHAR CtlFlags; // QMICTL_FLAG_REQUEST
UCHAR TransactionId;
USHORT QMICTLType; // QMICTL_GET_VERSION_REQ
USHORT Length; // 0
UCHAR TLVType; // QCTLV_TYPE_REQUIRED_PARAMETER
USHORT TLVLength; // var
UCHAR QMUXTypes; // List of one byte QMUX_TYPE values
// 0xFF returns a list of versions for all
// QMUX_TYPEs implemented on the device
} __attribute__ ((packed)) QMICTL_GET_VERSION_REQ_MSG, *PQMICTL_GET_VERSION_REQ_MSG;
typedef struct _QMUX_TYPE_VERSION_STRUCT
{
UCHAR QMUXType;
USHORT MajorVersion;
USHORT MinorVersion;
} __attribute__ ((packed)) QMUX_TYPE_VERSION_STRUCT, *PQMUX_TYPE_VERSION_STRUCT;
typedef struct _ADDENDUM_VERSION_PREAMBLE
{
UCHAR LabelLength;
UCHAR Label;
} __attribute__ ((packed)) ADDENDUM_VERSION_PREAMBLE, *PADDENDUM_VERSION_PREAMBLE;
#define QMICTL_GET_VERSION_RSP_TLV_TYPE_VERSION 0x01
#define QMICTL_GET_VERSION_RSP_TLV_TYPE_ADD_VERSION 0x10
typedef struct _QMICTL_GET_VERSION_RESP_MSG
{
UCHAR CtlFlags; // QMICTL_FLAG_RESPONSE
UCHAR TransactionId;
USHORT QMICTLType; // QMICTL_GET_VERSION_RESP
USHORT Length;
UCHAR TLVType; // QCTLV_TYPE_RESULT_CODE
USHORT TLVLength; // 0x0004
USHORT QMIResult;
USHORT QMIError;
UCHAR TLV2Type; // QCTLV_TYPE_REQUIRED_PARAMETER
USHORT TLV2Length; // var
UCHAR NumElements; // Num of QMUX_TYPE_VERSION_STRUCT
QMUX_TYPE_VERSION_STRUCT TypeVersion[0];
} __attribute__ ((packed)) QMICTL_GET_VERSION_RESP_MSG, *PQMICTL_GET_VERSION_RESP_MSG;
typedef struct _QMICTL_GET_CLIENT_ID_REQ_MSG
{
UCHAR CtlFlags; // QMICTL_FLAG_REQUEST
UCHAR TransactionId;
USHORT QMICTLType; // QMICTL_GET_CLIENT_ID_REQ
USHORT Length;
UCHAR TLVType; // QCTLV_TYPE_REQUIRED_PARAMETER
USHORT TLVLength; // 1
UCHAR QMIType; // QMUX type
} __attribute__ ((packed)) QMICTL_GET_CLIENT_ID_REQ_MSG, *PQMICTL_GET_CLIENT_ID_REQ_MSG;
typedef struct _QMICTL_GET_CLIENT_ID_RESP_MSG
{
UCHAR CtlFlags; // QMICTL_FLAG_RESPONSE
UCHAR TransactionId;
USHORT QMICTLType; // QMICTL_GET_CLIENT_ID_RESP
USHORT Length;
UCHAR TLVType; // QCTLV_TYPE_RESULT_CODE
USHORT TLVLength; // 0x0004
USHORT QMIResult; // result code
USHORT QMIError; // error code
UCHAR TLV2Type; // QCTLV_TYPE_REQUIRED_PARAMETER
USHORT TLV2Length; // 2
UCHAR QMIType;
UCHAR ClientId;
} __attribute__ ((packed)) QMICTL_GET_CLIENT_ID_RESP_MSG, *PQMICTL_GET_CLIENT_ID_RESP_MSG;
typedef struct _QMICTL_RELEASE_CLIENT_ID_REQ_MSG
{
UCHAR CtlFlags; // QMICTL_FLAG_REQUEST
UCHAR TransactionId;
USHORT QMICTLType; // QMICTL_RELEASE_CLIENT_ID_REQ
USHORT Length;
UCHAR TLVType; // QCTLV_TYPE_REQUIRED_PARAMETER
USHORT TLVLength; // 0x0002
UCHAR QMIType;
UCHAR ClientId;
} __attribute__ ((packed)) QMICTL_RELEASE_CLIENT_ID_REQ_MSG, *PQMICTL_RELEASE_CLIENT_ID_REQ_MSG;
typedef struct _QMICTL_RELEASE_CLIENT_ID_RESP_MSG
{
UCHAR CtlFlags; // QMICTL_FLAG_RESPONSE
UCHAR TransactionId;
USHORT QMICTLType; // QMICTL_RELEASE_CLIENT_ID_RESP
USHORT Length;
UCHAR TLVType; // QCTLV_TYPE_RESULT_CODE
USHORT TLVLength; // 0x0004
USHORT QMIResult; // result code
USHORT QMIError; // error code
UCHAR TLV2Type; // QCTLV_TYPE_REQUIRED_PARAMETER
USHORT TLV2Length; // 2
UCHAR QMIType;
UCHAR ClientId;
} __attribute__ ((packed)) QMICTL_RELEASE_CLIENT_ID_RESP_MSG, *PQMICTL_RELEASE_CLIENT_ID_RESP_MSG;
typedef struct _QMICTL_REVOKE_CLIENT_ID_IND_MSG
{
UCHAR CtlFlags; // QMICTL_FLAG_INDICATION
UCHAR TransactionId;
USHORT QMICTLType; // QMICTL_REVOKE_CLIENT_ID_IND
USHORT Length;
UCHAR TLVType; // QCTLV_TYPE_REQUIRED_PARAMETER
USHORT TLVLength; // 0x0002
UCHAR QMIType;
UCHAR ClientId;
} __attribute__ ((packed)) QMICTL_REVOKE_CLIENT_ID_IND_MSG, *PQMICTL_REVOKE_CLIENT_ID_IND_MSG;
typedef struct _QMICTL_INVALID_CLIENT_ID_IND_MSG
{
UCHAR CtlFlags; // QMICTL_FLAG_INDICATION
UCHAR TransactionId;
USHORT QMICTLType; // QMICTL_REVOKE_CLIENT_ID_IND
USHORT Length;
UCHAR TLVType; // QCTLV_TYPE_REQUIRED_PARAMETER
USHORT TLVLength; // 0x0002
UCHAR QMIType;
UCHAR ClientId;
} __attribute__ ((packed)) QMICTL_INVALID_CLIENT_ID_IND_MSG, *PQMICTL_INVALID_CLIENT_ID_IND_MSG;
typedef struct _QMICTL_SET_DATA_FORMAT_REQ_MSG
{
UCHAR CtlFlags; // QMICTL_FLAG_REQUEST
UCHAR TransactionId;
USHORT QMICTLType; // QMICTL_SET_DATA_FORMAT_REQ
USHORT Length;
UCHAR TLVType; // QCTLV_TYPE_REQUIRED_PARAMETER
USHORT TLVLength; // 1
UCHAR DataFormat; // 0-default; 1-QoS hdr present
} __attribute__ ((packed)) QMICTL_SET_DATA_FORMAT_REQ_MSG, *PQMICTL_SET_DATA_FORMAT_REQ_MSG;
#ifdef QC_IP_MODE
#define SET_DATA_FORMAT_TLV_TYPE_LINK_PROTO 0x10
#define SET_DATA_FORMAT_LINK_PROTO_ETH 0x0001
#define SET_DATA_FORMAT_LINK_PROTO_IP 0x0002
typedef struct _QMICTL_SET_DATA_FORMAT_TLV_LINK_PROT
{
UCHAR TLVType; // Link-Layer Protocol
USHORT TLVLength; // 2
USHORT LinkProt; // 0x1: ETH; 0x2: IP
} QMICTL_SET_DATA_FORMAT_TLV_LINK_PROT, *PQMICTL_SET_DATA_FORMAT_TLV_LINK_PROT;
#ifdef QCMP_UL_TLP
#define SET_DATA_FORMAT_TLV_TYPE_UL_TLP 0x11
typedef struct _QMICTL_SET_DATA_FORMAT_TLV_UL_TLP
{
UCHAR TLVType; // 0x11, Uplink TLP Setting
USHORT TLVLength; // 1
UCHAR UlTlpSetting; // 0x0: Disable; 0x01: Enable
} QMICTL_SET_DATA_FORMAT_TLV_UL_TLP, *PQMICTL_SET_DATA_FORMAT_TLV_UL_TLP;
#endif // QCMP_UL_TLP
#ifdef QCMP_DL_TLP
#define SET_DATA_FORMAT_TLV_TYPE_DL_TLP 0x13
typedef struct _QMICTL_SET_DATA_FORMAT_TLV_DL_TLP
{
UCHAR TLVType; // 0x11, Uplink TLP Setting
USHORT TLVLength; // 1
UCHAR DlTlpSetting; // 0x0: Disable; 0x01: Enable
} QMICTL_SET_DATA_FORMAT_TLV_DL_TLP, *PQMICTL_SET_DATA_FORMAT_TLV_DL_TLP;
#endif // QCMP_DL_TLP
#endif // QC_IP_MODE
#ifdef MP_QCQOS_ENABLED
#define SET_DATA_FORMAT_TLV_TYPE_QOS_SETTING 0x12
typedef struct _QMICTL_SET_DATA_FORMAT_TLV_QOS_SETTING
{
UCHAR TLVType; // 0x12, QoS setting
USHORT TLVLength; // 1
UCHAR QosSetting; // 0x0: Disable; 0x01: Enable
} QMICTL_SET_DATA_FORMAT_TLV_QOS_SETTING, *PQMICTL_SET_DATA_FORMAT_TLV_QOS_SETTING;
#endif // MP_QCQOS_ENABLED
typedef struct _QMICTL_SET_DATA_FORMAT_RESP_MSG
{
UCHAR CtlFlags; // QMICTL_FLAG_RESPONSE
UCHAR TransactionId;
USHORT QMICTLType; // QMICTL_SET_DATA_FORMAT_RESP
USHORT Length;
UCHAR TLVType; // QCTLV_TYPE_RESULT_CODE
USHORT TLVLength; // 0x0004
USHORT QMIResult; // result code
USHORT QMIError; // error code
} __attribute__ ((packed)) QMICTL_SET_DATA_FORMAT_RESP_MSG, *PQMICTL_SET_DATA_FORMAT_RESP_MSG;
typedef struct _QMICTL_SYNC_REQ_MSG
{
UCHAR CtlFlags; // QMICTL_FLAG_REQUEST
UCHAR TransactionId;
USHORT QMICTLType; // QMICTL_CTL_SYNC_REQ
USHORT Length; // 0
} __attribute__ ((packed)) QMICTL_SYNC_REQ_MSG, *PQMICTL_SYNC_REQ_MSG;
typedef struct _QMICTL_SYNC_RESP_MSG
{
UCHAR CtlFlags; // QMICTL_FLAG_RESPONSE
UCHAR TransactionId;
USHORT QMICTLType; // QMICTL_CTL_SYNC_RESP
USHORT Length;
UCHAR TLVType; // QCTLV_TYPE_RESULT_CODE
USHORT TLVLength; // 0x0004
USHORT QMIResult;
USHORT QMIError;
} __attribute__ ((packed)) QMICTL_SYNC_RESP_MSG, *PQMICTL_SYNC_RESP_MSG;
typedef struct _QMICTL_SYNC_IND_MSG
{
UCHAR CtlFlags; // QMICTL_FLAG_INDICATION
UCHAR TransactionId;
USHORT QMICTLType; // QMICTL_REVOKE_CLIENT_ID_IND
USHORT Length;
} __attribute__ ((packed)) QMICTL_SYNC_IND_MSG, *PQMICTL_SYNC_IND_MSG;
typedef struct _QMICTL_LIBQMI_PROXY_OPEN_MSG
{
UCHAR CtlFlags; // QMICTL_FLAG_RESPONSE
UCHAR TransactionId;
USHORT QMICTLType; // QMICTL_SET_DATA_FORMAT_RESP
USHORT Length;
UCHAR TLVType; // QCTLV_TYPE_RESULT_CODE
USHORT TLVLength; // 0x0004
    char device_path[0];          // device path
} __attribute__ ((packed)) QMICTL_LIBQMI_PROXY_OPEN_MSG, *PQMICTL_LIBQMI_PROXY_OPEN_MSG;
typedef struct _QMICTL_MSG
{
union
{
// Message Header
QCQMICTL_MSG_HDR QMICTLMsgHdr;
QCQMICTL_MSG_HDR_RESP QMICTLMsgHdrRsp;
// QMICTL Message
QMICTL_SET_INSTANCE_ID_REQ_MSG SetInstanceIdReq;
QMICTL_SET_INSTANCE_ID_RESP_MSG SetInstanceIdRsp;
QMICTL_GET_VERSION_REQ_MSG GetVersionReq;
QMICTL_GET_VERSION_RESP_MSG GetVersionRsp;
QMICTL_GET_CLIENT_ID_REQ_MSG GetClientIdReq;
QMICTL_GET_CLIENT_ID_RESP_MSG GetClientIdRsp;
QMICTL_RELEASE_CLIENT_ID_REQ_MSG ReleaseClientIdReq;
QMICTL_RELEASE_CLIENT_ID_RESP_MSG ReleaseClientIdRsp;
QMICTL_REVOKE_CLIENT_ID_IND_MSG RevokeClientIdInd;
QMICTL_INVALID_CLIENT_ID_IND_MSG InvalidClientIdInd;
QMICTL_SET_DATA_FORMAT_REQ_MSG SetDataFormatReq;
QMICTL_SET_DATA_FORMAT_RESP_MSG SetDataFormatRsp;
QMICTL_SYNC_REQ_MSG SyncReq;
QMICTL_SYNC_RESP_MSG SyncRsp;
QMICTL_SYNC_IND_MSG SyncInd;
QMICTL_LIBQMI_PROXY_OPEN_MSG LibQmiProxyOpenReq;
};
} __attribute__ ((packed)) QMICTL_MSG, *PQMICTL_MSG;
#pragma pack(pop)
#endif //QCQCTL_H

View File

@ -0,0 +1,320 @@
/******************************************************************************
@file QCQMI.h
DESCRIPTION
This module contains QMI module.
INITIALIZATION AND SEQUENCING REQUIREMENTS
None.
---------------------------------------------------------------------------
Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved.
Quectel Wireless Solution Proprietary and Confidential.
---------------------------------------------------------------------------
******************************************************************************/
#ifndef USBQMI_H
#define USBQMI_H
typedef uint8_t uint8;
typedef int8_t int8;
typedef uint16_t uint16;
typedef int16_t int16;
typedef uint32_t uint32;
typedef uint64_t uint64;
typedef signed char CHAR;
typedef unsigned char UCHAR;
typedef short SHORT;
typedef unsigned short USHORT;
typedef int INT;
typedef unsigned int UINT;
typedef long LONG;
typedef unsigned int ULONG;
typedef unsigned long long ULONG64;
typedef signed char *PCHAR;
typedef unsigned char *PUCHAR;
typedef int *PINT;
typedef int BOOL;
#define TRUE (1 == 1)
#define FALSE (1 != 1)
#define QMICTL_SUPPORTED_MAJOR_VERSION 1
#define QMICTL_SUPPORTED_MINOR_VERSION 0
#pragma pack(push, 1)
// ========= USB Control Message ==========
#define USB_CTL_MSG_TYPE_QMI 0x01
// USB Control Message
typedef struct _QCUSB_CTL_MSG_HDR
{
UCHAR IFType;
} __attribute__ ((packed)) QCUSB_CTL_MSG_HDR, *PQCUSB_CTL_MSG_HDR;
#define QCUSB_CTL_MSG_HDR_SIZE sizeof(QCUSB_CTL_MSG_HDR)
typedef struct _QCUSB_CTL_MSG
{
UCHAR IFType;
UCHAR Message;
} __attribute__ ((packed)) QCUSB_CTL_MSG, *PQCUSB_CTL_MSG;
#define QCTLV_TYPE_REQUIRED_PARAMETER 0x01
#define QCTLV_TYPE_RESULT_CODE 0x02
// ================= QMI ==================
// Define QMI Type
typedef enum _QMI_SERVICE_TYPE
{
QMUX_TYPE_CTL = 0x00,
QMUX_TYPE_WDS = 0x01,
QMUX_TYPE_DMS = 0x02,
QMUX_TYPE_NAS = 0x03,
QMUX_TYPE_QOS = 0x04,
QMUX_TYPE_WMS = 0x05,
QMUX_TYPE_PDS = 0x06,
QMUX_TYPE_UIM = 0x0B,
QMUX_TYPE_WDS_IPV6 = 0x11,
QMUX_TYPE_WDS_ADMIN = 0x1A,
QMUX_TYPE_COEX = 0x22,
QMUX_TYPE_MAX = 0xFF,
QMUX_TYPE_ALL = 0xFF
} QMI_SERVICE_TYPE;
typedef enum _QMI_RESULT_CODE_TYPE
{
QMI_RESULT_SUCCESS = 0x0000,
QMI_RESULT_FAILURE = 0x0001
} QMI_RESULT_CODE_TYPE;
typedef enum _QMI_ERROR_CODE_TYPE
{
QMI_ERR_NONE = 0x0000
,QMI_ERR_MALFORMED_MSG = 0x0001
,QMI_ERR_NO_MEMORY = 0x0002
,QMI_ERR_INTERNAL = 0x0003
,QMI_ERR_ABORTED = 0x0004
,QMI_ERR_CLIENT_IDS_EXHAUSTED = 0x0005
,QMI_ERR_UNABORTABLE_TRANSACTION = 0x0006
,QMI_ERR_INVALID_CLIENT_ID = 0x0007
,QMI_ERR_NO_THRESHOLDS = 0x0008
,QMI_ERR_INVALID_HANDLE = 0x0009
,QMI_ERR_INVALID_PROFILE = 0x000A
,QMI_ERR_INVALID_PINID = 0x000B
,QMI_ERR_INCORRECT_PIN = 0x000C
,QMI_ERR_NO_NETWORK_FOUND = 0x000D
,QMI_ERR_CALL_FAILED = 0x000E
,QMI_ERR_OUT_OF_CALL = 0x000F
,QMI_ERR_NOT_PROVISIONED = 0x0010
,QMI_ERR_MISSING_ARG = 0x0011
,QMI_ERR_ARG_TOO_LONG = 0x0013
,QMI_ERR_INVALID_TX_ID = 0x0016
,QMI_ERR_DEVICE_IN_USE = 0x0017
,QMI_ERR_OP_NETWORK_UNSUPPORTED = 0x0018
,QMI_ERR_OP_DEVICE_UNSUPPORTED = 0x0019
,QMI_ERR_NO_EFFECT = 0x001A
,QMI_ERR_NO_FREE_PROFILE = 0x001B
,QMI_ERR_INVALID_PDP_TYPE = 0x001C
,QMI_ERR_INVALID_TECH_PREF = 0x001D
,QMI_ERR_INVALID_PROFILE_TYPE = 0x001E
,QMI_ERR_INVALID_SERVICE_TYPE = 0x001F
,QMI_ERR_INVALID_REGISTER_ACTION = 0x0020
,QMI_ERR_INVALID_PS_ATTACH_ACTION = 0x0021
,QMI_ERR_AUTHENTICATION_FAILED = 0x0022
,QMI_ERR_PIN_BLOCKED = 0x0023
,QMI_ERR_PIN_PERM_BLOCKED = 0x0024
,QMI_ERR_SIM_NOT_INITIALIZED = 0x0025
,QMI_ERR_MAX_QOS_REQUESTS_IN_USE = 0x0026
,QMI_ERR_INCORRECT_FLOW_FILTER = 0x0027
,QMI_ERR_NETWORK_QOS_UNAWARE = 0x0028
,QMI_ERR_INVALID_QOS_ID = 0x0029
,QMI_ERR_INVALID_ID = 0x0029
,QMI_ERR_REQUESTED_NUM_UNSUPPORTED = 0x002A
,QMI_ERR_INTERFACE_NOT_FOUND = 0x002B
,QMI_ERR_FLOW_SUSPENDED = 0x002C
,QMI_ERR_INVALID_DATA_FORMAT = 0x002D
,QMI_ERR_GENERAL = 0x002E
,QMI_ERR_UNKNOWN = 0x002F
,QMI_ERR_INVALID_ARG = 0x0030
,QMI_ERR_INVALID_INDEX = 0x0031
,QMI_ERR_NO_ENTRY = 0x0032
,QMI_ERR_DEVICE_STORAGE_FULL = 0x0033
,QMI_ERR_DEVICE_NOT_READY = 0x0034
,QMI_ERR_NETWORK_NOT_READY = 0x0035
,QMI_ERR_CAUSE_CODE = 0x0036
,QMI_ERR_MESSAGE_NOT_SENT = 0x0037
,QMI_ERR_MESSAGE_DELIVERY_FAILURE = 0x0038
,QMI_ERR_INVALID_MESSAGE_ID = 0x0039
,QMI_ERR_ENCODING = 0x003A
,QMI_ERR_AUTHENTICATION_LOCK = 0x003B
,QMI_ERR_INVALID_TRANSITION = 0x003C
,QMI_ERR_NOT_A_MCAST_IFACE = 0x003D
,QMI_ERR_MAX_MCAST_REQUESTS_IN_USE = 0x003E
,QMI_ERR_INVALID_MCAST_HANDLE = 0x003F
,QMI_ERR_INVALID_IP_FAMILY_PREF = 0x0040
,QMI_ERR_SESSION_INACTIVE = 0x0041
,QMI_ERR_SESSION_INVALID = 0x0042
,QMI_ERR_SESSION_OWNERSHIP = 0x0043
,QMI_ERR_INSUFFICIENT_RESOURCES = 0x0044
,QMI_ERR_DISABLED = 0x0045
,QMI_ERR_INVALID_OPERATION = 0x0046
,QMI_ERR_INVALID_QMI_CMD = 0x0047
,QMI_ERR_TPDU_TYPE = 0x0048
,QMI_ERR_SMSC_ADDR = 0x0049
,QMI_ERR_INFO_UNAVAILABLE = 0x004A
,QMI_ERR_SEGMENT_TOO_LONG = 0x004B
,QMI_ERR_SEGMENT_ORDER = 0x004C
,QMI_ERR_BUNDLING_NOT_SUPPORTED = 0x004D
,QMI_ERR_OP_PARTIAL_FAILURE = 0x004E
,QMI_ERR_POLICY_MISMATCH = 0x004F
,QMI_ERR_SIM_FILE_NOT_FOUND = 0x0050
,QMI_ERR_EXTENDED_INTERNAL = 0x0051
,QMI_ERR_ACCESS_DENIED = 0x0052
,QMI_ERR_HARDWARE_RESTRICTED = 0x0053
,QMI_ERR_ACK_NOT_SENT = 0x0054
,QMI_ERR_INJECT_TIMEOUT = 0x0055
,QMI_ERR_INCOMPATIBLE_STATE = 0x005A
,QMI_ERR_FDN_RESTRICT = 0x005B
,QMI_ERR_SUPS_FAILURE_CAUSE = 0x005C
,QMI_ERR_NO_RADIO = 0x005D
,QMI_ERR_NOT_SUPPORTED = 0x005E
,QMI_ERR_NO_SUBSCRIPTION = 0x005F
,QMI_ERR_CARD_CALL_CONTROL_FAILED = 0x0060
,QMI_ERR_NETWORK_ABORTED = 0x0061
,QMI_ERR_MSG_BLOCKED = 0x0062
,QMI_ERR_INVALID_SESSION_TYPE = 0x0064
,QMI_ERR_INVALID_PB_TYPE = 0x0065
,QMI_ERR_NO_SIM = 0x0066
,QMI_ERR_PB_NOT_READY = 0x0067
,QMI_ERR_PIN_RESTRICTION = 0x0068
,QMI_ERR_PIN2_RESTRICTION = 0x0069
,QMI_ERR_PUK_RESTRICTION = 0x006A
,QMI_ERR_PUK2_RESTRICTION = 0x006B
,QMI_ERR_PB_ACCESS_RESTRICTED = 0x006C
,QMI_ERR_PB_DELETE_IN_PROG = 0x006D
,QMI_ERR_PB_TEXT_TOO_LONG = 0x006E
,QMI_ERR_PB_NUMBER_TOO_LONG = 0x006F
,QMI_ERR_PB_HIDDEN_KEY_RESTRICTION = 0x0070
} QMI_ERROR_CODE_TYPE;
#define QCQMI_CTL_FLAG_SERVICE 0x80
#define QCQMI_CTL_FLAG_CTL_POINT 0x00
typedef struct _QCQMI_HDR
{
UCHAR IFType;
USHORT Length;
UCHAR CtlFlags; // reserved
UCHAR QMIType;
UCHAR ClientId;
} __attribute__ ((packed)) QCQMI_HDR, *PQCQMI_HDR;
#define QCQMI_HDR_SIZE (sizeof(QCQMI_HDR)-1)
typedef struct _QCQMI
{
UCHAR IFType;
USHORT Length;
UCHAR CtlFlags; // reserved
UCHAR QMIType;
UCHAR ClientId;
UCHAR SDU;
} __attribute__ ((packed)) QCQMI, *PQCQMI;
typedef struct _QMI_SERVICE_VERSION
{
USHORT Major;
USHORT Minor;
USHORT AddendumMajor;
USHORT AddendumMinor;
} __attribute__ ((packed)) QMI_SERVICE_VERSION, *PQMI_SERVICE_VERSION;
// ================= QMUX ==================
#define QMUX_MSG_OVERHEAD_BYTES 4 // Type(USHORT) Length(USHORT) -- header
#define QMUX_BROADCAST_CID 0xFF
typedef struct _QCQMUX_HDR
{
UCHAR CtlFlags; // 0: single QMUX Msg; 1:
USHORT TransactionId;
} __attribute__ ((packed)) QCQMUX_HDR, *PQCQMUX_HDR;
typedef struct _QCQMUX
{
UCHAR CtlFlags; // 0: single QMUX Msg; 1:
USHORT TransactionId;
UCHAR Message; // Type(2), Length(2), Value
} __attribute__ ((packed)) QCQMUX, *PQCQMUX;
#define QCQMUX_HDR_SIZE sizeof(QCQMUX_HDR)
typedef struct _QCQMUX_MSG_HDR
{
USHORT Type;
USHORT Length;
} __attribute__ ((packed)) QCQMUX_MSG_HDR, *PQCQMUX_MSG_HDR;
#define QCQMUX_MSG_HDR_SIZE sizeof(QCQMUX_MSG_HDR)
typedef struct _QCQMUX_MSG_HDR_RESP
{
USHORT Type;
USHORT Length;
UCHAR TLVType; // 0x02 - result code
USHORT TLVLength; // 4
USHORT QMUXResult; // QMI_RESULT_SUCCESS
// QMI_RESULT_FAILURE
USHORT QMUXError; // QMI_ERR_INVALID_ARG
// QMI_ERR_NO_MEMORY
// QMI_ERR_INTERNAL
// QMI_ERR_FAULT
} __attribute__ ((packed)) QCQMUX_MSG_HDR_RESP, *PQCQMUX_MSG_HDR_RESP;
typedef struct _QCQMUX_TLV
{
UCHAR Type;
USHORT Length;
UCHAR Value;
} __attribute__ ((packed)) QCQMUX_TLV, *PQCQMUX_TLV;
typedef struct _QMI_TLV_HDR
{
UCHAR TLVType;
USHORT TLVLength;
} __attribute__ ((packed)) QMI_TLV_HDR, *PQMI_TLV_HDR;
typedef struct _QMI_TLV
{
UCHAR TLVType;
USHORT TLVLength;
union {
int8_t s8;
uint8_t u8;
int16_t s16;
uint16_t u16;
int32_t s32;
uint32_t u32;
uint64_t u64;
};
} __attribute__ ((packed)) QMI_TLV, *PQMI_TLV;
// QMUX Message Definitions -- QMI SDU
#define QMUX_CTL_FLAG_SINGLE_MSG 0x00
#define QMUX_CTL_FLAG_COMPOUND_MSG 0x01
#define QMUX_CTL_FLAG_TYPE_CMD 0x00
#define QMUX_CTL_FLAG_TYPE_RSP 0x02
#define QMUX_CTL_FLAG_TYPE_IND 0x04
#define QMUX_CTL_FLAG_MASK_COMPOUND 0x01
#define QMUX_CTL_FLAG_MASK_TYPE 0x06 // 00-cmd, 01-rsp, 10-ind
#pragma pack(pop)
#endif // USBQMI_H

View File

@ -0,0 +1,477 @@
/******************************************************************************
@file MPQMUX.c
@brief QMI mux.
DESCRIPTION
Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules.
INITIALIZATION AND SEQUENCING REQUIREMENTS
None.
---------------------------------------------------------------------------
Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved.
Quectel Wireless Solution Proprietary and Confidential.
---------------------------------------------------------------------------
******************************************************************************/
#include "QMIThread.h"
static char line[1024];
static pthread_mutex_t dumpQMIMutex = PTHREAD_MUTEX_INITIALIZER;
#undef dbg
#define dbg( format, arg... ) do {if (strlen(line) < sizeof(line)) snprintf(&line[strlen(line)], sizeof(line) - strlen(line), format, ## arg);} while (0)
PQMI_TLV_HDR GetTLV (PQCQMUX_MSG_HDR pQMUXMsgHdr, int TLVType);
typedef struct {
UINT type;
const char *name;
} QMI_NAME_T;
#define qmi_name_item(type) {type, #type}
#if 0
static const QMI_NAME_T qmi_IFType[] = {
{USB_CTL_MSG_TYPE_QMI, "USB_CTL_MSG_TYPE_QMI"},
};
static const QMI_NAME_T qmi_CtlFlags[] = {
qmi_name_item(QMICTL_CTL_FLAG_CMD),
qmi_name_item(QCQMI_CTL_FLAG_SERVICE),
};
static const QMI_NAME_T qmi_QMIType[] = {
qmi_name_item(QMUX_TYPE_CTL),
qmi_name_item(QMUX_TYPE_WDS),
qmi_name_item(QMUX_TYPE_DMS),
qmi_name_item(QMUX_TYPE_NAS),
qmi_name_item(QMUX_TYPE_QOS),
qmi_name_item(QMUX_TYPE_WMS),
qmi_name_item(QMUX_TYPE_PDS),
qmi_name_item(QMUX_TYPE_WDS_ADMIN),
qmi_name_item(QMUX_TYPE_COEX),
};
static const QMI_NAME_T qmi_ctl_CtlFlags[] = {
qmi_name_item(QMICTL_FLAG_REQUEST),
qmi_name_item(QMICTL_FLAG_RESPONSE),
qmi_name_item(QMICTL_FLAG_INDICATION),
};
#endif
static const QMI_NAME_T qmux_ctl_QMICTLType[] = {
// QMICTL Type
qmi_name_item(QMICTL_SET_INSTANCE_ID_REQ), // 0x0020
qmi_name_item(QMICTL_SET_INSTANCE_ID_RESP), // 0x0020
qmi_name_item(QMICTL_GET_VERSION_REQ), // 0x0021
qmi_name_item(QMICTL_GET_VERSION_RESP), // 0x0021
qmi_name_item(QMICTL_GET_CLIENT_ID_REQ), // 0x0022
qmi_name_item(QMICTL_GET_CLIENT_ID_RESP), // 0x0022
qmi_name_item(QMICTL_RELEASE_CLIENT_ID_REQ), // 0x0023
qmi_name_item(QMICTL_RELEASE_CLIENT_ID_RESP), // 0x0023
qmi_name_item(QMICTL_REVOKE_CLIENT_ID_IND), // 0x0024
qmi_name_item(QMICTL_INVALID_CLIENT_ID_IND), // 0x0025
qmi_name_item(QMICTL_SET_DATA_FORMAT_REQ), // 0x0026
qmi_name_item(QMICTL_SET_DATA_FORMAT_RESP), // 0x0026
qmi_name_item(QMICTL_SYNC_REQ), // 0x0027
qmi_name_item(QMICTL_SYNC_RESP), // 0x0027
qmi_name_item(QMICTL_SYNC_IND), // 0x0027
};
static const QMI_NAME_T qmux_CtlFlags[] = {
qmi_name_item(QMUX_CTL_FLAG_TYPE_CMD),
qmi_name_item(QMUX_CTL_FLAG_TYPE_RSP),
qmi_name_item(QMUX_CTL_FLAG_TYPE_IND),
};
static const QMI_NAME_T qmux_wds_Type[] = {
qmi_name_item(QMIWDS_SET_EVENT_REPORT_REQ), // 0x0001
qmi_name_item(QMIWDS_SET_EVENT_REPORT_RESP), // 0x0001
qmi_name_item(QMIWDS_EVENT_REPORT_IND), // 0x0001
qmi_name_item(QMIWDS_START_NETWORK_INTERFACE_REQ), // 0x0020
qmi_name_item(QMIWDS_START_NETWORK_INTERFACE_RESP), // 0x0020
qmi_name_item(QMIWDS_STOP_NETWORK_INTERFACE_REQ), // 0x0021
qmi_name_item(QMIWDS_STOP_NETWORK_INTERFACE_RESP), // 0x0021
qmi_name_item(QMIWDS_GET_PKT_SRVC_STATUS_REQ), // 0x0022
qmi_name_item(QMIWDS_GET_PKT_SRVC_STATUS_RESP), // 0x0022
qmi_name_item(QMIWDS_GET_PKT_SRVC_STATUS_IND), // 0x0022
qmi_name_item(QMIWDS_GET_CURRENT_CHANNEL_RATE_REQ), // 0x0023
qmi_name_item(QMIWDS_GET_CURRENT_CHANNEL_RATE_RESP), // 0x0023
qmi_name_item(QMIWDS_GET_PKT_STATISTICS_REQ), // 0x0024
qmi_name_item(QMIWDS_GET_PKT_STATISTICS_RESP), // 0x0024
qmi_name_item(QMIWDS_MODIFY_PROFILE_SETTINGS_REQ), // 0x0028
qmi_name_item(QMIWDS_MODIFY_PROFILE_SETTINGS_RESP), // 0x0028
qmi_name_item(QMIWDS_GET_PROFILE_SETTINGS_REQ), // 0x002B
qmi_name_item(QMIWDS_GET_PROFILE_SETTINGS_RESP), // 0x002B
qmi_name_item(QMIWDS_GET_DEFAULT_SETTINGS_REQ), // 0x002C
qmi_name_item(QMIWDS_GET_DEFAULT_SETTINGS_RESP), // 0x002C
qmi_name_item(QMIWDS_GET_RUNTIME_SETTINGS_REQ), // 0x002D
qmi_name_item(QMIWDS_GET_RUNTIME_SETTINGS_RESP), // 0x002D
qmi_name_item(QMIWDS_GET_MIP_MODE_REQ), // 0x002F
qmi_name_item(QMIWDS_GET_MIP_MODE_RESP), // 0x002F
qmi_name_item(QMIWDS_GET_DATA_BEARER_REQ), // 0x0037
qmi_name_item(QMIWDS_GET_DATA_BEARER_RESP), // 0x0037
qmi_name_item(QMIWDS_DUN_CALL_INFO_REQ), // 0x0038
qmi_name_item(QMIWDS_DUN_CALL_INFO_RESP), // 0x0038
qmi_name_item(QMIWDS_DUN_CALL_INFO_IND), // 0x0038
qmi_name_item(QMIWDS_SET_CLIENT_IP_FAMILY_PREF_REQ), // 0x004D
qmi_name_item(QMIWDS_SET_CLIENT_IP_FAMILY_PREF_RESP), // 0x004D
qmi_name_item(QMIWDS_SET_AUTO_CONNECT_REQ), // 0x0051
qmi_name_item(QMIWDS_SET_AUTO_CONNECT_RESP), // 0x0051
qmi_name_item(QMIWDS_BIND_MUX_DATA_PORT_REQ), // 0x00A2
qmi_name_item(QMIWDS_BIND_MUX_DATA_PORT_RESP), // 0x00A2
};
static const QMI_NAME_T qmux_dms_Type[] = {
// ======================= DMS ==============================
qmi_name_item(QMIDMS_SET_EVENT_REPORT_REQ), // 0x0001
qmi_name_item(QMIDMS_SET_EVENT_REPORT_RESP), // 0x0001
qmi_name_item(QMIDMS_EVENT_REPORT_IND), // 0x0001
qmi_name_item(QMIDMS_GET_DEVICE_CAP_REQ), // 0x0020
qmi_name_item(QMIDMS_GET_DEVICE_CAP_RESP), // 0x0020
qmi_name_item(QMIDMS_GET_DEVICE_MFR_REQ), // 0x0021
qmi_name_item(QMIDMS_GET_DEVICE_MFR_RESP), // 0x0021
qmi_name_item(QMIDMS_GET_DEVICE_MODEL_ID_REQ), // 0x0022
qmi_name_item(QMIDMS_GET_DEVICE_MODEL_ID_RESP), // 0x0022
qmi_name_item(QMIDMS_GET_DEVICE_REV_ID_REQ), // 0x0023
qmi_name_item(QMIDMS_GET_DEVICE_REV_ID_RESP), // 0x0023
qmi_name_item(QMIDMS_GET_MSISDN_REQ), // 0x0024
qmi_name_item(QMIDMS_GET_MSISDN_RESP), // 0x0024
qmi_name_item(QMIDMS_GET_DEVICE_SERIAL_NUMBERS_REQ), // 0x0025
qmi_name_item(QMIDMS_GET_DEVICE_SERIAL_NUMBERS_RESP), // 0x0025
qmi_name_item(QMIDMS_UIM_SET_PIN_PROTECTION_REQ), // 0x0027
qmi_name_item(QMIDMS_UIM_SET_PIN_PROTECTION_RESP), // 0x0027
qmi_name_item(QMIDMS_UIM_VERIFY_PIN_REQ), // 0x0028
qmi_name_item(QMIDMS_UIM_VERIFY_PIN_RESP), // 0x0028
qmi_name_item(QMIDMS_UIM_UNBLOCK_PIN_REQ), // 0x0029
qmi_name_item(QMIDMS_UIM_UNBLOCK_PIN_RESP), // 0x0029
qmi_name_item(QMIDMS_UIM_CHANGE_PIN_REQ), // 0x002A
qmi_name_item(QMIDMS_UIM_CHANGE_PIN_RESP), // 0x002A
qmi_name_item(QMIDMS_UIM_GET_PIN_STATUS_REQ), // 0x002B
qmi_name_item(QMIDMS_UIM_GET_PIN_STATUS_RESP), // 0x002B
qmi_name_item(QMIDMS_GET_DEVICE_HARDWARE_REV_REQ), // 0x002C
qmi_name_item(QMIDMS_GET_DEVICE_HARDWARE_REV_RESP), // 0x002C
qmi_name_item(QMIDMS_GET_OPERATING_MODE_REQ), // 0x002D
qmi_name_item(QMIDMS_GET_OPERATING_MODE_RESP), // 0x002D
qmi_name_item(QMIDMS_SET_OPERATING_MODE_REQ), // 0x002E
qmi_name_item(QMIDMS_SET_OPERATING_MODE_RESP), // 0x002E
qmi_name_item(QMIDMS_GET_ACTIVATED_STATUS_REQ), // 0x0031
qmi_name_item(QMIDMS_GET_ACTIVATED_STATUS_RESP), // 0x0031
qmi_name_item(QMIDMS_ACTIVATE_AUTOMATIC_REQ), // 0x0032
qmi_name_item(QMIDMS_ACTIVATE_AUTOMATIC_RESP), // 0x0032
qmi_name_item(QMIDMS_ACTIVATE_MANUAL_REQ), // 0x0033
qmi_name_item(QMIDMS_ACTIVATE_MANUAL_RESP), // 0x0033
qmi_name_item(QMIDMS_UIM_GET_ICCID_REQ), // 0x003C
qmi_name_item(QMIDMS_UIM_GET_ICCID_RESP), // 0x003C
qmi_name_item(QMIDMS_UIM_GET_CK_STATUS_REQ), // 0x0040
qmi_name_item(QMIDMS_UIM_GET_CK_STATUS_RESP), // 0x0040
qmi_name_item(QMIDMS_UIM_SET_CK_PROTECTION_REQ), // 0x0041
qmi_name_item(QMIDMS_UIM_SET_CK_PROTECTION_RESP), // 0x0041
qmi_name_item(QMIDMS_UIM_UNBLOCK_CK_REQ), // 0x0042
qmi_name_item(QMIDMS_UIM_UNBLOCK_CK_RESP), // 0x0042
qmi_name_item(QMIDMS_UIM_GET_IMSI_REQ), // 0x0043
qmi_name_item(QMIDMS_UIM_GET_IMSI_RESP), // 0x0043
qmi_name_item(QMIDMS_UIM_GET_STATE_REQ), // 0x0044
qmi_name_item(QMIDMS_UIM_GET_STATE_RESP), // 0x0044
qmi_name_item(QMIDMS_GET_BAND_CAP_REQ), // 0x0045
qmi_name_item(QMIDMS_GET_BAND_CAP_RESP), // 0x0045
};
static const QMI_NAME_T qmux_qos_Type[] = {
qmi_name_item( QMI_QOS_SET_EVENT_REPORT_REQ), // 0x0001
qmi_name_item( QMI_QOS_SET_EVENT_REPORT_RESP), // 0x0001
qmi_name_item( QMI_QOS_SET_EVENT_REPORT_IND), // 0x0001
qmi_name_item( QMI_QOS_BIND_DATA_PORT_REQ), // 0x002B
qmi_name_item( QMI_QOS_BIND_DATA_PORT_RESP), // 0x002B
qmi_name_item( QMI_QOS_INDICATION_REGISTER_REQ), // 0x002F
qmi_name_item( QMI_QOS_INDICATION_REGISTER_RESP), // 0x002F
qmi_name_item( QMI_QOS_GLOBAL_QOS_FLOW_IND), // 0x0031
qmi_name_item( QMI_QOS_GET_QOS_INFO_REQ), // 0x0033
qmi_name_item( QMI_QOS_GET_QOS_INFO_RESP), // 0x0033
};
static const QMI_NAME_T qmux_nas_Type[] = {
// ======================= NAS ==============================
qmi_name_item(QMINAS_SET_EVENT_REPORT_REQ), // 0x0002
qmi_name_item(QMINAS_SET_EVENT_REPORT_RESP), // 0x0002
qmi_name_item(QMINAS_EVENT_REPORT_IND), // 0x0002
qmi_name_item(QMINAS_GET_SIGNAL_STRENGTH_REQ), // 0x0020
qmi_name_item(QMINAS_GET_SIGNAL_STRENGTH_RESP), // 0x0020
qmi_name_item(QMINAS_PERFORM_NETWORK_SCAN_REQ), // 0x0021
qmi_name_item(QMINAS_PERFORM_NETWORK_SCAN_RESP), // 0x0021
qmi_name_item(QMINAS_INITIATE_NW_REGISTER_REQ), // 0x0022
qmi_name_item(QMINAS_INITIATE_NW_REGISTER_RESP), // 0x0022
qmi_name_item(QMINAS_INITIATE_ATTACH_REQ), // 0x0023
qmi_name_item(QMINAS_INITIATE_ATTACH_RESP), // 0x0023
qmi_name_item(QMINAS_GET_SERVING_SYSTEM_REQ), // 0x0024
qmi_name_item(QMINAS_GET_SERVING_SYSTEM_RESP), // 0x0024
qmi_name_item(QMINAS_SERVING_SYSTEM_IND), // 0x0024
qmi_name_item(QMINAS_GET_HOME_NETWORK_REQ), // 0x0025
qmi_name_item(QMINAS_GET_HOME_NETWORK_RESP), // 0x0025
qmi_name_item(QMINAS_GET_PREFERRED_NETWORK_REQ), // 0x0026
qmi_name_item(QMINAS_GET_PREFERRED_NETWORK_RESP), // 0x0026
qmi_name_item(QMINAS_SET_PREFERRED_NETWORK_REQ), // 0x0027
qmi_name_item(QMINAS_SET_PREFERRED_NETWORK_RESP), // 0x0027
qmi_name_item(QMINAS_GET_FORBIDDEN_NETWORK_REQ), // 0x0028
qmi_name_item(QMINAS_GET_FORBIDDEN_NETWORK_RESP), // 0x0028
qmi_name_item(QMINAS_SET_FORBIDDEN_NETWORK_REQ), // 0x0029
qmi_name_item(QMINAS_SET_FORBIDDEN_NETWORK_RESP), // 0x0029
qmi_name_item(QMINAS_SET_TECHNOLOGY_PREF_REQ), // 0x002A
qmi_name_item(QMINAS_SET_TECHNOLOGY_PREF_RESP), // 0x002A
qmi_name_item(QMINAS_GET_RF_BAND_INFO_REQ), // 0x0031
qmi_name_item(QMINAS_GET_RF_BAND_INFO_RESP), // 0x0031
qmi_name_item(QMINAS_GET_CELL_LOCATION_INFO_REQ),
qmi_name_item(QMINAS_GET_CELL_LOCATION_INFO_RESP),
qmi_name_item(QMINAS_GET_PLMN_NAME_REQ), // 0x0044
qmi_name_item(QMINAS_GET_PLMN_NAME_RESP), // 0x0044
qmi_name_item(QUECTEL_PACKET_TRANSFER_START_IND), // 0X100
qmi_name_item(QUECTEL_PACKET_TRANSFER_END_IND), // 0X101
qmi_name_item(QMINAS_GET_SYS_INFO_REQ), // 0x004D
qmi_name_item(QMINAS_GET_SYS_INFO_RESP), // 0x004D
qmi_name_item(QMINAS_SYS_INFO_IND), // 0x004D
qmi_name_item(QMINAS_GET_SIG_INFO_REQ),
qmi_name_item(QMINAS_GET_SIG_INFO_RESP),
};
static const QMI_NAME_T qmux_wms_Type[] = {
// ======================= WMS ==============================
qmi_name_item(QMIWMS_SET_EVENT_REPORT_REQ), // 0x0001
qmi_name_item(QMIWMS_SET_EVENT_REPORT_RESP), // 0x0001
qmi_name_item(QMIWMS_EVENT_REPORT_IND), // 0x0001
qmi_name_item(QMIWMS_RAW_SEND_REQ), // 0x0020
qmi_name_item(QMIWMS_RAW_SEND_RESP), // 0x0020
qmi_name_item(QMIWMS_RAW_WRITE_REQ), // 0x0021
qmi_name_item(QMIWMS_RAW_WRITE_RESP), // 0x0021
qmi_name_item(QMIWMS_RAW_READ_REQ), // 0x0022
qmi_name_item(QMIWMS_RAW_READ_RESP), // 0x0022
qmi_name_item(QMIWMS_MODIFY_TAG_REQ), // 0x0023
qmi_name_item(QMIWMS_MODIFY_TAG_RESP), // 0x0023
qmi_name_item(QMIWMS_DELETE_REQ), // 0x0024
qmi_name_item(QMIWMS_DELETE_RESP), // 0x0024
qmi_name_item(QMIWMS_GET_MESSAGE_PROTOCOL_REQ), // 0x0030
qmi_name_item(QMIWMS_GET_MESSAGE_PROTOCOL_RESP), // 0x0030
qmi_name_item(QMIWMS_LIST_MESSAGES_REQ), // 0x0031
qmi_name_item(QMIWMS_LIST_MESSAGES_RESP), // 0x0031
qmi_name_item(QMIWMS_GET_SMSC_ADDRESS_REQ), // 0x0034
qmi_name_item(QMIWMS_GET_SMSC_ADDRESS_RESP), // 0x0034
qmi_name_item(QMIWMS_SET_SMSC_ADDRESS_REQ), // 0x0035
qmi_name_item(QMIWMS_SET_SMSC_ADDRESS_RESP), // 0x0035
qmi_name_item(QMIWMS_GET_STORE_MAX_SIZE_REQ), // 0x0036
qmi_name_item(QMIWMS_GET_STORE_MAX_SIZE_RESP), // 0x0036
};
static const QMI_NAME_T qmux_wds_admin_Type[] = {
qmi_name_item(QMIWDS_ADMIN_SET_DATA_FORMAT_REQ), // 0x0020
qmi_name_item(QMIWDS_ADMIN_SET_DATA_FORMAT_RESP), // 0x0020
qmi_name_item(QMIWDS_ADMIN_GET_DATA_FORMAT_REQ), // 0x0021
qmi_name_item(QMIWDS_ADMIN_GET_DATA_FORMAT_RESP), // 0x0021
qmi_name_item(QMIWDS_ADMIN_SET_QMAP_SETTINGS_REQ), // 0x002B
qmi_name_item(QMIWDS_ADMIN_SET_QMAP_SETTINGS_RESP), // 0x002B
qmi_name_item(QMIWDS_ADMIN_GET_QMAP_SETTINGS_REQ), // 0x002C
qmi_name_item(QMIWDS_ADMIN_GET_QMAP_SETTINGS_RESP), // 0x002C
qmi_name_item(QMI_WDA_SET_LOOPBACK_CONFIG_REQ), // 0x002F
qmi_name_item(QMI_WDA_SET_LOOPBACK_CONFIG_RESP), // 0x002F
qmi_name_item(QMI_WDA_SET_LOOPBACK_CONFIG_IND), // 0x002F
};
static const QMI_NAME_T qmux_uim_Type[] = {
qmi_name_item( QMIUIM_READ_TRANSPARENT_REQ), // 0x0020
qmi_name_item( QMIUIM_READ_TRANSPARENT_RESP), // 0x0020
qmi_name_item( QMIUIM_READ_TRANSPARENT_IND), // 0x0020
qmi_name_item( QMIUIM_READ_RECORD_REQ), // 0x0021
qmi_name_item( QMIUIM_READ_RECORD_RESP), // 0x0021
qmi_name_item( QMIUIM_READ_RECORD_IND), // 0x0021
qmi_name_item( QMIUIM_WRITE_TRANSPARENT_REQ), // 0x0022
qmi_name_item( QMIUIM_WRITE_TRANSPARENT_RESP), // 0x0022
qmi_name_item( QMIUIM_WRITE_TRANSPARENT_IND), // 0x0022
qmi_name_item( QMIUIM_WRITE_RECORD_REQ), // 0x0023
qmi_name_item( QMIUIM_WRITE_RECORD_RESP), // 0x0023
qmi_name_item( QMIUIM_WRITE_RECORD_IND), // 0x0023
qmi_name_item( QMIUIM_SET_PIN_PROTECTION_REQ), // 0x0025
qmi_name_item( QMIUIM_SET_PIN_PROTECTION_RESP), // 0x0025
qmi_name_item( QMIUIM_SET_PIN_PROTECTION_IND), // 0x0025
qmi_name_item( QMIUIM_VERIFY_PIN_REQ), // 0x0026
qmi_name_item( QMIUIM_VERIFY_PIN_RESP), // 0x0026
qmi_name_item( QMIUIM_VERIFY_PIN_IND), // 0x0026
qmi_name_item( QMIUIM_UNBLOCK_PIN_REQ), // 0x0027
qmi_name_item( QMIUIM_UNBLOCK_PIN_RESP), // 0x0027
qmi_name_item( QMIUIM_UNBLOCK_PIN_IND), // 0x0027
qmi_name_item( QMIUIM_CHANGE_PIN_REQ), // 0x0028
qmi_name_item( QMIUIM_CHANGE_PIN_RESP), // 0x0028
qmi_name_item( QMIUIM_CHANGE_PIN_IND), // 0x0028
qmi_name_item( QMIUIM_DEPERSONALIZATION_REQ), // 0x0029
qmi_name_item( QMIUIM_DEPERSONALIZATION_RESP), // 0x0029
qmi_name_item( QMIUIM_EVENT_REG_REQ), // 0x002E
qmi_name_item( QMIUIM_EVENT_REG_RESP), // 0x002E
qmi_name_item( QMIUIM_GET_CARD_STATUS_REQ), // 0x002F
qmi_name_item( QMIUIM_GET_CARD_STATUS_RESP), // 0x002F
qmi_name_item( QMIUIM_STATUS_CHANGE_IND), // 0x0032
};
static const QMI_NAME_T qmux_coex_Type[] = {
qmi_name_item(QMI_COEX_GET_WWAN_STATE_REQ), // 0x0022
qmi_name_item(QMI_COEX_GET_WWAN_STATE_RESP), // 0x0022
};
static const char * qmi_name_get(const QMI_NAME_T *table, size_t size, int type, const char *tag) {
static char unknown[40];
size_t i;
if (qmux_CtlFlags == table) {
if (!strcmp(tag, "_REQ"))
tag = "_CMD";
else if (!strcmp(tag, "_RESP"))
tag = "_RSP";
}
for (i = 0; i < size; i++) {
if (table[i].type == (UINT)type) {
if (!tag || (strstr(table[i].name, tag)))
return table[i].name;
}
}
snprintf(unknown, sizeof(unknown), "unknown_%x", type);
return unknown;
}
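/* Note: unmatched message types fall back to a shared static buffer, so the
 * returned name is only valid until the next unmatched lookup. */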
#define QMI_NAME(table, type) qmi_name_get(table, sizeof(table) / sizeof(table[0]), type, 0)
#define QMUX_NAME(table, type, tag) qmi_name_get(table, sizeof(table) / sizeof(table[0]), type, tag)
void dump_tlv(PQCQMUX_MSG_HDR pQMUXMsgHdr) {
int TLVFind = 0;
int i;
//dbg("QCQMUX_TLV-----------------------------------\n");
//dbg("{Type,\tLength,\tValue}\n");
while (1) {
PQMI_TLV_HDR TLVHdr = GetTLV(pQMUXMsgHdr, 0x1000 + (++TLVFind));
if (TLVHdr == NULL)
break;
//if ((TLVHdr->TLVType == 0x02) && ((USHORT *)(TLVHdr+1))[0])
{
dbg("{%02x,\t%04x,\t", TLVHdr->TLVType, le16_to_cpu(TLVHdr->TLVLength));
for (i = 0; i < le16_to_cpu(TLVHdr->TLVLength); i++) {
dbg("%02x ", ((UCHAR *)(TLVHdr+1))[i]);
}
dbg("}\n");
}
} // while
}
void dump_ctl(PQCQMICTL_MSG_HDR CTLHdr) {
const char *tag;
//dbg("QCQMICTL_MSG--------------------------------------------\n");
//dbg("CtlFlags: %02x\t\t%s\n", CTLHdr->CtlFlags, QMI_NAME(qmi_ctl_CtlFlags, CTLHdr->CtlFlags));
dbg("TransactionId: %02x\n", CTLHdr->TransactionId);
switch (CTLHdr->CtlFlags) {
case QMICTL_FLAG_REQUEST: tag = "_REQ"; break;
case QMICTL_FLAG_RESPONSE: tag = "_RESP"; break;
case QMICTL_FLAG_INDICATION: tag = "_IND"; break;
default: tag = 0; break;
}
dbg("QMICTLType: %04x\t%s\n", le16_to_cpu(CTLHdr->QMICTLType),
QMUX_NAME(qmux_ctl_QMICTLType, le16_to_cpu(CTLHdr->QMICTLType), tag));
dbg("Length: %04x\n", le16_to_cpu(CTLHdr->Length));
dump_tlv((PQCQMUX_MSG_HDR)(&CTLHdr->QMICTLType));
}
int dump_qmux(QMI_SERVICE_TYPE serviceType, PQCQMUX_HDR QMUXHdr) {
PQCQMUX_MSG_HDR QMUXMsgHdr = (PQCQMUX_MSG_HDR) (QMUXHdr + 1);
const char *tag;
//dbg("QCQMUX--------------------------------------------\n");
switch (QMUXHdr->CtlFlags&QMUX_CTL_FLAG_MASK_TYPE) {
case QMUX_CTL_FLAG_TYPE_CMD: tag = "_REQ"; break;
case QMUX_CTL_FLAG_TYPE_RSP: tag = "_RESP"; break;
case QMUX_CTL_FLAG_TYPE_IND: tag = "_IND"; break;
default: tag = 0; break;
}
//dbg("CtlFlags: %02x\t\t%s\n", QMUXHdr->CtlFlags, QMUX_NAME(qmux_CtlFlags, QMUXHdr->CtlFlags, tag));
dbg("TransactionId: %04x\n", le16_to_cpu(QMUXHdr->TransactionId));
//dbg("QCQMUX_MSG_HDR-----------------------------------\n");
switch (serviceType) {
case QMUX_TYPE_DMS:
dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type),
QMUX_NAME(qmux_dms_Type, le16_to_cpu(QMUXMsgHdr->Type), tag));
break;
case QMUX_TYPE_NAS:
dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type),
QMUX_NAME(qmux_nas_Type, le16_to_cpu(QMUXMsgHdr->Type), tag));
break;
case QMUX_TYPE_WDS:
case QMUX_TYPE_WDS_IPV6:
dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type),
QMUX_NAME(qmux_wds_Type, le16_to_cpu(QMUXMsgHdr->Type), tag));
break;
case QMUX_TYPE_WMS:
dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type),
QMUX_NAME(qmux_wms_Type, le16_to_cpu(QMUXMsgHdr->Type), tag));
break;
case QMUX_TYPE_WDS_ADMIN:
dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type),
QMUX_NAME(qmux_wds_admin_Type, le16_to_cpu(QMUXMsgHdr->Type), tag));
break;
case QMUX_TYPE_UIM:
dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type),
QMUX_NAME(qmux_uim_Type, le16_to_cpu(QMUXMsgHdr->Type), tag));
break;
case QMUX_TYPE_PDS:
case QMUX_TYPE_QOS:
dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type),
QMUX_NAME(qmux_qos_Type, le16_to_cpu(QMUXMsgHdr->Type), tag));
break;
case QMUX_TYPE_COEX:
dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type),
QMUX_NAME(qmux_coex_Type, le16_to_cpu(QMUXMsgHdr->Type), tag));
break;
case QMUX_TYPE_CTL:
default:
dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type), "PDS/QOS/CTL/unknown!");
break;
}
dbg("Length: %04x\n", le16_to_cpu(QMUXMsgHdr->Length));
dump_tlv(QMUXMsgHdr);
return 0;
}
void dump_qmi(void *dataBuffer, int dataLen)
{
PQCQMI_HDR QMIHdr = (PQCQMI_HDR)dataBuffer;
PQCQMUX_HDR QMUXHdr = (PQCQMUX_HDR) (QMIHdr + 1);
PQCQMICTL_MSG_HDR CTLHdr = (PQCQMICTL_MSG_HDR) (QMIHdr + 1);
int i;
if (!debug_qmi)
return;
pthread_mutex_lock(&dumpQMIMutex);
line[0] = 0;
for (i = 0; i < dataLen; i++) {
dbg("%02x ", ((unsigned char *)dataBuffer)[i]);
}
dbg_time("%s", line);
line[0] = 0;
//dbg("QCQMI_HDR-----------------------------------------");
//dbg("IFType: %02x\t\t%s", QMIHdr->IFType, QMI_NAME(qmi_IFType, QMIHdr->IFType));
//dbg("Length: %04x", le16_to_cpu(QMIHdr->Length));
//dbg("CtlFlags: %02x\t\t%s", QMIHdr->CtlFlags, QMI_NAME(qmi_CtlFlags, QMIHdr->CtlFlags));
//dbg("QMIType: %02x\t\t%s", QMIHdr->QMIType, QMI_NAME(qmi_QMIType, QMIHdr->QMIType));
//dbg("ClientId: %02x", QMIHdr->ClientId);
if (QMIHdr->QMIType == QMUX_TYPE_CTL) {
dump_ctl(CTLHdr);
} else {
dump_qmux(QMIHdr->QMIType, QMUXHdr);
}
dbg_time("%s", line);
pthread_mutex_unlock(&dumpQMIMutex);
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,423 @@
#ifndef __QMI_THREAD_H__
#define __QMI_THREAD_H__
#define CONFIG_GOBINET
#define CONFIG_QMIWWAN
#define CONFIG_SIM
#define CONFIG_APN
#define CONFIG_VERSION
//#define CONFIG_SIGNALINFO
//#define CONFIG_CELLINFO
//#define CONFIG_COEX_WWAN_STATE
#define CONFIG_DEFAULT_PDP 1
//#define CONFIG_IMSI_ICCID
#define QUECTEL_UL_DATA_AGG
//#define QUECTEL_QMI_MERGE
//#define REBOOT_SIM_CARD_WHEN_APN_CHANGE
//#define REBOOT_SIM_CARD_WHEN_LONG_TIME_NO_PS 60 //unit is seconds
//#define CONFIG_QRTR
//#define CONFIG_ENABLE_QOS
//#define CONFIG_REG_QOS_IND
//#define CONFIG_GET_QOS_INFO
//#define CONFIG_GET_QOS_DATA_RATE
#if (defined(CONFIG_REG_QOS_IND) || defined(CONFIG_GET_QOS_INFO) || defined(CONFIG_GET_QOS_DATA_RATE))
#ifndef CONFIG_REG_QOS_IND
#define CONFIG_REG_QOS_IND
#endif
#ifndef CONFIG_ENABLE_QOS
#define CONFIG_ENABLE_QOS
#endif
#endif
#include <stdio.h>
#include <string.h>
#include <termios.h>
#include <stdio.h>
#include <ctype.h>
#include <time.h>
#include <fcntl.h>
#include <signal.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/epoll.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <stddef.h>
#include "qendian.h"
#include "QCQMI.h"
#include "QCQCTL.h"
#include "QCQMUX.h"
#include "util.h"
#define DEVICE_CLASS_UNKNOWN 0
#define DEVICE_CLASS_CDMA 1
#define DEVICE_CLASS_GSM 2
#define WWAN_DATA_CLASS_NONE 0x00000000
#define WWAN_DATA_CLASS_GPRS 0x00000001
#define WWAN_DATA_CLASS_EDGE 0x00000002 /* EGPRS */
#define WWAN_DATA_CLASS_UMTS 0x00000004
#define WWAN_DATA_CLASS_HSDPA 0x00000008
#define WWAN_DATA_CLASS_HSUPA 0x00000010
#define WWAN_DATA_CLASS_LTE 0x00000020
#define WWAN_DATA_CLASS_5G_NSA 0x00000040
#define WWAN_DATA_CLASS_5G_SA 0x00000080
#define WWAN_DATA_CLASS_1XRTT 0x00010000
#define WWAN_DATA_CLASS_1XEVDO 0x00020000
#define WWAN_DATA_CLASS_1XEVDO_REVA 0x00040000
#define WWAN_DATA_CLASS_1XEVDV 0x00080000
#define WWAN_DATA_CLASS_3XRTT 0x00100000
#define WWAN_DATA_CLASS_1XEVDO_REVB 0x00200000 /* for future use */
#define WWAN_DATA_CLASS_UMB 0x00400000
#define WWAN_DATA_CLASS_CUSTOM 0x80000000
struct wwan_data_class_str {
ULONG class;
const char *str;
};
#pragma pack(push, 1)
typedef struct __IPV4 {
uint32_t Address;
uint32_t Gateway;
uint32_t SubnetMask;
uint32_t DnsPrimary;
uint32_t DnsSecondary;
uint32_t Mtu;
} IPV4_T;
typedef struct __IPV6 {
UCHAR Address[16];
UCHAR Gateway[16];
UCHAR SubnetMask[16];
UCHAR DnsPrimary[16];
UCHAR DnsSecondary[16];
UCHAR PrefixLengthIPAddr;
UCHAR PrefixLengthGateway;
ULONG Mtu;
} IPV6_T;
typedef struct {
UINT size;
UINT rx_urb_size;
UINT ep_type;
UINT iface_id;
UINT MuxId;
UINT ul_data_aggregation_max_datagrams; //0x17
UINT ul_data_aggregation_max_size ;//0x18
UINT dl_minimum_padding; //0x1A
} QMAP_SETTING;
//Configured downlink data aggregation protocol
#define WDA_DL_DATA_AGG_DISABLED (0x00) //DL data aggregation is disabled (default)
#define WDA_DL_DATA_AGG_TLP_ENABLED (0x01) // DL TLP is enabled
#define WDA_DL_DATA_AGG_QC_NCM_ENABLED (0x02) // DL QC_NCM is enabled
#define WDA_DL_DATA_AGG_MBIM_ENABLED (0x03) // DL MBIM is enabled
#define WDA_DL_DATA_AGG_RNDIS_ENABLED (0x04) // DL RNDIS is enabled
#define WDA_DL_DATA_AGG_QMAP_ENABLED (0x05) // DL QMAP is enabled
#define WDA_DL_DATA_AGG_QMAP_V2_ENABLED (0x06) // DL QMAP V2 is enabled
#define WDA_DL_DATA_AGG_QMAP_V3_ENABLED (0x07) // DL QMAP V3 is enabled
#define WDA_DL_DATA_AGG_QMAP_V4_ENABLED (0x08) // DL QMAP V4 is enabled
#define WDA_DL_DATA_AGG_QMAP_V5_ENABLED (0x09) // DL QMAP V5 is enabled
typedef struct {
unsigned int size;
unsigned int rx_urb_size;
unsigned int ep_type;
unsigned int iface_id;
unsigned int qmap_mode;
unsigned int qmap_version;
unsigned int dl_minimum_padding;
char ifname[8][16];
unsigned char mux_id[8];
} RMNET_INFO;
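/* Filled in by ql_get_driver_rmnet_info() (declared below); describes up to 8
 * QMAP-muxed network interfaces as reported by the rmnet driver
 * (e.g. qmi_wwan_q or pcie_mhi). */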
#define IpFamilyV4 (0x04)
#define IpFamilyV6 (0x06)
struct __PROFILE;
struct qmi_device_ops {
int (*init)(struct __PROFILE *profile);
int (*deinit)(void);
int (*send)(PQCQMIMSG pRequest);
void* (*read)(void *pData);
};
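/* Transport abstraction: each backend (GobiNet, qmi_wwan, MBIM, AT, and
 * optionally QRTR) supplies its own init/deinit/send implementation plus a
 * reader-thread entry point; see the *_ops instances declared below. */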
#ifdef CONFIG_QRTR
extern const struct qmi_device_ops qrtr_qmidev_ops;
#endif
extern const struct qmi_device_ops gobi_qmidev_ops;
extern const struct qmi_device_ops qmiwwan_qmidev_ops;
extern const struct qmi_device_ops mbim_dev_ops;
extern const struct qmi_device_ops atc_dev_ops;
extern int (*qmidev_send)(PQCQMIMSG pRequest);
struct usb_device_info {
int idVendor;
int idProduct;
int busnum;
int devnum;
int bNumInterfaces;
};
struct usb_interface_info {
int bNumEndpoints;
int bInterfaceClass;
int bInterfaceSubClass;
int bInterfaceProtocol;
char driver[32];
};
#define LIBQMI_PROXY "qmi-proxy" //src/libqmi-glib/qmi-proxy.h
#define LIBMBIM_PROXY "mbim-proxy"
#define QUECTEL_QMI_PROXY "quectel-qmi-proxy"
#define QUECTEL_MBIM_PROXY "quectel-mbim-proxy"
#define QUECTEL_ATC_PROXY "quectel-atc-proxy"
#define QUECTEL_QRTR_PROXY "quectel-qrtr-proxy"
#ifndef bool
#define bool uint8_t
#endif
struct request_ops;
typedef struct __PROFILE {
//user input start
const char *apn;
const char *user;
const char *password;
int auth;
int iptype;
const char *pincode;
char proxy[32];
int pdp;//pdp_context
int profile_index;//profile_index
int enable_bridge;
bool enable_ipv4;
bool enable_ipv6;
bool no_dhcp;
const char *logfile;
const char *usblogfile;
char expect_adapter[32];
int kill_pdp;
int replication_factor;
//user input end
char qmichannel[32];
char usbnet_adapter[32];
char qmapnet_adapter[32];
char driver_name[32];
int qmap_mode;
int qmap_size;
int qmap_version;
int curIpFamily;
int rawIP;
int muxid;
#ifdef CONFIG_ENABLE_QOS
UINT qos_id;
#endif
int wda_client;
uint32_t udhcpc_ip;
IPV4_T ipv4;
IPV6_T ipv6;
UINT PCSCFIpv4Addr1;
UINT PCSCFIpv4Addr2;
UCHAR PCSCFIpv6Addr1[16];
UCHAR PCSCFIpv6Addr2[16];
bool reattach_flag;
int hardware_interface;
int software_interface;
struct usb_device_info usb_dev;
struct usb_interface_info usb_intf;
int usbmon_fd;
FILE *usbmon_logfile_fp;
bool loopback_state;
char BaseBandVersion[64];
char old_apn[64];
char old_user[64];
char old_password[64];
int old_auth;
int old_iptype;
const struct qmi_device_ops *qmi_ops;
const struct request_ops *request_ops;
RMNET_INFO rmnet_info;
} PROFILE_T;
#ifdef QUECTEL_QMI_MERGE
#define MERGE_PACKET_IDENTITY 0x2c7c
#define MERGE_PACKET_VERSION 0x0001
#define MERGE_PACKET_MAX_PAYLOAD_SIZE 56
typedef struct __QMI_MSG_HEADER {
uint16_t idenity;
uint16_t version;
uint16_t cur_len;
uint16_t total_len;
} QMI_MSG_HEADER;
typedef struct __QMI_MSG_PACKET {
QMI_MSG_HEADER header;
uint16_t len;
char buf[4096];
} QMI_MSG_PACKET;
#endif
typedef enum {
SIM_ABSENT = 0,
SIM_NOT_READY = 1,
SIM_READY = 2, /* SIM_READY means the radio state is RADIO_STATE_SIM_READY */
SIM_PIN = 3,
SIM_PUK = 4,
SIM_NETWORK_PERSONALIZATION = 5,
SIM_BAD = 6,
} SIM_Status;
#pragma pack(pop)
#define WDM_DEFAULT_BUFSIZE 256
#define RIL_REQUEST_QUIT 0x1000
#define RIL_INDICATE_DEVICE_CONNECTED 0x1002
#define RIL_INDICATE_DEVICE_DISCONNECTED 0x1003
#define RIL_UNSOL_RESPONSE_VOICE_NETWORK_STATE_CHANGED 0x1004
#define RIL_UNSOL_DATA_CALL_LIST_CHANGED 0x1005
#define MODEM_REPORT_RESET_EVENT 0x1006
#define RIL_UNSOL_LOOPBACK_CONFIG_IND 0x1007
#ifdef CONFIG_REG_QOS_IND
#define RIL_UNSOL_GLOBAL_QOS_FLOW_IND_QOS_ID 0x1008
#endif
extern pthread_mutex_t cm_command_mutex;
extern pthread_cond_t cm_command_cond;
extern unsigned int cm_recv_buf[1024];
extern int cm_open_dev(const char *dev);
extern int cm_open_proxy(const char *name);
extern int pthread_cond_timeout_np(pthread_cond_t *cond, pthread_mutex_t * mutex, unsigned msecs);
extern int QmiThreadSendQMITimeout(PQCQMIMSG pRequest, PQCQMIMSG *ppResponse, unsigned msecs, const char *funcname);
#define QmiThreadSendQMI(pRequest, ppResponse) QmiThreadSendQMITimeout(pRequest, ppResponse, 30 * 1000, __func__)
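/* Illustrative call pattern (sketch only): send a request, wait up to 30 s for
 * the matching response, then free the heap-allocated response:
 *   PQCQMIMSG pResponse = NULL;
 *   if (QmiThreadSendQMI(pRequest, &pResponse) == 0 && pResponse) {
 *       // ... parse pResponse ...
 *       free(pResponse);
 *   }
 */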
extern void QmiThreadRecvQMI(PQCQMIMSG pResponse);
extern void udhcpc_start(PROFILE_T *profile);
extern void udhcpc_stop(PROFILE_T *profile);
extern void ql_set_driver_link_state(PROFILE_T *profile, int link_state);
extern void ql_set_driver_qmap_setting(PROFILE_T *profile, QMAP_SETTING *qmap_settings);
extern void ql_get_driver_rmnet_info(PROFILE_T *profile, RMNET_INFO *rmnet_info);
extern void dump_qmi(void *dataBuffer, int dataLen);
extern void qmidevice_send_event_to_main(int triger_event);
extern void qmidevice_send_event_to_main_ext(int triger_event, void *data, unsigned len);
extern uint8_t qmi_over_mbim_get_client_id(uint8_t QMIType);
extern uint8_t qmi_over_mbim_release_client_id(uint8_t QMIType, uint8_t ClientId);
#ifdef CONFIG_REG_QOS_IND
extern UCHAR ql_get_global_qos_flow_ind_qos_id(PQCQMIMSG pResponse, UINT *qos_id);
#endif
#ifdef CONFIG_GET_QOS_DATA_RATE
extern UCHAR ql_get_global_qos_flow_ind_data_rate(PQCQMIMSG pResponse, void *max_data_rate);
#endif
struct request_ops {
int (*requestBaseBandVersion)(PROFILE_T *profile);
int (*requestSetEthMode)(PROFILE_T *profile);
int (*requestSetLoopBackState)(UCHAR loopback_state, ULONG replication_factor);
int (*requestGetSIMStatus)(SIM_Status *pSIMStatus);
int (*requestEnterSimPin)(const char *pPinCode);
int (*requestSetProfile)(PROFILE_T *profile); // 1 ~ success and apn change, 0 ~ success and no apn change, -1 ~ fail
int (*requestGetProfile)(PROFILE_T *profile);
int (*requestRegistrationState)(UCHAR *pPSAttachedState);
int (*requestSetupDataCall)(PROFILE_T *profile, int curIpFamily);
int (*requestQueryDataCall)(UCHAR *pConnectionStatus, int curIpFamily);
int (*requestDeactivateDefaultPDP)(PROFILE_T *profile, int curIpFamily);
int (*requestGetIPAddress)(PROFILE_T *profile, int curIpFamily);
int (*requestGetSignalInfo)(void);
int (*requestGetCellInfoList)(void);
int (*requestGetICCID)(void);
int (*requestGetIMSI)(void);
int (*requestRadioPower)(int state);
int (*requestRegisterQos)(PROFILE_T *profile);
int (*requestGetQosInfo)(PROFILE_T *profile);
int (*requestGetCoexWWANState)(void);
};
extern const struct request_ops qmi_request_ops;
extern const struct request_ops mbim_request_ops;
extern const struct request_ops atc_request_ops;
extern int get_driver_type(PROFILE_T *profile);
extern BOOL qmidevice_detect(char *qmichannel, char *usbnet_adapter, unsigned bufsize, PROFILE_T *profile);
int mhidevice_detect(char *qmichannel, char *usbnet_adapter, PROFILE_T *profile);
int atdevice_detect(char *atchannel, char *usbnet_adapter, PROFILE_T *profile);
extern int ql_bridge_mode_detect(PROFILE_T *profile);
extern int ql_enable_qmi_wwan_rawip_mode(PROFILE_T *profile);
extern int ql_qmap_mode_detect(PROFILE_T *profile);
#ifdef CONFIG_QRTR
extern int rtrmnet_ctl_create_vnd(char *devname, char *vndname, uint8_t muxid,
uint32_t qmap_version, uint32_t ul_agg_cnt, uint32_t ul_agg_size);
#endif
#define qmidev_is_gobinet(_qmichannel) (strncmp(_qmichannel, "/dev/qcqmi", strlen("/dev/qcqmi")) == 0)
#define qmidev_is_qmiwwan(_qmichannel) (strncmp(_qmichannel, "/dev/cdc-wdm", strlen("/dev/cdc-wdm")) == 0)
#define qmidev_is_pciemhi(_qmichannel) (strncmp(_qmichannel, "/dev/mhi_", strlen("/dev/mhi_")) == 0)
#define driver_is_qmi(_drv_name) (strncasecmp(_drv_name, "qmi_wwan", strlen("qmi_wwan")) == 0)
#define driver_is_mbim(_drv_name) (strncasecmp(_drv_name, "cdc_mbim", strlen("cdc_mbim")) == 0)
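/* Examples: qmidev_is_qmiwwan("/dev/cdc-wdm0") and qmidev_is_pciemhi("/dev/mhi_QMI0")
 * match on the device-node prefix; driver_is_qmi("qmi_wwan_q") is also true because
 * only the "qmi_wwan" prefix is compared (case-insensitively). */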
extern FILE *logfilefp;
extern int debug_qmi;
extern int qmidevice_control_fd[2];
extern int g_donot_exit_when_modem_hangup;
extern void update_resolv_conf(int iptype, const char *ifname, const char *dns1, const char *dns2);
void update_ipv4_address(const char *ifname, const char *ip, const char *gw, unsigned prefix);
void update_ipv6_address(const char *ifname, const char *ip, const char *gw, unsigned prefix);
int reattach_driver(PROFILE_T *profile);
extern void no_trunc_strncpy(char *dest, const char *src, size_t dest_size);
enum
{
DRV_INVALID,
SOFTWARE_QMI,
SOFTWARE_MBIM,
SOFTWARE_ECM_RNDIS_NCM,
SOFTWARE_QRTR,
HARDWARE_PCIE,
HARDWARE_USB,
};
enum
{
SIG_EVENT_START,
SIG_EVENT_CHECK,
SIG_EVENT_STOP,
};
typedef enum
{
DMS_OP_MODE_ONLINE,
DMS_OP_MODE_LOW_POWER,
DMS_OP_MODE_FACTORY_TEST_MODE,
DMS_OP_MODE_OFFLINE,
DMS_OP_MODE_RESETTING,
DMS_OP_MODE_SHUTTING_DOWN,
DMS_OP_MODE_PERSISTENT_LOW_POWER,
DMS_OP_MODE_MODE_ONLY_LOW_POWER,
DMS_OP_MODE_NET_TEST_GW,
}Device_operating_mode;
#ifdef CM_DEBUG
#define dbg_time(fmt, args...) do { \
fprintf(stdout, "[%15s-%04d: %s] " fmt "\n", __FILE__, __LINE__, get_time(), ##args); \
fflush(stdout);\
if (logfilefp) fprintf(logfilefp, "[%s-%04d: %s] " fmt "\n", __FILE__, __LINE__, get_time(), ##args); \
} while(0)
#else
#define dbg_time(fmt, args...) do { \
fprintf(stdout, "[%s] " fmt "\n", get_time(), ##args); \
fflush(stdout);\
if (logfilefp) fprintf(logfilefp, "[%s] " fmt "\n", get_time(), ##args); \
} while(0)
#endif
#endif

View File

@ -0,0 +1,459 @@
/******************************************************************************
@file QmiWwanCM.c
@brief QMI WWAN connectivity manager.
DESCRIPTION
Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules.
INITIALIZATION AND SEQUENCING REQUIREMENTS
None.
---------------------------------------------------------------------------
Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved.
Quectel Wireless Solution Proprietary and Confidential.
---------------------------------------------------------------------------
******************************************************************************/
#include <stdio.h>
#include <string.h>
#include <termios.h>
#include <stdio.h>
#include <ctype.h>
#include "QMIThread.h"
#ifdef CONFIG_QMIWWAN
static int cdc_wdm_fd = -1;
static UCHAR qmiclientId[QMUX_TYPE_ALL];
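/* QMI CTL transaction IDs are a single byte and must be non-zero, so this
 * counter wraps from 0xFF back to 1. */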
static UCHAR GetQCTLTransactionId(void) {
static int TransactionId = 0;
if (++TransactionId > 0xFF)
TransactionId = 1;
return TransactionId;
}
typedef USHORT (*CUSTOMQCTL)(PQMICTL_MSG pCTLMsg, void *arg);
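/* Builds a QMI CTL request in a stack buffer, lets the optional callback fill
 * in the message-specific TLVs, then returns a heap copy that the caller must
 * free (NULL if the allocation fails). */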
static PQCQMIMSG ComposeQCTLMsg(USHORT QMICTLType, CUSTOMQCTL customQctlMsgFunction, void *arg) {
UCHAR QMIBuf[WDM_DEFAULT_BUFSIZE];
PQCQMIMSG pRequest = (PQCQMIMSG)QMIBuf;
int Length;
pRequest->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI;
pRequest->QMIHdr.CtlFlags = 0x00;
pRequest->QMIHdr.QMIType = QMUX_TYPE_CTL;
pRequest->QMIHdr.ClientId= 0x00;
pRequest->CTLMsg.QMICTLMsgHdr.CtlFlags = QMICTL_FLAG_REQUEST;
pRequest->CTLMsg.QMICTLMsgHdr.TransactionId = GetQCTLTransactionId();
pRequest->CTLMsg.QMICTLMsgHdr.QMICTLType = cpu_to_le16(QMICTLType);
if (customQctlMsgFunction)
pRequest->CTLMsg.QMICTLMsgHdr.Length = cpu_to_le16(customQctlMsgFunction(&pRequest->CTLMsg, arg) - sizeof(QCQMICTL_MSG_HDR));
else
pRequest->CTLMsg.QMICTLMsgHdr.Length = cpu_to_le16(0x0000);
pRequest->QMIHdr.Length = cpu_to_le16(le16_to_cpu(pRequest->CTLMsg.QMICTLMsgHdr.Length) + sizeof(QCQMICTL_MSG_HDR) + sizeof(QCQMI_HDR) - 1);
Length = le16_to_cpu(pRequest->QMIHdr.Length) + 1;
pRequest = (PQCQMIMSG)malloc(Length);
if (pRequest == NULL) {
dbg_time("%s fail to malloc", __func__);
} else {
memcpy(pRequest, QMIBuf, Length);
}
return pRequest;
}
static USHORT CtlGetVersionReq(PQMICTL_MSG QCTLMsg, void *arg)
{
(void)arg;
QCTLMsg->GetVersionReq.TLVType = QCTLV_TYPE_REQUIRED_PARAMETER;
QCTLMsg->GetVersionReq.TLVLength = cpu_to_le16(0x0001);
QCTLMsg->GetVersionReq.QMUXTypes = QMUX_TYPE_ALL;
return sizeof(QMICTL_GET_VERSION_REQ_MSG);
}
static USHORT CtlGetClientIdReq(PQMICTL_MSG QCTLMsg, void *arg) {
QCTLMsg->GetClientIdReq.TLVType = QCTLV_TYPE_REQUIRED_PARAMETER;
QCTLMsg->GetClientIdReq.TLVLength = cpu_to_le16(0x0001);
QCTLMsg->GetClientIdReq.QMIType = ((UCHAR *)arg)[0];
return sizeof(QMICTL_GET_CLIENT_ID_REQ_MSG);
}
static USHORT CtlReleaseClientIdReq(PQMICTL_MSG QCTLMsg, void *arg) {
QCTLMsg->ReleaseClientIdReq.TLVType = QCTLV_TYPE_REQUIRED_PARAMETER;
QCTLMsg->ReleaseClientIdReq.TLVLength = cpu_to_le16(0x0002);
QCTLMsg->ReleaseClientIdReq.QMIType = ((UCHAR *)arg)[0];
QCTLMsg->ReleaseClientIdReq.ClientId = ((UCHAR *)arg)[1] ;
return sizeof(QMICTL_RELEASE_CLIENT_ID_REQ_MSG);
}
static USHORT CtlLibQmiProxyOpenReq(PQMICTL_MSG QCTLMsg, void *arg)
{
const char *device_path = (const char *)arg;
QCTLMsg->LibQmiProxyOpenReq.TLVType = 0x01;
QCTLMsg->LibQmiProxyOpenReq.TLVLength = cpu_to_le16(strlen(device_path));
//strcpy(QCTLMsg->LibQmiProxyOpenReq.device_path, device_path);
//__builtin___strcpy_chk
memcpy(QCTLMsg->LibQmiProxyOpenReq.device_path, device_path, strlen(device_path));
return sizeof(QMICTL_LIBQMI_PROXY_OPEN_MSG) + (strlen(device_path));
}
static int libqmi_proxy_open(const char *cdc_wdm) {
int ret;
PQCQMIMSG pResponse;
ret = QmiThreadSendQMI(ComposeQCTLMsg(QMI_MESSAGE_CTL_INTERNAL_PROXY_OPEN,
CtlLibQmiProxyOpenReq, (void *)cdc_wdm), &pResponse);
if (!ret && pResponse
&& pResponse->CTLMsg.QMICTLMsgHdrRsp.QMUXResult == 0
&& pResponse->CTLMsg.QMICTLMsgHdrRsp.QMUXError == 0) {
ret = 0;
}
else {
return -1;
}
if (pResponse)
free(pResponse);
return ret;
}
static int QmiWwanSendQMI(PQCQMIMSG pRequest) {
struct pollfd pollfds[]= {{cdc_wdm_fd, POLLOUT, 0}};
int ret;
if (cdc_wdm_fd == -1) {
dbg_time("%s cdc_wdm_fd = -1", __func__);
return -ENODEV;
}
if (pRequest->QMIHdr.QMIType != QMUX_TYPE_CTL) {
pRequest->QMIHdr.ClientId = qmiclientId[pRequest->QMIHdr.QMIType];
if (pRequest->QMIHdr.ClientId == 0) {
dbg_time("QMIType %d has no clientID", pRequest->QMIHdr.QMIType);
return -ENODEV;
}
if (pRequest->QMIHdr.QMIType == QMUX_TYPE_WDS_IPV6)
pRequest->QMIHdr.QMIType = QMUX_TYPE_WDS;
}
do {
ret = poll(pollfds, sizeof(pollfds)/sizeof(pollfds[0]), 5000);
} while ((ret < 0) && (errno == EINTR));
if (pollfds[0].revents & POLLOUT) {
ssize_t nwrites = le16_to_cpu(pRequest->QMIHdr.Length) + 1;
ret = write(cdc_wdm_fd, pRequest, nwrites);
if (ret == nwrites) {
ret = 0;
} else {
dbg_time("%s write=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno));
}
} else {
dbg_time("%s poll=%d, revents = 0x%x, errno: %d (%s)", __func__, ret, pollfds[0].revents, errno, strerror(errno));
}
return ret;
}
static UCHAR QmiWwanGetClientID(UCHAR QMIType) {
PQCQMIMSG pResponse;
QmiThreadSendQMI(ComposeQCTLMsg(QMICTL_GET_CLIENT_ID_REQ, CtlGetClientIdReq, &QMIType), &pResponse);
if (pResponse) {
USHORT QMUXResult = cpu_to_le16(pResponse->CTLMsg.QMICTLMsgHdrRsp.QMUXResult); // QMI_RESULT_SUCCESS
USHORT QMUXError = cpu_to_le16(pResponse->CTLMsg.QMICTLMsgHdrRsp.QMUXError); // QMI_ERR_INVALID_ARG
//UCHAR QMIType = pResponse->CTLMsg.GetClientIdRsp.QMIType;
UCHAR ClientId = pResponse->CTLMsg.GetClientIdRsp.ClientId;
if (!QMUXResult && !QMUXError && (QMIType == pResponse->CTLMsg.GetClientIdRsp.QMIType)) {
switch (QMIType) {
case QMUX_TYPE_WDS: dbg_time("Get clientWDS = %d", ClientId); break;
case QMUX_TYPE_DMS: dbg_time("Get clientDMS = %d", ClientId); break;
case QMUX_TYPE_NAS: dbg_time("Get clientNAS = %d", ClientId); break;
case QMUX_TYPE_QOS: dbg_time("Get clientQOS = %d", ClientId); break;
case QMUX_TYPE_WMS: dbg_time("Get clientWMS = %d", ClientId); break;
case QMUX_TYPE_PDS: dbg_time("Get clientPDS = %d", ClientId); break;
case QMUX_TYPE_UIM: dbg_time("Get clientUIM = %d", ClientId); break;
case QMUX_TYPE_COEX: dbg_time("Get clientCOEX = %d", ClientId); break;
case QMUX_TYPE_WDS_ADMIN: dbg_time("Get clientWDA = %d", ClientId);
break;
default: break;
}
return ClientId;
}
}
return 0;
}
static int QmiWwanReleaseClientID(QMI_SERVICE_TYPE QMIType, UCHAR ClientId) {
UCHAR argv[] = {QMIType, ClientId};
QmiThreadSendQMI(ComposeQCTLMsg(QMICTL_RELEASE_CLIENT_ID_REQ, CtlReleaseClientIdReq, argv), NULL);
return 0;
}
static int QmiWwanInit(PROFILE_T *profile) {
unsigned i;
int ret;
PQCQMIMSG pResponse;
if (profile->proxy[0] && !strcmp(profile->proxy, LIBQMI_PROXY)) {
ret = libqmi_proxy_open(profile->qmichannel);
if (ret)
return ret;
}
if (!profile->proxy[0]) {
for (i = 0; i < 10; i++) {
ret = QmiThreadSendQMITimeout(ComposeQCTLMsg(QMICTL_SYNC_REQ, NULL, NULL), NULL, 1 * 1000, __func__);
if (!ret)
break;
sleep(1);
}
if (ret)
return ret;
}
QmiThreadSendQMI(ComposeQCTLMsg(QMICTL_GET_VERSION_REQ, CtlGetVersionReq, NULL), &pResponse);
if (profile->qmap_mode) {
if (pResponse) {
if (pResponse->CTLMsg.QMICTLMsgHdrRsp.QMUXResult == 0 && pResponse->CTLMsg.QMICTLMsgHdrRsp.QMUXError == 0) {
uint8_t NumElements = 0;
for (NumElements = 0; NumElements < pResponse->CTLMsg.GetVersionRsp.NumElements; NumElements++) {
#if 0
dbg_time("QMUXType = %02x Version = %d.%d",
pResponse->CTLMsg.GetVersionRsp.TypeVersion[NumElements].QMUXType,
pResponse->CTLMsg.GetVersionRsp.TypeVersion[NumElements].MajorVersion,
pResponse->CTLMsg.GetVersionRsp.TypeVersion[NumElements].MinorVersion);
#endif
if (pResponse->CTLMsg.GetVersionRsp.TypeVersion[NumElements].QMUXType == QMUX_TYPE_WDS_ADMIN)
profile->qmap_version = (pResponse->CTLMsg.GetVersionRsp.TypeVersion[NumElements].MinorVersion > 16);
}
}
}
}
if (pResponse) free(pResponse);
qmiclientId[QMUX_TYPE_WDS] = QmiWwanGetClientID(QMUX_TYPE_WDS);
if (profile->enable_ipv6)
qmiclientId[QMUX_TYPE_WDS_IPV6] = QmiWwanGetClientID(QMUX_TYPE_WDS);
qmiclientId[QMUX_TYPE_DMS] = QmiWwanGetClientID(QMUX_TYPE_DMS);
qmiclientId[QMUX_TYPE_NAS] = QmiWwanGetClientID(QMUX_TYPE_NAS);
qmiclientId[QMUX_TYPE_UIM] = QmiWwanGetClientID(QMUX_TYPE_UIM);
qmiclientId[QMUX_TYPE_WDS_ADMIN] = QmiWwanGetClientID(QMUX_TYPE_WDS_ADMIN);
#ifdef CONFIG_COEX_WWAN_STATE
qmiclientId[QMUX_TYPE_COEX] = QmiWwanGetClientID(QMUX_TYPE_COEX);
#endif
#ifdef CONFIG_ENABLE_QOS
qmiclientId[QMUX_TYPE_QOS] = QmiWwanGetClientID(QMUX_TYPE_QOS);
#endif
profile->wda_client = qmiclientId[QMUX_TYPE_WDS_ADMIN];
return 0;
}
static int QmiWwanDeInit(void) {
unsigned int i;
for (i = 0; i < sizeof(qmiclientId)/sizeof(qmiclientId[0]); i++)
{
if (qmiclientId[i] != 0)
{
QmiWwanReleaseClientID((QMUX_TYPE_WDS_IPV6 == i ? QMUX_TYPE_WDS : i), qmiclientId[i]);
qmiclientId[i] = 0;
}
}
return 0;
}
static ssize_t qmi_proxy_read (int fd, void *buf, size_t size) {
ssize_t nreads;
PQCQMI_HDR pHdr = (PQCQMI_HDR)buf;
nreads = read(fd, pHdr, sizeof(QCQMI_HDR));
if (nreads == sizeof(QCQMI_HDR) && le16_to_cpu(pHdr->Length) < size) {
nreads += read(fd, pHdr+1, le16_to_cpu(pHdr->Length) + 1 - sizeof(QCQMI_HDR));
}
return nreads;
}
#ifdef QUECTEL_QMI_MERGE
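/* Some SoCs cannot read more than 64 bytes of QMI data per USB endpoint-0 read
 * (see QUECTEL_QMI_MERGE in the release notes), so the module splits responses
 * into fragments prefixed with a QMI_MSG_HEADER. This helper reassembles them:
 * it returns 0 (and updates *src_size) once a complete message sits in buf, or
 * -1 while fragments are still pending or the header is not a merge header. */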
static int QmiWwanMergeQmiRsp(void *buf, ssize_t *src_size) {
static QMI_MSG_PACKET s_QMIPacket;
QMI_MSG_HEADER *header = NULL;
ssize_t size = *src_size;
if((uint16_t)size < sizeof(QMI_MSG_HEADER))
return -1;
header = (QMI_MSG_HEADER *)buf;
if(le16_to_cpu(header->idenity) != MERGE_PACKET_IDENTITY || le16_to_cpu(header->version) != MERGE_PACKET_VERSION || le16_to_cpu(header->cur_len) > le16_to_cpu(header->total_len))
return -1;
if(le16_to_cpu(header->cur_len) == le16_to_cpu(header->total_len)) {
*src_size = le16_to_cpu(header->total_len);
memcpy(buf, buf + sizeof(QMI_MSG_HEADER), *src_size);
s_QMIPacket.len = 0;
return 0;
}
memcpy(s_QMIPacket.buf + s_QMIPacket.len, buf + sizeof(QMI_MSG_HEADER), le16_to_cpu(header->cur_len));
s_QMIPacket.len += le16_to_cpu(header->cur_len);
if (le16_to_cpu(header->cur_len) < MERGE_PACKET_MAX_PAYLOAD_SIZE || s_QMIPacket.len >= le16_to_cpu(header->total_len)) {
memcpy(buf, s_QMIPacket.buf, s_QMIPacket.len);
*src_size = s_QMIPacket.len;
s_QMIPacket.len = 0;
return 0;
}
return -1;
}
#endif
static void * QmiWwanThread(void *pData) {
PROFILE_T *profile = (PROFILE_T *)pData;
const char *cdc_wdm = (const char *)profile->qmichannel;
int wait_for_request_quit = 0;
char num = cdc_wdm[strlen(cdc_wdm)-1];
if (profile->proxy[0]) {
if (!strncmp(profile->proxy, QUECTEL_QMI_PROXY, strlen(QUECTEL_QMI_PROXY))) {
snprintf(profile->proxy, sizeof(profile->proxy), "%s%c", QUECTEL_QMI_PROXY, num);
}
}
else if (!strncmp(cdc_wdm, "/dev/mhi_IPCR", strlen("/dev/mhi_IPCR"))) {
snprintf(profile->proxy, sizeof(profile->proxy), "%s%c", QUECTEL_QRTR_PROXY, num);
}
else if (profile->qmap_mode > 1) {
snprintf(profile->proxy, sizeof(profile->proxy), "%s%c", QUECTEL_QMI_PROXY, num);
}
if (profile->proxy[0])
cdc_wdm_fd = cm_open_proxy(profile->proxy);
else
cdc_wdm_fd = cm_open_dev(cdc_wdm);
if (cdc_wdm_fd == -1) {
dbg_time("%s Failed to open %s, errno: %d (%s)", __func__, cdc_wdm, errno, strerror(errno));
qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_DISCONNECTED);
pthread_exit(NULL);
return NULL;
}
dbg_time("cdc_wdm_fd = %d", cdc_wdm_fd);
qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_CONNECTED);
while (1) {
struct pollfd pollfds[] = {{qmidevice_control_fd[1], POLLIN, 0}, {cdc_wdm_fd, POLLIN, 0}};
int ne, ret, nevents = sizeof(pollfds)/sizeof(pollfds[0]);
do {
ret = poll(pollfds, nevents, wait_for_request_quit ? 1000 : -1);
} while ((ret < 0) && (errno == EINTR));
if (ret == 0 && wait_for_request_quit) {
QmiThreadRecvQMI(NULL);
continue;
}
if (ret <= 0) {
dbg_time("%s poll=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno));
break;
}
for (ne = 0; ne < nevents; ne++) {
int fd = pollfds[ne].fd;
short revents = pollfds[ne].revents;
//dbg_time("{%d, %x, %x}", pollfds[ne].fd, pollfds[ne].events, pollfds[ne].revents);
if (revents & (POLLERR | POLLHUP | POLLNVAL)) {
dbg_time("%s poll err/hup/inval", __func__);
dbg_time("poll fd = %d, events = 0x%04x", fd, revents);
if (fd == cdc_wdm_fd) {
} else {
}
if (revents & (POLLHUP | POLLNVAL)) //EC20 bug, Can get POLLERR
goto __QmiWwanThread_quit;
}
if ((revents & POLLIN) == 0)
continue;
if (fd == qmidevice_control_fd[1]) {
int triger_event;
if (read(fd, &triger_event, sizeof(triger_event)) == sizeof(triger_event)) {
//DBG("triger_event = 0x%x", triger_event);
switch (triger_event) {
case RIL_REQUEST_QUIT:
goto __QmiWwanThread_quit;
break;
case SIG_EVENT_STOP:
wait_for_request_quit = 1;
break;
default:
break;
}
}
}
if (fd == cdc_wdm_fd) {
ssize_t nreads;
PQCQMIMSG pResponse = (PQCQMIMSG)cm_recv_buf;
if (!profile->proxy[0])
nreads = read(fd, cm_recv_buf, sizeof(cm_recv_buf));
else
nreads = qmi_proxy_read(fd, cm_recv_buf, sizeof(cm_recv_buf));
//dbg_time("%s read=%d errno: %d (%s)", __func__, (int)nreads, errno, strerror(errno));
if (nreads <= 0) {
dbg_time("%s read=%d errno: %d (%s)", __func__, (int)nreads, errno, strerror(errno));
break;
}
#ifdef QUECTEL_QMI_MERGE
if((profile->qmap_mode == 0 || profile->qmap_mode == 1) && QmiWwanMergeQmiRsp(cm_recv_buf, &nreads))
continue;
#endif
if (nreads != (le16_to_cpu(pResponse->QMIHdr.Length) + 1)) {
dbg_time("%s nreads=%d, pQCQMI->QMIHdr.Length = %d", __func__, (int)nreads, le16_to_cpu(pResponse->QMIHdr.Length));
continue;
}
QmiThreadRecvQMI(pResponse);
}
}
}
__QmiWwanThread_quit:
if (cdc_wdm_fd != -1) { close(cdc_wdm_fd); cdc_wdm_fd = -1; }
qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_DISCONNECTED);
QmiThreadRecvQMI(NULL); //main thread may pending on QmiThreadSendQMI()
dbg_time("%s exit", __func__);
pthread_exit(NULL);
return NULL;
}
const struct qmi_device_ops qmiwwan_qmidev_ops = {
.init = QmiWwanInit,
.deinit = QmiWwanDeInit,
.send = QmiWwanSendQMI,
.read = QmiWwanThread,
};
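/* Illustrative selection (done in quectel-CM's setup code; sketch only):
 *   if (qmidev_is_qmiwwan(profile->qmichannel))
 *       profile->qmi_ops = &qmiwwan_qmidev_ops;
 */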
uint8_t qmi_over_mbim_get_client_id(uint8_t QMIType) {
return QmiWwanGetClientID(QMIType);
}
uint8_t qmi_over_mbim_release_client_id(uint8_t QMIType, uint8_t ClientId) {
return QmiWwanReleaseClientID(QMIType, ClientId);
}
#endif

View File

@ -0,0 +1,339 @@
Release Notes
[V1.6.5]
Date: 7/3/2023
enhancement:
1. Fix QMI client ID leakage caused by killing quectel-qmi-proxy clients with "kill -9"
2. Fix issue where the wds_ipv6 client ID could not be released
3. Fix confusion between the PDP context and profile index parameters
4. Add parameter -d to obtain IP and DNS information through QMI
5. Fix MBIM dialing: when the user does not specify an APN through -s, prompt the user and exit the dialing program
6. Prefer the "ip" command and fall back to ifconfig when it is not available
7. Clean up copyright notices
fix:
[V1.6.4]
Date: 9/7/2022
enhancement:
1. set cflags as -Wall -Wextra -Werror -O1, and fix compile errors
2. some code refactoring
3. add quectel-qrtr-proxy
fix:
1. netmask error when use ifconfig on little endian cpu
[V1.6.2]
Date: 11/18/2021
enhancement:
1. support 'LTE && WiFi Coexistence Solution via QMI'.
To use this feature, enable CONFIG_COEX_WWAN_STATE in QMIThread.h
[V1.6.1]
Date: 7/20/2021
enhancement:
1. add requestGetCellInfoList requestRadioPower
2. add QMI OVER MBIM
3. support qrtr and rmnet
4. support RG500U PCIE
5. add qos service && get qos flow data_rate_max func
fix:
1. mbim: increase mbim open timeout to 3 seconds; some modems take a long time to handle the open command.
2. support MsChapV2
3. mbim: invalid memory access when only get one DNS
4. some bug fixes for using AT commands to set up data calls
[V1.6.0.26]
Date: 4/22/2021
enhancement:
1. add lots of log files to show how to use this tool
2. support pcie mhi multiple calls
3. at command: support EC200U/EC200T/EC200S/RG801H/RG500U/
fix:
1. mbim-proxy: fix errors on big endian cpu, ignore mbim open/close cmd from quectel-cm
[V1.6.0.25]
Date: 4/8/2021
enhancement:
fix:
1. fix compile errors when using gcc 9.3.0
2. fix yocto 'QA Issue: No GNU_HASH in the ELF binary'
[V1.6.0.24]
Date: 3/9/2021
enhancement:
1. '-p [quectel-][qmi|mbim]-proxy' can connect to quectel/libqmi/libmbim's proxy, even with only one data call
2. set variable s_9x07 to 1 (from 0); most modems are based on MDM90x7 and later Qualcomm chips.
fix:
1. define CHAR as signed char
2. modify Makefile to generate more compile warnings and fix them
[V1.6.0.23]
Date: 2/26/2021
enhancement:
1. support 'AT+QNETDEVCTL' (not release)
fix:
1. modify help/usage
2. fix some memory access errors in mbim-cm.c
[V1.6.0.22]
Date: 2/4/2021
enhancement:
1. support connecting to libqmi's qmi-proxy
2. only allow '0/1/2/none/pap/chap' for the auth field of '-s'
3. '-m iface-idx' binds the QMAP data call to wwan0_<iface_idx>
fix:
[V1.6.0.21]
Date: 1/28/2021
enhancement:
1. print 5G signal
fix:
1. fix compile errors: -Werror=format-truncation=
[V1.6.0.20]
Date: 12/29/2020
enhancement:
1. Code refactoring
2. support 'AT+QNETDEVCTL' (not release)
fix:
[V1.6.0.19]
Date: 12/4/2020
enhancement:
1. if udhcpc's default.script is missing, directly set IP/DNS/route via the 'ip' command
fix:
[V1.6.0.18]
Date: 12/4/2020
enhancement:
1. Code refactoring
fix:
[V1.6.0.17]
Date: 8/25/2020
enhancement:
1. support MBIM multi-call
2. support unisoc RG500U mbim
3. QUECTEL_QMI_MERGE: some SoCs cannot read more than 64 bytes of QMI data via USB Endpoint 0
fix:
[V1.6.0.15]
Date: 7/24/2020
enhancement:
fix:
1. QMAP multi-call, AT+CFUN=4 then AT+CFUN=1, only one call can obtain IP by DHCP
[V1.6.0.14]
Date: 6/10/2020
enhancement:
1. support X55's GobiNet LOOPBACK
fix:
1. very old uClibc does not support htole32 and pthread_condattr_setclock
2. pthread_cond_wait tv_nsec >= 1000000000U is wrong
3. do not close socket in udhcpc.c ifc_get_addr()
[V1.6.0.13]
Date: 6/9/2020
enhancement:
1. add some examples for OpenWrt, macro 'QL_OPENWER_NETWORK_SETUP'
fix:
[V1.6.0.12]
Date: 5/29/2020
enhancement:
fix:
1. some EM12's usb-net-qmi/mbim interface is at 8 (not 4)
[V1.6.0.11]
Date: 5/28/2020
enhancement:
fix:
1. fix mbim debug on Big Endian CPU
[V1.6.0.10]
Date: 5/25/2020
enhancement:
fix:
1. set QMAP .ul_data_aggregation_max_datagrams to 11 (from 16)
[V1.6.0.9]
Date: 5/22/2020
enhancement:
fix:
1. dial fail when register to 5G-SA
[V1.6.0.8]
Date: 4/30/2020
enhancement:
1. support '-b' to select bridge mode
fix:
[V1.6.0.7]
Date: 4/29/2020
enhancement:
1. support QMAP multi-call for the qmi_wwan_q and pcie_mhi rmnet drivers
fix:
[V1.6.0.6]
Date: 4/20/2020
enhancement:
1. support '-k pdn_idx' to hangup call '-n pdn_idx'
fix:
1. set dl_minimum_padding to 0; modems do not support this feature
[V1.6.0.5]
Date: 4/10/2020
enhancement:
1. support X55's QMAPV5 for PCIE
fix:
[V1.6.0.3]
Date: 4/8/2020
enhancement:
1. support multiple modems each using multiple data calls
fix:
[V1.6.0.2]
Date: 4/7/2020
enhancement:
1. support X55's QMAPV5 for USB
fix:
[V1.6.0.1]
Date: 4/1/2020
enhancement:
1. support QMAP UL AGG (multi data call)
fix:
1. some EM12's usb-net-qmi/mbim interface is at 8 (not 4)
[V1.5.9]
Date: 3/4/2020
enhancement:
1. support pcie mhi multi-APN data call
2. support QMAP UL AGG (single data call)
fix:
1. set 4 bytes aligned for mbim parameters, or cause dial mbim call fail
[V1.5.8]
Date: 2/18/2020
enhancement:
1. support '-l 14' X55's loopback function
fix:
[V1.5.7]
Date: 2/6/2020
enhancement:
1. support '-u usbmon_log_file' to catch usbmon log
fix:
[V1.5.6]
Date: 1/20/2020
enhancement:
1. show driver name and version
2. support PCSCF
3. support bridge in mbim
fix:
[V1.5.5]
Date: 12/31/2019
enhancement:
fix:
1. fix some memory access bug in mbim-cm.c
[WCDMA&LTE_QConnectManager_Linux&Android_V1.5.4]
Date: 12/17/2019
enhancement:
1. Add copyright
2. auto detect pcie mhi /dev/mhi*
fix:
[WCDMA&LTE_QConnectManager_Linux&Android_V1.5.3]
Date: 2019/12/11
enhancement:
1. support show SignalInfo, controlled by macro CONFIG_SIGNALINFO
2. support show 5G_NSA/5G_NA
3. support Microsoft Extend MBIM message
fix:
1. quectel-qmi-proxy bugs on Big-Endian CPU
[WCDMA&LTE_QConnectManager_Linux&Android_V1.5.2]
Date: 12/2/2019
enhancement:
1. support requestGetSignalInfo()
fix:
[WCDMA&LTE_QConnectManager_Linux&Android_V1.4.1]
Date: 10/23/2019
enhancement:
1. support QMI_CTL_REVOKE_CLIENT_ID_IND (Quectel define QMI)
2. add copyright
fix:
1. remove SIGUSR
[WCDMA&LTE_QConnectManager_Linux&Android_V1.3.10]
Date: 10/14/2019
enhancement:
1. increase retry interval
fix:
[WCDMA&LTE_QConnectManager_Linux&Android_V1.2.1]
Date: 2019/02/26
enhancement:
1. Implement help message.
root@ubuntu:# ./quectel-CM -h
[02-26_10:39:21:353] Usage: ./quectel-CM [options]
[02-26_10:39:21:353] -s [apn [user password auth]] Set apn/user/password/auth get from your network provider
[02-26_10:39:21:353] -p pincode Verify sim card pin if sim card is locked
[02-26_10:39:21:353] -f logfilename Save log message of this program to file
[02-26_10:39:21:353] -i interface Specify network interface(default auto-detect)
[02-26_10:39:21:353] -4 IPv4 protocol
[02-26_10:39:21:353] -6 IPv6 protocol
[02-26_10:39:21:353] -m muxID Specify muxid when set multi-pdn data connection.
[02-26_10:39:21:353] -n channelID Specify channelID when set multi-pdn data connection(default 1).
[02-26_10:39:21:353] [Examples]
[02-26_10:39:21:353] Example 1: ./quectel-CM
[02-26_10:39:21:353] Example 2: ./quectel-CM -s 3gnet
[02-26_10:39:21:353] Example 3: ./quectel-CM -s 3gnet carl 1234 0 -p 1234 -f gobinet_log.txt
root@ubuntu:#
2. Support bridge mode when setting up multi-pdn data connections.
3. Host devices can access the network in bridge mode.
[WCDMA&LTE_QConnectManager_Linux&Android_V1.1.46]
Date: 2019/02/18
enhancement:
1. support IPV6-only data calls. quectel-CM now supports three dialing methods: IPV4 only, IPV6 only, IPV4V6.
./quectel-CM -4 (or no argument)   only IPV4
./quectel-CM -6                    only IPV6
./quectel-CM -4 -6                 IPV4 && IPV6
[WCDMA&LTE_QConnectManager_Linux&Android_V1.1.45]
Date: 2018/09/13
enhancement:
1. support EG12 PCIE interface
[WCDMA&LTE_QConnectManager_Linux&Android_V1.1.44]
Date: 2018/09/10
enhancement:
1. support setting up IPV4&IPV6 data calls.
[WCDMA&LTE_QConnectManager_Linux&Android_V1.1.43]
[WCDMA&LTE_QConnectManager_Linux&Android_V1.1.42]
Date: 2018/08/29
enhancement:
1. support QMI_WWAN's QMAP function and bridge mode; please contact Quectel FAE to get the qmi_wwan.c patch.
when enabling QMI_WWAN's QMAP IP Mux function, 'quectel-qmi-proxy -d /dev/cdc-wdmX' must be run before quectel-CM
[WCDMA&LTE_QConnectManager_Linux&Android_V1.1.41]
Date: 2018/05/24
enhancement:
1. fix a cdma data call error
[WCDMA&LTE_QConnectManager_Linux&Android_V1.1.40]
Date: 2018/05/12
enhancement:
1. support GobiNet's QMAP function and bridge mode.
'Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.5' or a later version is required to use QMAP and bridge mode.
for details, please refer to the GobiNet driver

View File

@ -0,0 +1,283 @@
/* //device/system/reference-ril/at_tok.c
**
** Copyright 2006, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
#include "at_tok.h"
#include <string.h>
#include <stdio.h>
#include <ctype.h>
#include <stdlib.h>
#include <stdarg.h>
/**
* Starts tokenizing an AT response string
* returns -1 if this is not a valid response string, 0 on success.
* updates *p_cur with current position
*/
int at_tok_start(char **p_cur)
{
if (*p_cur == NULL) {
return -1;
}
// skip prefix
// consume "^[^:]:"
*p_cur = strchr(*p_cur, ':');
if (*p_cur == NULL) {
return -1;
}
(*p_cur)++;
return 0;
}
static void skipWhiteSpace(char **p_cur)
{
if (*p_cur == NULL) return;
while (**p_cur != '\0' && isspace(**p_cur)) {
(*p_cur)++;
}
}
static void skipNextComma(char **p_cur)
{
if (*p_cur == NULL) return;
while (**p_cur != '\0' && **p_cur != ',') {
(*p_cur)++;
}
if (**p_cur == ',') {
(*p_cur)++;
}
}
static char * nextTok(char **p_cur)
{
char *ret = NULL;
skipWhiteSpace(p_cur);
if (*p_cur == NULL) {
ret = NULL;
} else if (**p_cur == '"') {
(*p_cur)++;
ret = strsep(p_cur, "\"");
skipNextComma(p_cur);
} else {
ret = strsep(p_cur, ",");
}
return ret;
}
/**
* Parses the next integer in the AT response line and places it in *p_out
* returns 0 on success and -1 on fail
* updates *p_cur
* "base" is the same as the base param in strtol
*/
static int at_tok_nextint_base(char **p_cur, int *p_out, int base, int uns)
{
char *ret;
if (*p_cur == NULL) {
return -1;
}
ret = nextTok(p_cur);
if (ret == NULL) {
return -1;
} else {
long l;
char *end;
if (uns)
l = strtoul(ret, &end, base);
else
l = strtol(ret, &end, base);
*p_out = (int)l;
if (end == ret) {
return -1;
}
}
return 0;
}
/**
* Parses the next base 10 integer in the AT response line
* and places it in *p_out
* returns 0 on success and -1 on fail
* updates *p_cur
*/
int at_tok_nextint(char **p_cur, int *p_out)
{
return at_tok_nextint_base(p_cur, p_out, 10, 0);
}
/**
* Parses the next base 16 integer in the AT response line
* and places it in *p_out
* returns 0 on success and -1 on fail
* updates *p_cur
*/
int at_tok_nexthexint(char **p_cur, int *p_out)
{
return at_tok_nextint_base(p_cur, p_out, 16, 1);
}
int at_tok_nextbool(char **p_cur, char *p_out)
{
int ret;
int result;
ret = at_tok_nextint(p_cur, &result);
if (ret < 0) {
return -1;
}
// booleans should be 0 or 1
if (!(result == 0 || result == 1)) {
return -1;
}
if (p_out != NULL) {
*p_out = (char)result;
}
return ret;
}
int at_tok_nextstr(char **p_cur, char **p_out)
{
if (*p_cur == NULL) {
return -1;
}
*p_out = nextTok(p_cur);
return 0;
}
/** returns 1 on "has more tokens" and 0 if no */
int at_tok_hasmore(char **p_cur)
{
return ! (*p_cur == NULL || **p_cur == '\0');
}
int at_tok_count(const char *in_line)
{
int commas = 0;
const char *p;
if (!in_line)
return 0;
for (p = in_line; *p != '\0' ; p++) {
if (*p == ',') commas++;
}
return commas;
}
//fmt: d ~ int, x ~ hexint, b ~ bool, s ~ str
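/* Illustrative usage (sketch), parsing a response such as "+CSQ: 23,99":
 *   int rssi = 0, ber = 0;
 *   at_tok_scanf("+CSQ: 23,99", "%d,%d", &rssi, &ber);   // rssi=23, ber=99
 */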
int at_tok_scanf(const char *in_line, const char *fmt, ...)
{
int n = 0;
int err;
va_list ap;
const char *p = fmt;
void *d;
void *dump;
static char s_line[1024];
char *line = s_line;
if (!in_line)
return 0;
strncpy(s_line, in_line, sizeof(s_line) - 1);
va_start(ap, fmt);
err = at_tok_start(&line);
if (err < 0) goto error;
for (; *p; p++) {
if (*p == ',' || *p == ' ')
continue;
if (*p != '%') {
goto error;
}
p++;
d = va_arg(ap, void *);
if (!d)
d = &dump;
if (!at_tok_hasmore(&line))
break;
if (*line == '-' && *(line + 1) == ',') {
line += 2;
n++;
if (*p == 'd')
*(int *)d = -1;
continue;
}
switch(*p) {
case 'd':
err = at_tok_nextint(&line, (int *)d);
if (err < 0) goto error;
break;
case 'x':
err = at_tok_nexthexint(&line, (int *)d);
if (err < 0) goto error;
break;
case 'b':
err = at_tok_nextbool(&line, (char *)d);
if (err < 0) goto error;
break;
case 's':
err = at_tok_nextstr(&line, (char **)d); //if strdup(line), here return free memory to caller
if (err < 0) goto error;
break;
default:
goto error;
break;
}
n++;
}
va_end(ap);
error:
//free(line);
return n;
}

View File

@ -0,0 +1,33 @@
/* //device/system/reference-ril/at_tok.h
**
** Copyright 2006, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
#ifndef AT_TOK_H
#define AT_TOK_H 1
int at_tok_start(char **p_cur);
int at_tok_nextint(char **p_cur, int *p_out);
int at_tok_nexthexint(char **p_cur, int *p_out);
int at_tok_nextbool(char **p_cur, char *p_out);
int at_tok_nextstr(char **p_cur, char **out);
int at_tok_hasmore(char **p_cur);
int at_tok_count(const char *in_line);
int at_tok_scanf(const char *line, const char *fmt, ...);
#endif /*AT_TOK_H */

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,152 @@
/* //device/system/reference-ril/atchannel.h
**
** Copyright 2006, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
#ifndef ATCHANNEL_H
#define ATCHANNEL_H 1
#ifdef __cplusplus
extern "C" {
#endif
/* define AT_DEBUG to send AT traffic to /tmp/radio-at.log" */
#define AT_DEBUG 0
#if AT_DEBUG
extern void AT_DUMP(const char* prefix, const char* buff, int len);
#else
#define AT_DUMP(prefix,buff,len) do{}while(0)
#endif
#define AT_ERROR_GENERIC -1
#define AT_ERROR_COMMAND_PENDING -2
#define AT_ERROR_CHANNEL_CLOSED -3
#define AT_ERROR_TIMEOUT -4
#define AT_ERROR_INVALID_THREAD -5 /* AT commands may not be issued from
reader thread (or unsolicited response
callback) */
#define AT_ERROR_INVALID_RESPONSE -6 /* eg an at_send_command_singleline that
did not get back an intermediate
response */
typedef enum {
NO_RESULT, /* no intermediate response expected */
NUMERIC, /* a single intermediate response starting with a 0-9 */
SINGLELINE, /* a single intermediate response starting with a prefix */
MULTILINE /* multiple line intermediate response
starting with a prefix */
} ATCommandType;
/** a singly-linked list of intermediate responses */
typedef struct ATLine {
struct ATLine *p_next;
char *line;
} ATLine;
/** Free this with at_response_free() */
typedef struct {
int success; /* true if final response indicates
success (eg "OK") */
char *finalResponse; /* eg OK, ERROR */
ATLine *p_intermediates; /* any intermediate responses */
} ATResponse;
/**
* a user-provided unsolicited response handler function
* this will be called from the reader thread, so do not block
* "s" is the line, and "sms_pdu" is either NULL or the PDU response
* for multi-line TS 27.005 SMS PDU responses (eg +CMT:)
*/
typedef void (*ATUnsolHandler)(const char *s, const char *sms_pdu);
int at_open(int fd, ATUnsolHandler h, int proxy);
void at_close();
/* This callback is invoked on the command thread.
You should reset or handshake here to avoid getting out of sync */
void at_set_on_timeout(void (*onTimeout)(void));
/* This callback is invoked on the reader thread (like ATUnsolHandler)
when the input stream closes before you call at_close
(not when you call at_close())
You should still call at_close()
It may also be invoked immediately from the current thread if the read
channel is already closed */
void at_set_on_reader_closed(void (*onClose)(void));
int at_send_command_singleline (const char *command,
const char *responsePrefix,
ATResponse **pp_outResponse);
int at_send_command_numeric (const char *command,
ATResponse **pp_outResponse);
int at_send_command_multiline (const char *command,
const char *responsePrefix,
ATResponse **pp_outResponse);
int at_send_command_raw (const char *command,
const char *raw_data, unsigned int raw_len,
const char *responsePrefix,
ATResponse **pp_outResponse);
int at_handshake();
int at_send_command (const char *command, ATResponse **pp_outResponse);
int at_send_command_sms (const char *command, const char *pdu,
const char *responsePrefix,
ATResponse **pp_outResponse);
void at_response_free(ATResponse *p_response);
int strStartsWith(const char *line, const char *prefix);
typedef enum {
CME_ERROR_NON_CME = -1,
CME_SUCCESS = 0,
CME_OPERATION_NOT_ALLOWED = 3,
CME_OPERATION_NOT_SUPPORTED = 4,
CME_PH_SIM_PIN= 5,
CME_PH_FSIM_PIN = 6,
CME_PH_FSIM_PUK = 7,
CME_SIM_NOT_INSERTED =10,
CME_SIM_PIN_REQUIRED = 11,
CME_SIM_PUK_REQUIRED = 12,
CME_FAILURE = 13,
CME_SIM_BUSY = 14,
CME_SIM_WRONG = 15,
CME_INCORRECT_PASSWORD = 16,
CME_SIM_PIN2_REQUIRED = 17,
CME_SIM_PUK2_REQUIRED = 18,
CME_MEMORY_FULL = 20,
CME_INVALID_INDEX = 21,
CME_NOT_FOUND = 22,
CME_MEMORY_FAILURE = 23,
CME_STRING_TO_LONG = 24,
CME_INVALID_CHAR = 25,
CME_DIALSTR_TO_LONG = 26,
CME_INVALID_DIALCHAR = 27,
} AT_CME_Error;
AT_CME_Error at_get_cme_error(const ATResponse *p_response);
#ifdef __cplusplus
}
#endif
#endif /*ATCHANNEL_H*/

View File

@ -0,0 +1,48 @@
# -*- Autoconf -*-
# Process this file with autoconf to produce a configure script.
AC_PREREQ([2.61])
AC_INIT([quectel-CM], [1.0], [fae-support@quectel.com])
AC_CONFIG_HEADERS([config.h])
# Checks for programs.
AC_PROG_CC
# Checks for libraries.
# Checks for header files.
# Checks for typedefs, structures, and compiler characteristics.
AC_ARG_WITH(sanitized-headers,
AS_HELP_STRING([--with-sanitized-headers=DIR],
[Specify the location of the sanitized Linux headers]),
[CPPFLAGS="$CPPFLAGS -idirafter $withval"])
AC_ARG_WITH([qrtr],
AS_HELP_STRING([--with-qrtr],
[enable qrtr support]))
if (test "x${with_qrtr}" = "xyes"); then
#AC_DEFINE(ENABLE_USEQTRT, 1, [Define if uses qrtr])
AC_CHECK_HEADERS([linux/qrtr.h linux/rmnet_data.h])
fi
AM_CONDITIONAL(USE_QRTR, test "x${with_qrtr}" = "xyes")
AC_ARG_WITH([msm-ipc],
AS_HELP_STRING([--with-msm-ipc],
[enable msm-ipc support]))
if (test "x${with_msm_ipc}" = "xyes"); then
#AC_DEFINE(ENABLE_USEQTRT, 1, [Define if uses qrtr])
AC_CHECK_HEADERS([linux/msm_ipc.h linux/rmnet_data.h])
fi
AM_CONDITIONAL(USE_MSM_IPC, test "x${with_msm_ipc}" = "xyes")
# Checks for library functions.
# Does not strictly follow GNU Coding standards
AM_INIT_AUTOMAKE([foreign subdir-objects])
AC_CONFIG_FILES([Makefile])
AC_OUTPUT

View File

@ -0,0 +1,63 @@
#!/bin/sh
# Busybox udhcpc dispatcher script. Copyright (C) 2009 by Axel Beckert.
#
# Based on the busybox example scripts and the old udhcp source
# package default.* scripts.
RESOLV_CONF="/etc/resolv.conf"
case $1 in
bound|renew)
[ -n "$broadcast" ] && BROADCAST="broadcast $broadcast"
[ -n "$subnet" ] && NETMASK="netmask $subnet"
/sbin/ifconfig $interface $ip $BROADCAST $NETMASK
if [ -n "$router" ]; then
echo "$0: Resetting default routes"
while /sbin/route del default gw 0.0.0.0 dev $interface; do :; done
metric=0
for i in $router; do
/sbin/route add default gw $i dev $interface metric $metric
metric=$(($metric + 1))
done
fi
# Update resolver configuration file
R=""
[ -n "$domain" ] && R="domain $domain
"
for i in $dns; do
echo "$0: Adding DNS $i"
R="${R}nameserver $i
"
done
if [ -x /sbin/resolvconf ]; then
echo -n "$R" | resolvconf -a "${interface}.udhcpc"
else
echo -n "$R" > "$RESOLV_CONF"
fi
;;
deconfig)
if [ -x /sbin/resolvconf ]; then
resolvconf -d "${interface}.udhcpc"
fi
/sbin/ifconfig $interface 0.0.0.0
;;
leasefail)
echo "$0: Lease failed: $message"
;;
nak)
echo "$0: Received a NAK: $message"
;;
*)
echo "$0: Unknown udhcpc command: $1";
exit 1;
;;
esac

View File

@ -0,0 +1,61 @@
#!/bin/sh
# Busybox udhcpc dispatcher script. Copyright (C) 2009 by Axel Beckert.
#
# Based on the busybox example scripts and the old udhcp source
# package default.* scripts.
RESOLV_CONF="/etc/resolv.conf"
IPCMD=`which ip`
case $1 in
bound|renew)
		[ -n "$broadcast" ] && BROADCAST="broadcast $broadcast"
		$IPCMD address add $BROADCAST $ip/$subnet dev $interface
if [ -n "$router" ]; then
echo "$0: Resetting default routes"
while $IPCMD route del default dev $interface; do :; done
metric=0
for i in $router; do
				$IPCMD route add default dev $interface via $i metric $metric
metric=$(($metric + 1))
done
fi
# Update resolver configuration file
R=""
[ -n "$domain" ] && R="domain $domain
"
for i in $dns; do
echo "$0: Adding DNS $i"
R="${R}nameserver $i
"
done
if [ -x /sbin/resolvconf ]; then
echo -n "$R" | resolvconf -a "${interface}.udhcpc"
else
echo -n "$R" > "$RESOLV_CONF"
fi
;;
deconfig)
if [ -x /sbin/resolvconf ]; then
resolvconf -d "${interface}.udhcpc"
fi
$IPCMD address flush dev $interface
;;
leasefail)
echo "$0: Lease failed: $message"
;;
nak)
echo "$0: Received a NAK: $message"
;;
*)
echo "$0: Unknown udhcpc command: $1";
exit 1;
;;
esac

View File

@ -0,0 +1,746 @@
/******************************************************************************
@file device.c
@brief QMI device dirver.
DESCRIPTION
Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules.
INITIALIZATION AND SEQUENCING REQUIREMENTS
None.
---------------------------------------------------------------------------
Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved.
Quectel Wireless Solution Proprietary and Confidential.
---------------------------------------------------------------------------
******************************************************************************/
#include <unistd.h>
#include <sys/types.h>
#include <fcntl.h>
#include <dirent.h>
#include <errno.h>
#include <strings.h>
#include <stdlib.h>
#include <limits.h>
#include <linux/usbdevice_fs.h>
#include <linux/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <time.h>
#include <pthread.h>
#include "QMIThread.h"
#include "ethtool-copy.h"
#define USB_CLASS_VENDOR_SPEC 0xff
#define USB_CLASS_COMM 2
#define USB_CDC_SUBCLASS_ACM 0x02
#define USB_CDC_SUBCLASS_ETHERNET 0x06
#define USB_CDC_SUBCLASS_NCM 0x0d
#define USB_CDC_SUBCLASS_MBIM 0x0e
#define USB_CLASS_WIRELESS_CONTROLLER 0xe0
#define CM_MAX_PATHLEN 256
#define CM_INVALID_VAL (~((int)0))
/* get first line from file 'fname'
* And convert the content into a hex number, then return this number */
static int file_get_value(const char *fname, int base)
{
FILE *fp = NULL;
long num;
char buff[32 + 1] = {'\0'};
char *endptr = NULL;
fp = fopen(fname, "r");
if (!fp) goto error;
if (fgets(buff, sizeof(buff), fp) == NULL)
goto error;
    fclose(fp);
    fp = NULL; /* prevent a double fclose() in the error path below */
    errno = 0;
    num = strtol(buff, &endptr, base); /* keep the full long so the ERANGE check works */
    if (errno == ERANGE && (num == LONG_MAX || num == LONG_MIN))
goto error;
/* if there is no digit in buff */
if (endptr == buff)
goto error;
if (debug_qmi)
dbg_time("(%s) = %lx", fname, num);
return (int)num;
error:
if (fp) fclose(fp);
return CM_INVALID_VAL;
}
/*
* This function will search the directory 'dirname' and return the first child.
* '.' and '..' is ignored by default
*/
static int dir_get_child(const char *dirname, char *buff, unsigned bufsize, const char *prefix)
{
struct dirent *entptr = NULL;
DIR *dirptr;
buff[0] = 0;
dirptr = opendir(dirname);
if (!dirptr)
return -1;
while ((entptr = readdir(dirptr))) {
if (entptr->d_name[0] == '.')
continue;
if (prefix && strlen(prefix) && strncmp(entptr->d_name, prefix, strlen(prefix)))
continue;
snprintf(buff, bufsize, "%.31s", entptr->d_name);
break;
}
closedir(dirptr);
return 0;
}
static int conf_get_val(const char *fname, const char *key)
{
char buff[128] = {'\0'};
FILE *fp = fopen(fname, "r");
if (!fp)
return CM_INVALID_VAL;
while (fgets(buff, sizeof(buff)-1, fp)) {
char prefix[128] = {'\0'};
char tail[128] = {'\0'};
        /* To eliminate a cppcheck warning: assume the string length is no more than 15 */
sscanf(buff, "%15[^=]=%15s", prefix, tail);
if (!strncasecmp(prefix, key, strlen(key))) {
fclose(fp);
return atoi(tail);
}
}
fclose(fp);
return CM_INVALID_VAL;
}
static void query_usb_device_info(char *path, struct usb_device_info *p) {
size_t offset = strlen(path);
memset(p, 0, sizeof(*p));
path[offset] = '\0';
strcat(path, "/idVendor");
p->idVendor = file_get_value(path, 16);
if (p->idVendor == CM_INVALID_VAL)
return;
path[offset] = '\0';
strcat(path, "/idProduct");
p->idProduct = file_get_value(path, 16);
path[offset] = '\0';
strcat(path, "/busnum");
p->busnum = file_get_value(path, 10);
path[offset] = '\0';
strcat(path, "/devnum");
p->devnum = file_get_value(path, 10);
path[offset] = '\0';
strcat(path, "/bNumInterfaces");
p->bNumInterfaces = file_get_value(path, 10);
path[offset] = '\0';
}
static void query_usb_interface_info(char *path, struct usb_interface_info *p) {
char driver[128];
size_t offset = strlen(path);
int n;
memset(p, 0, sizeof(*p));
path[offset] = '\0';
strcat(path, "/bNumEndpoints");
    p->bNumEndpoints = file_get_value(path, 16);
path[offset] = '\0';
strcat(path, "/bInterfaceClass");
p->bInterfaceClass = file_get_value(path, 16);
path[offset] = '\0';
strcat(path, "/bInterfaceSubClass");
p->bInterfaceSubClass = file_get_value(path, 16);
path[offset] = '\0';
strcat(path, "/bInterfaceProtocol");
p->bInterfaceProtocol = file_get_value(path, 16);
path[offset] = '\0';
strcat(path, "/driver");
    n = readlink(path, driver, sizeof(driver) - 1);
if (n > 0) {
driver[n] = 0;
if (debug_qmi) dbg_time("driver -> %s", driver);
n = strlen(driver);
while (n > 0) {
if (driver[n] == '/')
break;
n--;
}
strncpy(p->driver, &driver[n+1], sizeof(p->driver) - 1);
}
path[offset] = '\0';
}
static int detect_path_cdc_wdm_or_qcqmi(char *path, char *devname, size_t bufsize)
{
size_t offset = strlen(path);
char tmp[32];
devname[0] = 0;
if (access(path, R_OK))
return -1;
path[offset] = '\0';
strcat(path, "/GobiQMI");
if (!access(path, R_OK))
goto step_1;
path[offset] = '\0';
strcat(path, "/usbmisc");
if (!access(path, R_OK))
goto step_1;
path[offset] = '\0';
strcat(path, "/usb");
if (!access(path, R_OK))
goto step_1;
return -1;
step_1:
/* get device(qcqmiX|cdc-wdmX) */
if (debug_qmi) dbg_time("%s", path);
dir_get_child(path, tmp, sizeof(tmp), NULL);
if (tmp[0] == '\0')
return -1;
    /* There is a chance that no device node (qcqmiX|cdc-wdmX) was generated; warn the user about that. */
snprintf(devname, bufsize, "/dev/%s", tmp);
if (access(devname, R_OK | F_OK) && errno == ENOENT)
{
int major, minor;
dbg_time("access %s failed, errno: %d (%s)", devname, errno, strerror(errno));
strcat(path, "/");
strcat(path, tmp);
strcat(path, "/uevent");
major = conf_get_val(path, "MAJOR");
minor = conf_get_val(path, "MINOR");
if(major == CM_INVALID_VAL || minor == CM_INVALID_VAL)
dbg_time("get major and minor failed");
else if (mknod(devname, S_IFCHR|0666, (((major & 0xfff) << 8) | (minor & 0xff) | ((minor & 0xfff00) << 12))))
dbg_time("please mknod %s c %d %d", devname, major, minor);
}
return 0;
}
/* To detect the device info of the modem.
* return:
* FALSE -> fail
* TRUE -> ok
*/
BOOL qmidevice_detect(char *qmichannel, char *usbnet_adapter, unsigned bufsize, PROFILE_T *profile) {
struct dirent* ent = NULL;
DIR *pDir;
const char *rootdir = "/sys/bus/usb/devices";
struct {
char path[255*2];
} *pl;
pl = (typeof(pl)) malloc(sizeof(*pl));
memset(pl, 0x00, sizeof(*pl));
pDir = opendir(rootdir);
if (!pDir) {
dbg_time("opendir %s failed: %s", rootdir, strerror(errno));
goto error;
}
while ((ent = readdir(pDir)) != NULL) {
char netcard[32+1] = {'\0'};
char devname[32+5] = {'\0'}; //+strlen("/dev/")
int netIntf;
int driver_type;
if (ent->d_name[0] == 'u')
continue;
snprintf(pl->path, sizeof(pl->path), "%s/%s", rootdir, ent->d_name);
query_usb_device_info(pl->path, &profile->usb_dev);
if (profile->usb_dev.idVendor == CM_INVALID_VAL)
continue;
if (profile->usb_dev.idVendor == 0x2c7c || profile->usb_dev.idVendor == 0x05c6) {
dbg_time("Find %s/%s idVendor=0x%x idProduct=0x%x, bus=0x%03x, dev=0x%03x",
rootdir, ent->d_name, profile->usb_dev.idVendor, profile->usb_dev.idProduct,
profile->usb_dev.busnum, profile->usb_dev.devnum);
}
/* get network interface */
        /* NOTICE: there is a case where bNumInterfaces=6 but the net interface index is 8 */
        /* toolchain-mips_24kc_gcc-5.4.0_musl does not support GLOB_BRACE */
        /* RG500U's MBIM is at interface 0 */
for (netIntf = 0; netIntf < (profile->usb_dev.bNumInterfaces + 8); netIntf++) {
snprintf(pl->path, sizeof(pl->path), "%s/%s:1.%d/net", rootdir, ent->d_name, netIntf);
dir_get_child(pl->path, netcard, sizeof(netcard), NULL);
if (netcard[0])
break;
}
if (netcard[0] == '\0') { //for centos 2.6.x
const char *n= "usb0";
const char *c = "qcqmi0";
snprintf(pl->path, sizeof(pl->path), "%s/%s:1.4/net:%s", rootdir, ent->d_name, n);
if (!access(pl->path, F_OK)) {
snprintf(pl->path, sizeof(pl->path), "%s/%s:1.4/GobiQMI:%s", rootdir, ent->d_name, c);
if (!access(pl->path, F_OK)) {
snprintf(qmichannel, bufsize, "/dev/%s", c);
snprintf(usbnet_adapter, bufsize, "%s", n);
snprintf(pl->path, sizeof(pl->path), "%s/%s:1.4", rootdir, ent->d_name);
query_usb_interface_info(pl->path, &profile->usb_intf);
break;
}
}
}
if (netcard[0] == '\0')
continue;
/* not '-i iface' */
if (usbnet_adapter[0] && strcmp(usbnet_adapter, netcard))
continue;
snprintf(pl->path, sizeof(pl->path), "%s/%s:1.%d", rootdir, ent->d_name, netIntf);
query_usb_interface_info(pl->path, &profile->usb_intf);
driver_type = get_driver_type(profile);
if (driver_type == SOFTWARE_QMI || driver_type == SOFTWARE_MBIM) {
detect_path_cdc_wdm_or_qcqmi(pl->path, devname, sizeof(devname));
}
else if (driver_type == SOFTWARE_ECM_RNDIS_NCM)
{
int atIntf = -1;
if (profile->usb_dev.idVendor == 0x2c7c) { //Quectel
switch (profile->usb_dev.idProduct) { //EC200U
case 0x0901: //EC200U
case 0x8101: //RG801H
atIntf = 2;
break;
case 0x0900: //RG500U
atIntf = 4;
break;
case 0x6026: //EC200T
case 0x6005: //EC200A
case 0x6002: //EC200S
case 0x6001: //EC100Y
atIntf = 3;
break;
default:
dbg_time("unknow at interface for USB idProduct:%04x\n", profile->usb_dev.idProduct);
break;
}
}
if (atIntf != -1) {
snprintf(pl->path, sizeof(pl->path), "%s/%s:1.%d", rootdir, ent->d_name, atIntf);
dir_get_child(pl->path, devname, sizeof(devname), "tty");
if (devname[0] && !strcmp(devname, "tty")) {
snprintf(pl->path, sizeof(pl->path), "%s/%s:1.%d/tty", rootdir, ent->d_name, atIntf);
dir_get_child(pl->path, devname, sizeof(devname), "tty");
}
}
}
if (netcard[0] && devname[0]) {
if (devname[0] == '/')
snprintf(qmichannel, bufsize, "%s", devname);
else
snprintf(qmichannel, bufsize, "/dev/%s", devname);
snprintf(usbnet_adapter, bufsize, "%s", netcard);
dbg_time("Auto find qmichannel = %s", qmichannel);
dbg_time("Auto find usbnet_adapter = %s", usbnet_adapter);
break;
}
}
closedir(pDir);
if (qmichannel[0] == '\0' || usbnet_adapter[0] == '\0') {
dbg_time("network interface '%s' or qmidev '%s' is not exist", usbnet_adapter, qmichannel);
goto error;
}
free(pl);
return TRUE;
error:
free(pl);
return FALSE;
}
int mhidevice_detect(char *qmichannel, char *usbnet_adapter, PROFILE_T *profile) {
struct dirent* ent = NULL;
DIR *pDir;
const char *rootdir_mhi[] = {"/sys/bus/mhi_q/devices", "/sys/bus/mhi/devices", NULL};
int i = 0;
char path[256];
int find = 0;
while (rootdir_mhi[i]) {
const char *rootdir = rootdir_mhi[i++];
pDir = opendir(rootdir);
if (!pDir) {
if (errno != ENOENT)
dbg_time("opendir %s failed: %s", rootdir, strerror(errno));
continue;
}
while ((ent = readdir(pDir)) != NULL) {
char netcard[32] = {'\0'};
char devname[32] = {'\0'};
int software_interface = SOFTWARE_QMI;
char *pNode = NULL;
pNode = strstr(ent->d_name, "_IP_HW0"); //0306_00.01.00_IP_HW0
if (!pNode)
continue;
snprintf(path, sizeof(path), "%s/%.32s/net", rootdir, ent->d_name);
dir_get_child(path, netcard, sizeof(netcard), NULL);
if (!netcard[0])
continue;
if (usbnet_adapter[0] && strcmp(netcard, usbnet_adapter)) //not '-i x'
continue;
if (!strcmp(rootdir, "/sys/bus/mhi/devices")) {
snprintf(path, sizeof(path), "%s/%.13s_IPCR", rootdir, ent->d_name); // 13 is sizeof(0306_00.01.00)
if (!access(path, F_OK)) {
                    /* we also need to 'cat /dev/mhi_0306_00.01.00_pipe_14' to enable rmnet (much like asserting DTR on USB),
                       otherwise we get the error 'requestSetEthMode QMUXResult = 0x1, QMUXError = 0x46' */
sprintf(usbnet_adapter, "%s", netcard);
sprintf(qmichannel, "qrtr-%d", 3); // 3 is sdx modem's node id
profile->software_interface = SOFTWARE_QRTR;
find = 1;
break;
}
continue;
}
snprintf(path, sizeof(path), "%s/%.13s_IPCR", rootdir, ent->d_name);
if (access(path, F_OK)) {
snprintf(path, sizeof(path), "%s/%.13s_QMI0", rootdir, ent->d_name);
if (access(path, F_OK)) {
snprintf(path, sizeof(path), "%s/%.13s_MBIM", rootdir, ent->d_name);
if (!access(path, F_OK))
software_interface = SOFTWARE_MBIM;
}
}
if (access(path, F_OK))
continue;
strncat(path, "/mhi_uci_q", sizeof(path)-1);
dir_get_child(path, devname, sizeof(devname), NULL);
if (!devname[0])
continue;
sprintf(usbnet_adapter, "%s", netcard);
sprintf(qmichannel, "/dev/%s", devname);
profile->software_interface = software_interface;
find = 1;
break;
}
closedir(pDir);
}
return find;
}
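/* Illustrative sketch, not called anywhere in this file: the comment inside
 * mhidevice_detect() above notes that, for modems enumerated on the upstream
 * /sys/bus/mhi bus, rmnet only works once the "_pipe_14" node is held open
 * (the PCIe counterpart of asserting DTR on USB), otherwise requestSetEthMode
 * fails with QMUXError = 0x46. The exact device name below is taken from that
 * comment and will differ per unit; everything else here is an assumption. */
static int hold_mhi_pipe14_open(void)
{
    const char *pipe_dev = "/dev/mhi_0306_00.01.00_pipe_14"; /* example name only */
    int fd = open(pipe_dev, O_RDWR | O_NOCTTY);

    if (fd < 0) {
        dbg_time("fail to open %s, errno: %d (%s)", pipe_dev, errno, strerror(errno));
        return -1;
    }
    /* Keep the fd open for the lifetime of the data call, mirroring what
     * 'cat /dev/mhi_..._pipe_14' does from a shell. */
    return fd;
}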
int atdevice_detect(char *atchannel, char *usbnet_adapter, PROFILE_T *profile) {
if (!access("/sys/class/net/sipa_dummy0", F_OK)) {
strcpy(usbnet_adapter, "sipa_dummy0");
snprintf(profile->qmapnet_adapter, sizeof(profile->qmapnet_adapter), "%s%d", "pcie", profile->pdp - 1);
}
else {
dbg_time("atdevice_detect failed");
goto error;
}
if (!access("/dev/stty_nr31", F_OK)) {
strcpy(atchannel, "/dev/stty_nr31");
profile->software_interface = SOFTWARE_ECM_RNDIS_NCM;
}
else {
goto error;
}
return 1;
error:
return 0;
}
int get_driver_type(PROFILE_T *profile)
{
/* QMI_WWAN */
if (profile->usb_intf.bInterfaceClass == USB_CLASS_VENDOR_SPEC) {
return SOFTWARE_QMI;
}
else if (profile->usb_intf.bInterfaceClass == USB_CLASS_COMM) {
switch (profile->usb_intf.bInterfaceSubClass) {
case USB_CDC_SUBCLASS_MBIM:
return SOFTWARE_MBIM;
break;
case USB_CDC_SUBCLASS_ETHERNET:
case USB_CDC_SUBCLASS_NCM:
return SOFTWARE_ECM_RNDIS_NCM;
break;
default:
break;
}
}
else if (profile->usb_intf.bInterfaceClass == USB_CLASS_WIRELESS_CONTROLLER) {
if (profile->usb_intf.bInterfaceSubClass == 1 && profile->usb_intf.bInterfaceProtocol == 3)
return SOFTWARE_ECM_RNDIS_NCM;
}
dbg_time("%s unknow bInterfaceClass=%d, bInterfaceSubClass=%d", __func__,
profile->usb_intf.bInterfaceClass, profile->usb_intf.bInterfaceSubClass);
return DRV_INVALID;
}
struct usbfs_getdriver
{
unsigned int interface;
char driver[255 + 1];
};
struct usbfs_ioctl
{
int ifno; /* interface 0..N ; negative numbers reserved */
int ioctl_code; /* MUST encode size + direction of data so the
* macros in <asm/ioctl.h> give correct values */
void *data; /* param buffer (in, or out) */
};
#define IOCTL_USBFS_DISCONNECT _IO('U', 22)
#define IOCTL_USBFS_CONNECT _IO('U', 23)
int usbfs_is_kernel_driver_alive(int fd, int ifnum)
{
struct usbfs_getdriver getdrv;
getdrv.interface = ifnum;
if (ioctl(fd, USBDEVFS_GETDRIVER, &getdrv) < 0) {
dbg_time("%s ioctl USBDEVFS_GETDRIVER failed, kernel driver may be inactive", __func__);
return 0;
}
dbg_time("%s find interface %d has match the driver %s", __func__, ifnum, getdrv.driver);
return 1;
}
void usbfs_detach_kernel_driver(int fd, int ifnum)
{
struct usbfs_ioctl operate;
operate.data = NULL;
operate.ifno = ifnum;
operate.ioctl_code = IOCTL_USBFS_DISCONNECT;
if (ioctl(fd, USBDEVFS_IOCTL, &operate) < 0) {
dbg_time("%s detach kernel driver failed", __func__);
} else {
dbg_time("%s detach kernel driver success", __func__);
}
}
void usbfs_attach_kernel_driver(int fd, int ifnum)
{
struct usbfs_ioctl operate;
operate.data = NULL;
operate.ifno = ifnum;
operate.ioctl_code = IOCTL_USBFS_CONNECT;
if (ioctl(fd, USBDEVFS_IOCTL, &operate) < 0) {
dbg_time("%s detach kernel driver failed", __func__);
} else {
dbg_time("%s detach kernel driver success", __func__);
}
}
int reattach_driver(PROFILE_T *profile)
{
int ifnum = 4;
int fd;
char devpath[128] = {'\0'};
snprintf(devpath, sizeof(devpath), "/dev/bus/usb/%03d/%03d", profile->usb_dev.busnum, profile->usb_dev.devnum);
fd = open(devpath, O_RDWR | O_NOCTTY);
if (fd < 0)
{
dbg_time("%s fail to open %s", __func__, devpath);
return -1;
}
usbfs_detach_kernel_driver(fd, ifnum);
usbfs_attach_kernel_driver(fd, ifnum);
close(fd);
return 0;
}
#define SIOCETHTOOL 0x8946
int ql_get_netcard_driver_info(const char *devname)
{
int fd = -1;
struct ethtool_drvinfo drvinfo;
struct ifreq ifr; /* ifreq suitable for ethtool ioctl */
memset(&ifr, 0, sizeof(ifr));
strcpy(ifr.ifr_name, devname);
fd = socket(AF_INET, SOCK_DGRAM, 0);
if (fd < 0) {
dbg_time("Cannot get control socket: errno(%d)(%s)", errno, strerror(errno));
return -1;
}
drvinfo.cmd = ETHTOOL_GDRVINFO;
ifr.ifr_data = (void *)&drvinfo;
if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
dbg_time("ioctl() error: errno(%d)(%s)", errno, strerror(errno));
close(fd);
return -1;
}
dbg_time("netcard driver = %s, driver version = %s", drvinfo.driver, drvinfo.version);
close(fd);
return 0;
}
int ql_get_netcard_carrier_state(const char *devname)
{
int fd = -1;
struct ethtool_value edata;
struct ifreq ifr; /* ifreq suitable for ethtool ioctl */
memset(&ifr, 0, sizeof(ifr));
strcpy(ifr.ifr_name, devname);
fd = socket(AF_INET, SOCK_DGRAM, 0);
if (fd < 0) {
dbg_time("Cannot get control socket: errno(%d)(%s)", errno, strerror(errno));
return -1;
}
edata.cmd = ETHTOOL_GLINK;
edata.data = 0;
ifr.ifr_data = (void *)&edata;
if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
dbg_time("ioctl('%s') error: errno(%d)(%s)", devname, errno, strerror(errno));
return -1;
}
if (!edata.data)
dbg_time("netcard carrier = %d", edata.data);
close(fd);
return edata.data;
}
static void *catch_log(void *arg)
{
PROFILE_T *profile = (PROFILE_T *)arg;
int nreads = 0;
char tbuff[256+32];
char filter[32];
size_t tsize = strlen(get_time()) + 1;
snprintf(filter, sizeof(filter), ":%d:%03d:", profile->usb_dev.busnum, profile->usb_dev.devnum);
while(1) {
nreads = read(profile->usbmon_fd, tbuff + tsize, sizeof(tbuff) - tsize - 1);
if (nreads <= 0) {
if (nreads == -1 && errno == EINTR)
continue;
break;
}
tbuff[tsize+nreads] = '\0'; // printf("%s", buff);
if (!strstr(tbuff+tsize, filter))
continue;
snprintf(tbuff, sizeof(tbuff), "%s", get_time());
tbuff[tsize-1] = ' ';
fwrite(tbuff, strlen(tbuff), 1, profile->usbmon_logfile_fp);
}
return NULL;
}
int ql_capture_usbmon_log(PROFILE_T *profile, const char *log_path)
{
char usbmon_path[256];
pthread_t pt;
pthread_attr_t attr;
if (access("/sys/module/usbmon", F_OK)) {
dbg_time("usbmon is not load, please execute \"modprobe usbmon\" or \"insmod usbmon.ko\"");
return -1;
}
if (access("/sys/kernel/debug/usb", F_OK)) {
dbg_time("debugfs is not mount, please execute \"mount -t debugfs none_debugs /sys/kernel/debug\"");
return -1;
}
snprintf(usbmon_path, sizeof(usbmon_path), "/sys/kernel/debug/usb/usbmon/%du", profile->usb_dev.busnum);
profile->usbmon_fd = open(usbmon_path, O_RDONLY);
if (profile->usbmon_fd < 0) {
dbg_time("open %s error(%d) (%s)", usbmon_path, errno, strerror(errno));
return -1;
}
snprintf(usbmon_path, sizeof(usbmon_path), "cat /sys/kernel/debug/usb/devices >> %s", log_path);
if (system(usbmon_path) == -1) {};
profile->usbmon_logfile_fp = fopen(log_path, "wb");
if (!profile->usbmon_logfile_fp) {
dbg_time("open %s error(%d) (%s)", log_path, errno, strerror(errno));
close(profile->usbmon_fd);
profile->usbmon_fd = -1;
return -1;
}
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
pthread_create(&pt, &attr, catch_log, (void *)profile);
return 0;
}
void ql_stop_usbmon_log(PROFILE_T *profile) {
if (profile->usbmon_fd > 0)
close(profile->usbmon_fd);
if (profile->usbmon_logfile_fp)
fclose(profile->usbmon_logfile_fp);
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,956 @@
/******************************************************************************
@file main.c
@brief The entry program.
DESCRIPTION
Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules.
INITIALIZATION AND SEQUENCING REQUIREMENTS
None.
---------------------------------------------------------------------------
Copyright (c) 2016 -2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved.
Quectel Wireless Solution Proprietary and Confidential.
---------------------------------------------------------------------------
******************************************************************************/
#include "QMIThread.h"
#include <sys/wait.h>
#include <sys/utsname.h>
#include <sys/time.h>
#include <dirent.h>
#include "util.h"
//#define CONFIG_PID_FILE_FORMAT "/var/run/quectel-CM-%s.pid" //for example /var/run/quectel-CM-wwan0.pid
static PROFILE_T s_profile;
int debug_qmi = 0;
int qmidevice_control_fd[2];
static int signal_control_fd[2];
int g_donot_exit_when_modem_hangup = 0;
extern int ql_ifconfig(int argc, char *argv[]);
extern int ql_get_netcard_driver_info(const char*);
extern int ql_capture_usbmon_log(PROFILE_T *profile, const char *log_path);
extern void ql_stop_usbmon_log(PROFILE_T *profile);
//UINT ifc_get_addr(const char *ifname);
static int s_link = -1;
static void usbnet_link_change(int link, PROFILE_T *profile) {
if (s_link == link)
return;
s_link = link;
if (!(link & (1<<IpFamilyV4)))
memset(&profile->ipv4, 0, sizeof(IPV4_T));
if (!(link & (1<<IpFamilyV6)))
memset(&profile->ipv6, 0, sizeof(IPV6_T));
if (link) {
udhcpc_start(profile);
} else {
udhcpc_stop(profile);
}
}
static int check_ipv4_address(PROFILE_T *profile) {
uint32_t oldAddress = profile->ipv4.Address;
if (profile->request_ops == &mbim_request_ops)
return 1; //we will get a new ipv6 address per requestGetIPAddress()
if (profile->request_ops == &atc_request_ops) {
if (!profile->udhcpc_ip) return 1;
oldAddress = profile->udhcpc_ip;
}
if (profile->request_ops->requestGetIPAddress(profile, IpFamilyV4) == 0) {
if (profile->ipv4.Address != oldAddress || debug_qmi) {
unsigned char *l = (unsigned char *)&oldAddress;
unsigned char *r = (unsigned char *)&profile->ipv4.Address;
dbg_time("localIP: %d.%d.%d.%d VS remoteIP: %d.%d.%d.%d",
l[3], l[2], l[1], l[0], r[3], r[2], r[1], r[0]);
}
return (profile->ipv4.Address == oldAddress);
}
return 0;
}
static void main_send_event_to_qmidevice(int triger_event) {
if (write(qmidevice_control_fd[0], &triger_event, sizeof(triger_event)) == -1) {};
}
static void send_signo_to_main(int signo) {
if (write(signal_control_fd[0], &signo, sizeof(signo)) == -1) {};
}
void qmidevice_send_event_to_main(int triger_event) {
if (write(qmidevice_control_fd[1], &triger_event, sizeof(triger_event)) == -1) {};
}
void qmidevice_send_event_to_main_ext(int triger_event, void *data, unsigned len) {
if (write(qmidevice_control_fd[1], &triger_event, sizeof(triger_event)) == -1) {};
if (write(qmidevice_control_fd[1], data, len) == -1) {};
}
#define MAX_PATH 256
static int ls_dir(const char *dir, int (*match)(const char *dir, const char *file, void *argv[]), void *argv[])
{
DIR *pDir;
struct dirent* ent = NULL;
int match_times = 0;
pDir = opendir(dir);
if (pDir == NULL) {
dbg_time("Cannot open directory: %s, errno: %d (%s)", dir, errno, strerror(errno));
return 0;
}
while ((ent = readdir(pDir)) != NULL) {
match_times += match(dir, ent->d_name, argv);
}
closedir(pDir);
return match_times;
}
static int is_same_linkfile(const char *dir, const char *file, void *argv[])
{
const char *qmichannel = (const char *)argv[1];
char linkname[MAX_PATH*2+6];
char filename[MAX_PATH];
int linksize;
snprintf(linkname, sizeof(linkname), "%.256s/%s", dir, file);
linksize = readlink(linkname, filename, sizeof(filename));
if (linksize <= 0)
return 0;
filename[linksize] = 0;
if (strcmp(filename, qmichannel))
return 0;
dbg_time("%s -> %s", linkname, filename);
return 1;
}
static int is_brother_process(const char *dir, const char *file, void *argv[])
{
//const char *myself = (const char *)argv[0];
char linkname[MAX_PATH*2+6];
char filename[MAX_PATH];
int linksize;
int i = 0, kill_timeout = 15;
pid_t pid;
//dbg_time("%s", file);
while (file[i]) {
if (!isdigit(file[i]))
break;
i++;
}
if (file[i]) {
//dbg_time("%s not digit", file);
return 0;
}
snprintf(linkname, sizeof(linkname), "%s/%s/exe", dir, file);
linksize = readlink(linkname, filename, sizeof(filename));
if (linksize <= 0)
return 0;
filename[linksize] = 0;
pid = atoi(file);
if (pid >= getpid())
return 0;
snprintf(linkname, sizeof(linkname), "%s/%s/fd", dir, file);
if (!ls_dir(linkname, is_same_linkfile, argv))
return 0;
dbg_time("%s/%s/exe -> %s", dir, file, filename);
while (kill_timeout-- && !kill(pid, 0))
{
kill(pid, SIGTERM);
sleep(1);
}
if (!kill(pid, 0))
{
dbg_time("force kill %s/%s/exe -> %s", dir, file, filename);
kill(pid, SIGKILL);
sleep(1);
}
return 1;
}
static int kill_brothers(const char *qmichannel)
{
char myself[MAX_PATH];
int filenamesize;
void *argv[2] = {myself, (void *)qmichannel};
filenamesize = readlink("/proc/self/exe", myself, sizeof(myself));
if (filenamesize <= 0)
return 0;
myself[filenamesize] = 0;
if (ls_dir("/proc", is_brother_process, argv))
sleep(1);
return 0;
}
static int kill_data_call_pdp(int pdp, char *self) {
int pid;
char *p = NULL;
p = self;
while (*self) {
if (*self == '/')
p = self+1;
self++;
}
pid = getpid_by_pdp(pdp, p);
if (pid > 0) {
dbg_time("send SIGINT to process %d", pid);
return kill(pid, SIGINT);
}
return -1;
}
static void ql_sigaction(int signo) {
if (SIGALRM == signo)
send_signo_to_main(SIG_EVENT_START);
else
{
g_donot_exit_when_modem_hangup = 0;
send_signo_to_main(SIG_EVENT_STOP);
        main_send_event_to_qmidevice(SIG_EVENT_STOP); //main may be waiting for a QMI response
}
}
static int usage(const char *progname) {
dbg_time("Usage: %s [options]", progname);
dbg_time("-s [apn [user password auth]] Set apn/user/password/auth get from your network provider. auth: 1~pap, 2~chap, 3~MsChapV2");
dbg_time("-p pincode Verify sim card pin if sim card is locked");
dbg_time("-p [quectel-][qmi|mbim]-proxy Request to use proxy");
dbg_time("-f logfilename Save log message of this program to file");
dbg_time("-u usbmonlog filename Save usbmon log to file");
dbg_time("-i interface Specify which network interface to setup data call when multi-modems exits");
dbg_time("-4 Setup IPv4 data call (default)");
dbg_time("-6 Setup IPv6 data call");
dbg_time("-n pdn Specify which pdn to setup data call (default 1 for QMI, 0 for MBIM)");
dbg_time("-k pdn Specify which pdn to hangup data call (by send SIGINT to 'quectel-CM -n pdn')");
dbg_time("-m iface-idx Bind QMI data call to wwan0_<iface idx> when QMAP used. E.g '-n 7 -m 1' bind pdn-7 data call to wwan0_1");
dbg_time("-b Enable network interface bridge function (default 0)");
dbg_time("-v Verbose log mode, for debug purpose.");
dbg_time("-d Obtain the IP address and dns through qmi");
dbg_time("[Examples]");
dbg_time("Example 1: %s ", progname);
dbg_time("Example 2: %s -s 3gnet ", progname);
dbg_time("Example 3: %s -s 3gnet carl 1234 1 -p 1234 -f gobinet_log.txt", progname);
return 0;
}
static int qmi_main(PROFILE_T *profile)
{
int triger_event = 0;
int signo;
#ifdef CONFIG_SIM
SIM_Status SIMStatus = SIM_ABSENT;
#endif
UCHAR PSAttachedState = 0;
UCHAR IPv4ConnectionStatus = QWDS_PKT_DATA_UNKNOW;
UCHAR IPv6ConnectionStatus = QWDS_PKT_DATA_UNKNOW;
unsigned SetupCallFail = 0;
unsigned long SetupCallAllowTime = clock_msec();
#ifdef REBOOT_SIM_CARD_WHEN_LONG_TIME_NO_PS
unsigned PsAttachFail = 0;
unsigned long PsAttachTime = clock_msec();
#endif
int qmierr = 0;
    const struct request_ops *request_ops = profile->request_ops;
pthread_t gQmiThreadID = 0;
//sudo apt-get install udhcpc
//sudo apt-get remove ModemManager
if (profile->reattach_flag) {
if (!reattach_driver(profile))
sleep(2);
}
/* try to recreate FDs*/
if (socketpair( AF_LOCAL, SOCK_STREAM, 0, signal_control_fd) < 0 ) {
dbg_time("%s Faild to create main_control_fd: %d (%s)", __func__, errno, strerror(errno));
return -1;
}
if ( socketpair( AF_LOCAL, SOCK_STREAM, 0, qmidevice_control_fd ) < 0 ) {
dbg_time("%s Failed to create thread control socket pair: %d (%s)", __func__, errno, strerror(errno));
return 0;
}
if ((profile->qmap_mode == 0 || profile->qmap_mode == 1)
&& (!profile->proxy[0] || strstr(profile->qmichannel, "_IPCR"))) {
kill_brothers(profile->qmichannel);
}
if (pthread_create( &gQmiThreadID, 0, profile->qmi_ops->read, (void *)profile) != 0) {
dbg_time("%s Failed to create QMIThread: %d (%s)", __func__, errno, strerror(errno));
return 0;
}
if ((read(qmidevice_control_fd[0], &triger_event, sizeof(triger_event)) != sizeof(triger_event))
|| (triger_event != RIL_INDICATE_DEVICE_CONNECTED)) {
dbg_time("%s Failed to init QMIThread: %d (%s)", __func__, errno, strerror(errno));
return 0;
}
if (profile->qmi_ops->init && profile->qmi_ops->init(profile)) {
dbg_time("%s Failed to qmi init: %d (%s)", __func__, errno, strerror(errno));
return 0;
}
if (request_ops->requestBaseBandVersion)
request_ops->requestBaseBandVersion(profile);
if (request_ops->requestSetEthMode)
request_ops->requestSetEthMode(profile);
if (request_ops->requestSetLoopBackState && profile->loopback_state) {
qmierr = request_ops->requestSetLoopBackState(profile->loopback_state, profile->replication_factor);
if (qmierr != QMI_ERR_INVALID_QMI_CMD) //X20 return this error
profile->loopback_state = 0; //wait for RIL_UNSOL_LOOPBACK_CONFIG_IND
}
if (request_ops->requestGetSIMStatus) {
qmierr = request_ops->requestGetSIMStatus(&SIMStatus);
while (qmierr == QMI_ERR_OP_DEVICE_UNSUPPORTED) {
sleep(1);
qmierr = request_ops->requestGetSIMStatus(&SIMStatus);
}
if ((SIMStatus == SIM_PIN) && profile->pincode && request_ops->requestEnterSimPin) {
request_ops->requestEnterSimPin(profile->pincode);
}
}
if (SIMStatus == SIM_READY) {
if (request_ops->requestGetICCID)
request_ops->requestGetICCID();
if (request_ops->requestGetIMSI)
request_ops->requestGetIMSI();
}
if (request_ops->requestGetProfile)
request_ops->requestGetProfile(profile);
if (request_ops->requestSetProfile && (profile->apn || profile->user || profile->password)) {
if (request_ops->requestSetProfile(profile) == 1) {
#ifdef REBOOT_SIM_CARD_WHEN_APN_CHANGE //enable at only when customer asked
if (request_ops->requestRadioPower) {
request_ops->requestRadioPower(0);
request_ops->requestRadioPower(1);
}
#endif
}
}
request_ops->requestRegistrationState(&PSAttachedState);
#ifdef CONFIG_ENABLE_QOS
request_ops->requestRegisterQos(profile);
#endif
#if 1 //USB disconnect and re-connect without rebooting the modem will trigger this bug
if (profile->enable_ipv4
&& profile->request_ops == &atc_request_ops
&& !request_ops->requestQueryDataCall(&IPv4ConnectionStatus, IpFamilyV4)
&& IPv4ConnectionStatus == QWDS_PKT_DATA_CONNECTED) {
request_ops->requestDeactivateDefaultPDP(profile, IpFamilyV4);
}
#endif
send_signo_to_main(SIG_EVENT_CHECK);
while (1)
{
struct pollfd pollfds[] = {{signal_control_fd[1], POLLIN, 0}, {qmidevice_control_fd[0], POLLIN, 0}};
int ne, ret, nevents = sizeof(pollfds)/sizeof(pollfds[0]);
do {
ret = poll(pollfds, nevents, 15*1000);
} while ((ret < 0) && (errno == EINTR));
if (ret == 0)
{
send_signo_to_main(SIG_EVENT_CHECK);
continue;
}
if (ret <= 0) {
dbg_time("%s poll=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno));
goto __main_quit;
}
for (ne = 0; ne < nevents; ne++) {
int fd = pollfds[ne].fd;
short revents = pollfds[ne].revents;
if (revents & (POLLERR | POLLHUP | POLLNVAL)) {
dbg_time("%s poll err/hup", __func__);
dbg_time("epoll fd = %d, events = 0x%04x", fd, revents);
main_send_event_to_qmidevice(RIL_REQUEST_QUIT);
if (revents & POLLHUP)
goto __main_quit;
}
if ((revents & POLLIN) == 0)
continue;
if (fd == signal_control_fd[1])
{
if (read(fd, &signo, sizeof(signo)) == sizeof(signo))
{
alarm(0);
switch (signo)
{
case SIG_EVENT_START:
if (PSAttachedState != 1 && profile->loopback_state == 0)
break;
if (SetupCallAllowTime > clock_msec()) {
alarm((SetupCallAllowTime - clock_msec()+999)/1000);
break;
}
if (profile->enable_ipv4 && IPv4ConnectionStatus != QWDS_PKT_DATA_CONNECTED) {
qmierr = request_ops->requestSetupDataCall(profile, IpFamilyV4);
if ((qmierr > 0) && profile->user && profile->user[0] && profile->password && profile->password[0]) {
int old_auto = profile->auth;
                                //may fail because of a wrong auth mode, try pap->chap or chap->pap
profile->auth = (profile->auth == 1) ? 2 : 1;
qmierr = request_ops->requestSetupDataCall(profile, IpFamilyV4);
if (qmierr)
                                    profile->auth = old_auto; //still failing, restore the old auth mode
}
if (!qmierr) {
qmierr = request_ops->requestGetIPAddress(profile, IpFamilyV4);
if (!qmierr)
IPv4ConnectionStatus = QWDS_PKT_DATA_CONNECTED;
}
}
if (profile->enable_ipv6 && IPv6ConnectionStatus != QWDS_PKT_DATA_CONNECTED) {
if (profile->enable_ipv4 && profile->request_ops != &qmi_request_ops) {
IPv6ConnectionStatus = IPv4ConnectionStatus;
}
else {
qmierr = request_ops->requestSetupDataCall(profile, IpFamilyV6);
if (!qmierr) {
qmierr = request_ops->requestGetIPAddress(profile, IpFamilyV6);
if (!qmierr)
IPv6ConnectionStatus = QWDS_PKT_DATA_CONNECTED;
}
}
}
if ((profile->enable_ipv4 && IPv4ConnectionStatus == QWDS_PKT_DATA_DISCONNECTED)
|| (profile->enable_ipv6 && IPv6ConnectionStatus == QWDS_PKT_DATA_DISCONNECTED)) {
const unsigned allow_time[] = {5, 10, 20, 40, 60};
if (SetupCallFail < (sizeof(allow_time)/sizeof(unsigned)))
SetupCallAllowTime = allow_time[SetupCallFail];
else
SetupCallAllowTime = 60;
SetupCallFail++;
dbg_time("try to requestSetupDataCall %ld second later", SetupCallAllowTime);
alarm(SetupCallAllowTime);
SetupCallAllowTime = SetupCallAllowTime*1000 + clock_msec();
}
else if (IPv4ConnectionStatus == QWDS_PKT_DATA_CONNECTED || IPv6ConnectionStatus == QWDS_PKT_DATA_CONNECTED) {
SetupCallFail = 0;
SetupCallAllowTime = clock_msec();
}
break;
case SIG_EVENT_CHECK:
if (request_ops->requestGetSignalInfo)
request_ops->requestGetSignalInfo();
if (request_ops->requestGetCellInfoList)
request_ops->requestGetCellInfoList();
if (request_ops->requestGetCoexWWANState)
request_ops->requestGetCoexWWANState();
if (PSAttachedState != 1)
request_ops->requestRegistrationState(&PSAttachedState);
#ifdef REBOOT_SIM_CARD_WHEN_LONG_TIME_NO_PS
if (PSAttachedState) {
PsAttachTime = clock_msec();
PsAttachFail = 0;
}
else {
unsigned long diff = (clock_msec() - PsAttachTime) / 1000;
unsigned long threshold = REBOOT_SIM_CARD_WHEN_LONG_TIME_NO_PS << PsAttachFail;
if (diff > threshold || diff > 960) {
                            //interval time is 60 -> 120 -> 240 -> 480 -> 960
PsAttachTime = clock_msec();
PsAttachFail++;
if (request_ops->requestRadioPower) {
request_ops->requestRadioPower(0);
request_ops->requestRadioPower(1);
}
}
}
#endif
if (profile->enable_ipv4 && IPv4ConnectionStatus != QWDS_PKT_DATA_DISCONNECTED
&& !request_ops->requestQueryDataCall(&IPv4ConnectionStatus, IpFamilyV4))
{
if (QWDS_PKT_DATA_CONNECTED == IPv4ConnectionStatus && profile->ipv4.Address == 0) {
//killall -9 quectel-CM for MBIM and ATC call
qmierr = request_ops->requestGetIPAddress(profile, IpFamilyV4);
if (qmierr)
IPv4ConnectionStatus = QWDS_PKT_DATA_DISCONNECTED;
}
                            //local IP differs from the remote IP
if (QWDS_PKT_DATA_CONNECTED == IPv4ConnectionStatus && check_ipv4_address(profile) == 0) {
request_ops->requestDeactivateDefaultPDP(profile, IpFamilyV4);
IPv4ConnectionStatus = QWDS_PKT_DATA_DISCONNECTED;
}
}
else {
IPv4ConnectionStatus = QWDS_PKT_DATA_DISCONNECTED;
}
if (profile->enable_ipv6 && IPv6ConnectionStatus != QWDS_PKT_DATA_DISCONNECTED) {
if (profile->enable_ipv4 && profile->request_ops != &qmi_request_ops) {
IPv6ConnectionStatus = IPv4ConnectionStatus;
}
else {
request_ops->requestQueryDataCall(&IPv6ConnectionStatus, IpFamilyV6);
}
}
else {
IPv6ConnectionStatus = QWDS_PKT_DATA_DISCONNECTED;
}
if (IPv4ConnectionStatus == QWDS_PKT_DATA_DISCONNECTED && IPv6ConnectionStatus == QWDS_PKT_DATA_DISCONNECTED) {
usbnet_link_change(0, profile);
}
else if (IPv4ConnectionStatus == QWDS_PKT_DATA_CONNECTED || IPv6ConnectionStatus == QWDS_PKT_DATA_CONNECTED) {
int link = 0;
if (IPv4ConnectionStatus == QWDS_PKT_DATA_CONNECTED)
link |= (1<<IpFamilyV4);
if (IPv6ConnectionStatus == QWDS_PKT_DATA_CONNECTED)
link |= (1<<IpFamilyV6);
usbnet_link_change(link, profile);
}
if ((profile->enable_ipv4 && IPv4ConnectionStatus == QWDS_PKT_DATA_DISCONNECTED)
|| (profile->enable_ipv6 && IPv6ConnectionStatus == QWDS_PKT_DATA_DISCONNECTED)) {
send_signo_to_main(SIG_EVENT_START);
}
break;
case SIG_EVENT_STOP:
if (profile->enable_ipv4 && IPv4ConnectionStatus == QWDS_PKT_DATA_CONNECTED) {
request_ops->requestDeactivateDefaultPDP(profile, IpFamilyV4);
}
if (profile->enable_ipv6 && IPv6ConnectionStatus == QWDS_PKT_DATA_CONNECTED) {
if (profile->enable_ipv4 && profile->request_ops != &qmi_request_ops) {
}
else {
request_ops->requestDeactivateDefaultPDP(profile, IpFamilyV6);
}
}
usbnet_link_change(0, profile);
if (profile->qmi_ops->deinit)
profile->qmi_ops->deinit();
main_send_event_to_qmidevice(RIL_REQUEST_QUIT);
goto __main_quit;
break;
default:
break;
}
}
}
if (fd == qmidevice_control_fd[0]) {
if (read(fd, &triger_event, sizeof(triger_event)) == sizeof(triger_event)) {
switch (triger_event) {
case RIL_INDICATE_DEVICE_DISCONNECTED:
usbnet_link_change(0, profile);
goto __main_quit;
break;
case RIL_UNSOL_RESPONSE_VOICE_NETWORK_STATE_CHANGED:
request_ops->requestRegistrationState(&PSAttachedState);
if (PSAttachedState == 1) {
if ((profile->enable_ipv4 && IPv4ConnectionStatus == QWDS_PKT_DATA_DISCONNECTED)
|| (profile->enable_ipv6 && IPv6ConnectionStatus == QWDS_PKT_DATA_DISCONNECTED)) {
send_signo_to_main(SIG_EVENT_START);
}
} else {
SetupCallAllowTime = clock_msec();
}
break;
case RIL_UNSOL_DATA_CALL_LIST_CHANGED:
if (IPv4ConnectionStatus == QWDS_PKT_DATA_CONNECTED || IPv6ConnectionStatus == QWDS_PKT_DATA_CONNECTED) {
                            SetupCallAllowTime = clock_msec() + 1000; //from connect -> disconnect, do not re-dial immediately, wait for the network to stabilize
}
send_signo_to_main(SIG_EVENT_CHECK);
break;
case MODEM_REPORT_RESET_EVENT:
{
dbg_time("main recv MODEM RESET SIGNAL");
main_send_event_to_qmidevice(RIL_REQUEST_QUIT);
g_donot_exit_when_modem_hangup = 1;
goto __main_quit;
}
break;
case RIL_UNSOL_LOOPBACK_CONFIG_IND:
{
QMI_WDA_SET_LOOPBACK_CONFIG_IND_MSG SetLoopBackInd;
if (read(fd, &SetLoopBackInd, sizeof(SetLoopBackInd)) == sizeof(SetLoopBackInd)) {
profile->loopback_state = SetLoopBackInd.loopback_state.TLVVaule;
profile->replication_factor = le32_to_cpu(SetLoopBackInd.replication_factor.TLVVaule);
dbg_time("SetLoopBackInd: loopback_state=%d, replication_factor=%u",
profile->loopback_state, profile->replication_factor);
if (profile->loopback_state)
send_signo_to_main(SIG_EVENT_START);
}
}
break;
#ifdef CONFIG_REG_QOS_IND
case RIL_UNSOL_GLOBAL_QOS_FLOW_IND_QOS_ID:
{
UINT qos_id = 0;
if (read(fd, &qos_id, sizeof(qos_id)) == sizeof(qos_id)) {
profile->qos_id = qos_id;
}
}
break;
#endif
default:
break;
}
}
}
}
}
__main_quit:
usbnet_link_change(0, profile);
if (gQmiThreadID && pthread_join(gQmiThreadID, NULL)) {
dbg_time("%s Error joining to listener thread (%s)", __func__, strerror(errno));
}
close(signal_control_fd[0]);
close(signal_control_fd[1]);
close(qmidevice_control_fd[0]);
close(qmidevice_control_fd[1]);
dbg_time("%s exit", __func__);
return 0;
}
static int quectel_CM(PROFILE_T *profile)
{
int ret = 0;
char qmichannel[32] = {'\0'};
char usbnet_adapter[32] = {'\0'};
if (profile->expect_adapter[0])
strncpy(usbnet_adapter, profile->expect_adapter, sizeof(usbnet_adapter));
if (qmidevice_detect(qmichannel, usbnet_adapter, sizeof(qmichannel), profile)) {
profile->hardware_interface = HARDWARE_USB;
}
else if (mhidevice_detect(qmichannel, usbnet_adapter, profile)) {
profile->hardware_interface = HARDWARE_PCIE;
}
else if (atdevice_detect(qmichannel, usbnet_adapter, profile)) {
profile->hardware_interface = HARDWARE_PCIE;
}
#ifdef CONFIG_QRTR
else if (1) {
strcpy(qmichannel, "qrtr");
strcpy(usbnet_adapter, "rmnet_mhi0");
profile->hardware_interface = HARDWARE_PCIE;
profile->software_interface = SOFTWARE_QRTR;
}
#endif
else {
dbg_time("qmidevice_detect failed");
goto error;
}
strncpy(profile->qmichannel, qmichannel, sizeof(profile->qmichannel));
strncpy(profile->usbnet_adapter, usbnet_adapter, sizeof(profile->usbnet_adapter));
ql_get_netcard_driver_info(profile->usbnet_adapter);
if ((profile->hardware_interface == HARDWARE_USB) && profile->usblogfile)
ql_capture_usbmon_log(profile, profile->usblogfile);
if (profile->hardware_interface == HARDWARE_USB) {
profile->software_interface = get_driver_type(profile);
}
ql_qmap_mode_detect(profile);
if (profile->software_interface == SOFTWARE_MBIM) {
dbg_time("Modem works in MBIM mode");
profile->request_ops = &mbim_request_ops;
profile->qmi_ops = &mbim_dev_ops;
if (!profile->apn || !profile->apn[0]) {
//see FAE-51804 FAE-59811
dbg_time("When MBIM mode, must specify APN with '-s', or setup data call may fail!");
exit(-404); //if no such issue on your side, please comment this
}
ret = qmi_main(profile);
}
else if (profile->software_interface == SOFTWARE_QMI) {
dbg_time("Modem works in QMI mode");
profile->request_ops = &qmi_request_ops;
if (qmidev_is_gobinet(profile->qmichannel))
profile->qmi_ops = &gobi_qmidev_ops;
else
profile->qmi_ops = &qmiwwan_qmidev_ops;
qmidev_send = profile->qmi_ops->send;
ret = qmi_main(profile);
}
else if (profile->software_interface == SOFTWARE_ECM_RNDIS_NCM) {
dbg_time("Modem works in ECM_RNDIS_NCM mode");
profile->request_ops = &atc_request_ops;
profile->qmi_ops = &atc_dev_ops;
ret = qmi_main(profile);
}
#ifdef CONFIG_QRTR
else if (profile->software_interface == SOFTWARE_QRTR) {
dbg_time("Modem works in QRTR mode");
profile->request_ops = &qmi_request_ops;
profile->qmi_ops = &qrtr_qmidev_ops;
qmidev_send = profile->qmi_ops->send;
ret = qmi_main(profile);
}
#endif
else {
dbg_time("unsupport software_interface %d", profile->software_interface);
}
ql_stop_usbmon_log(profile);
error:
return ret;
}
static int parse_user_input(int argc, char **argv, PROFILE_T *profile) {
int opt = 1;
profile->pdp = CONFIG_DEFAULT_PDP;
profile->profile_index = CONFIG_DEFAULT_PDP;
if (!strcmp(argv[argc-1], "&"))
argc--;
#define has_more_argv() ((opt < argc) && (argv[opt][0] != '-'))
while (opt < argc) {
if (argv[opt][0] != '-') {
return usage(argv[0]);
}
switch (argv[opt++][1])
{
case 's':
profile->apn = profile->user = profile->password = "";
if (has_more_argv()) {
profile->apn = argv[opt++];
}
if (has_more_argv()) {
profile->user = argv[opt++];
}
if (has_more_argv()) {
profile->password = argv[opt++];
if (profile->password && profile->password[0])
                    profile->auth = 2; //default to CHAP, customers may omit auth
}
if (has_more_argv()) {
const char *auth = argv[opt++];
if (!strcmp(auth, "0") || !strcasecmp(auth, "none")) {
profile->auth = 0;
}
else if (!strcmp(auth, "1") || !strcasecmp(auth, "pap")) {
profile->auth = 1;
}
else if (!strcmp(auth, "2") || !strcasecmp(auth, "chap")) {
profile->auth = 2;
}
else if (!strcmp(auth, "3") || !strcasecmp(auth, "MsChapV2")) {
profile->auth = 3;
}
else {
dbg_time("unknow auth '%s'", auth);
return usage(argv[0]);
}
}
break;
case 'p':
if (has_more_argv()) {
const char *arg = argv[opt++];
if (!strcmp(arg, QUECTEL_QMI_PROXY) || !strcmp(arg, QUECTEL_MBIM_PROXY)
|| !strcmp(arg, LIBQMI_PROXY) || !strcmp(arg, LIBMBIM_PROXY) || !strcmp(arg, QUECTEL_ATC_PROXY)) {
strncpy(profile->proxy, arg, sizeof(profile->proxy) - 1);
}
else if ((999 < atoi(arg)) && (atoi(arg) < 10000)) {
profile->pincode = arg;
}
else {
dbg_time("unknow -p '%s'", arg);
return usage(argv[0]);
}
}
break;
case 'm':
if (has_more_argv())
profile->muxid = argv[opt++][0] - '0' + 0x80;
break;
case 'n':
if (has_more_argv())
profile->pdp = argv[opt++][0] - '0';
break;
case 'f':
if (has_more_argv()) {
profile->logfile = argv[opt++];
}
break;
case 'i':
if (has_more_argv()) {
strncpy(profile->expect_adapter, argv[opt++], sizeof(profile->expect_adapter) - 1);
}
break;
case 'v':
debug_qmi = 1;
break;
case 'l':
if (has_more_argv()) {
profile->replication_factor = atoi(argv[opt++]);
if (profile->replication_factor > 0) {
profile->loopback_state = 1;
}
}
break;
case '4':
profile->enable_ipv4 = 1;
break;
case '6':
profile->enable_ipv6 = 1;
break;
case 'd':
profile->no_dhcp = 1;
break;
case 'u':
if (has_more_argv()) {
profile->usblogfile = argv[opt++];
}
break;
case 'b':
profile->enable_bridge = 1;
break;
case 'k':
if (has_more_argv()) {
profile->kill_pdp = argv[opt++][0] - '0';
}
break;
default:
return usage(argv[0]);
break;
}
}
if (profile->enable_ipv4 != 1 && profile->enable_ipv6 != 1) { // default enable IPv4
profile->enable_ipv4 = 1;
}
return 1;
}
int main(int argc, char *argv[])
{
int ret;
PROFILE_T *ctx = &s_profile;
dbg_time("QConnectManager_Linux_V1.6.5");
ret = parse_user_input(argc, argv, ctx);
if (!ret)
return ret;
if (ctx->kill_pdp) {
return kill_data_call_pdp(ctx->kill_pdp, argv[0]);
}
if (ctx->logfile) {
logfilefp = fopen(ctx->logfile, "a+");
if (!logfilefp) {
dbg_time("Fail to open %s, errno: %d(%s)", ctx->logfile, errno, strerror(errno));
}
}
signal(SIGINT, ql_sigaction);
signal(SIGTERM, ql_sigaction);
signal(SIGALRM, ql_sigaction);
do {
ret = quectel_CM(ctx);
if (g_donot_exit_when_modem_hangup > 0)
sleep(3);
} while (g_donot_exit_when_modem_hangup > 0);
if (logfilefp) {
fclose(logfilefp);
}
return ret;
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,52 @@
#ifndef __QUECTEL_ENDIAN_H__
#define __QUECTEL_ENDIAN_H__
#include <endian.h>
#ifndef htole32
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define htole16(x) (uint16_t)(x)
#define le16toh(x) (uint16_t)(x)
#define letoh16(x) (uint16_t)(x)
#define htole32(x) (uint32_t)(x)
#define le32toh(x) (uint32_t)(x)
#define letoh32(x) (uint32_t)(x)
#define htole64(x) (uint64_t)(x)
#define le64toh(x) (uint64_t)(x)
#define letoh64(x) (uint64_t)(x)
#else
static __inline uint16_t __bswap16(uint16_t __x) {
return (__x<<8) | (__x>>8);
}
static __inline uint32_t __bswap32(uint32_t __x) {
return (__x>>24) | (__x>>8&0xff00) | (__x<<8&0xff0000) | (__x<<24);
}
static __inline uint64_t __bswap64(uint64_t __x) {
    return ((__bswap32(__x) + 0ULL) << 32) | __bswap32(__x >> 32);
}
#define htole16(x) __bswap16(x)
#define le16toh(x) __bswap16(x)
#define letoh16(x) __bswap16(x)
#define htole32(x) __bswap32(x)
#define le32toh(x) __bswap32(x)
#define letoh32(x) __bswap32(x)
#define htole64(x) __bswap64(x)
#define le64toh(x) __bswap64(x)
#define letoh64(x) __bswap64(x)
#endif
#endif
#define le16_to_cpu(x) le16toh((uint16_t)(x))
#define le32_to_cpu(x) le32toh((uint32_t)(x))
#define le64_to_cpu(x) le64toh((uint64_t)(x))
#define cpu_to_le16(x) htole16((uint16_t)(x))
#define cpu_to_le32(x) htole32((uint32_t)(x))
#define cpu_to_le64(x) htole64((uint64_t)(x))
static __inline uint32_t ql_swap32(uint32_t __x) {
return (__x>>24) | (__x>>8&0xff00) | (__x<<8&0xff0000) | (__x<<24);
}
#endif //__QUECTEL_ENDIAN_H__
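
The header above provides the little-endian conversion helpers used when parsing QMI fields from the wire (QMI TLV values are little-endian). A tiny self-contained check, assuming only what is declared above; the quoted include name is a guess based on the include guard:

```
#include <stdint.h>
#include <stdio.h>
#include "QuectelEndian.h" /* the header above; exact filename assumed */

int main(void)
{
    /* cpu_to_le32() followed by le32_to_cpu() round-trips on both LE hosts
     * (both are identity macros) and BE hosts (both swap), so this prints
     * 0x12345678 either way. */
    uint32_t wire = cpu_to_le32(0x12345678u);
    printf("host value: 0x%08x\n", le32_to_cpu(wire));

    /* ql_swap32() always swaps, regardless of host byte order. */
    printf("swapped:    0x%08x\n", ql_swap32(0x12345678u));
    return 0;
}
```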

View File

@ -0,0 +1,38 @@
#ifndef __QUECTEL_LIST_H__
#define __QUECTEL_LIST_H__
struct qlistnode
{
struct qlistnode *next;
struct qlistnode *prev;
};
#define qnode_to_item(node, container, member) \
(container *) (((char*) (node)) - offsetof(container, member))
#define qlist_for_each(node, list) \
for (node = (list)->next; node != (list); node = node->next)
#define qlist_empty(list) ((list) == (list)->next)
#define qlist_head(list) ((list)->next)
#define qlist_tail(list) ((list)->prev)
static void qlist_init(struct qlistnode *node)
{
node->next = node;
node->prev = node;
}
static void qlist_add_tail(struct qlistnode *head, struct qlistnode *item)
{
item->next = head;
item->prev = head->prev;
head->prev->next = item;
head->prev = item;
}
static void qlist_remove(struct qlistnode *item)
{
item->next->prev = item->prev;
item->prev->next = item->next;
}
#endif
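
The list header above is a minimal intrusive doubly linked list: the `qlistnode` is embedded in the owning structure and `qnode_to_item()` recovers the container from the node, so no per-link allocation is needed. A small sketch using only the macros and helpers declared above; the `pdn_entry` type, the stored values, and the include filename are invented for the example:

```
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include "qlist.h" /* the header above; exact filename assumed */

struct pdn_entry {            /* hypothetical container type */
    int pdp;
    struct qlistnode node;    /* embedded link */
};

int main(void)
{
    struct qlistnode head, *cur;
    int i;

    qlist_init(&head);

    for (i = 1; i <= 3; i++) {
        struct pdn_entry *e = malloc(sizeof(*e));
        if (!e)
            break;
        e->pdp = i;
        qlist_add_tail(&head, &e->node);
    }

    qlist_for_each(cur, &head) {
        struct pdn_entry *e = qnode_to_item(cur, struct pdn_entry, node);
        printf("pdp %d\n", e->pdp);
    }

    /* qlist_remove() only unlinks the node; freeing the container is still
     * the caller's responsibility. */
    while (!qlist_empty(&head)) {
        struct qlistnode *n = qlist_head(&head);
        qlist_remove(n);
        free(qnode_to_item(n, struct pdn_entry, node));
    }
    return 0;
}
```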

View File

@ -0,0 +1,402 @@
/******************************************************************************
@file qmap_bridge_mode.c
@brief Connectivity bridge manager.
DESCRIPTION
Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules.
INITIALIZATION AND SEQUENCING REQUIREMENTS
None.
---------------------------------------------------------------------------
Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved.
Quectel Wireless Solution Proprietary and Confidential.
---------------------------------------------------------------------------
******************************************************************************/
#include "QMIThread.h"
static size_t ql_fread(const char *filename, void *buf, size_t size) {
FILE *fp = fopen(filename , "r");
size_t n = 0;
memset(buf, 0x00, size);
if (fp) {
n = fread(buf, 1, size, fp);
if (n <= 0 || n == size) {
dbg_time("warnning: fail to fread(%s), fread=%zu, buf_size=%zu: (%s)", filename, n, size, strerror(errno));
}
fclose(fp);
}
return n > 0 ? n : 0;
}
static size_t ql_fwrite(const char *filename, const void *buf, size_t size) {
FILE *fp = fopen(filename , "w");
size_t n = 0;
if (fp) {
n = fwrite(buf, 1, size, fp);
if (n != size) {
dbg_time("warnning: fail to fwrite(%s), fwrite=%zu, buf_size=%zu: (%s)", filename, n, size, strerror(errno));
}
fclose(fp);
}
return n > 0 ? n : 0;
}
int ql_bridge_mode_detect(PROFILE_T *profile) {
const char *ifname = profile->qmapnet_adapter[0] ? profile->qmapnet_adapter : profile->usbnet_adapter;
const char *driver;
char bridge_mode[128];
char bridge_ipv4[128];
char ipv4[128];
char buf[64];
size_t n;
int in_bridge = 0;
driver = profile->driver_name;
snprintf(bridge_mode, sizeof(bridge_mode), "/sys/class/net/%s/bridge_mode", ifname);
snprintf(bridge_ipv4, sizeof(bridge_ipv4), "/sys/class/net/%s/bridge_ipv4", ifname);
if (access(bridge_ipv4, R_OK)) {
if (errno != ENOENT) {
dbg_time("fail to access %s, errno: %d (%s)", bridge_mode, errno, strerror(errno));
return 0;
}
snprintf(bridge_mode, sizeof(bridge_mode), "/sys/module/%s/parameters/bridge_mode", driver);
snprintf(bridge_ipv4, sizeof(bridge_ipv4), "/sys/module/%s/parameters/bridge_ipv4", driver);
if (access(bridge_mode, R_OK)) {
if (errno != ENOENT) {
dbg_time("fail to access %s, errno: %d (%s)", bridge_mode, errno, strerror(errno));
}
return 0;
}
}
n = ql_fread(bridge_mode, buf, sizeof(buf));
if (n > 0) {
in_bridge = (buf[0] != '0');
}
if (!in_bridge)
return 0;
memset(ipv4, 0, sizeof(ipv4));
if (strstr(bridge_ipv4, "/sys/class/net/") || profile->qmap_mode == 0 || profile->qmap_mode == 1) {
snprintf(ipv4, sizeof(ipv4), "0x%x", profile->ipv4.Address);
dbg_time("echo '%s' > %s", ipv4, bridge_ipv4);
ql_fwrite(bridge_ipv4, ipv4, strlen(ipv4));
}
else {
snprintf(ipv4, sizeof(ipv4), "0x%x:%d", profile->ipv4.Address, profile->muxid);
dbg_time("echo '%s' > %s", ipv4, bridge_ipv4);
ql_fwrite(bridge_ipv4, ipv4, strlen(ipv4));
}
return in_bridge;
}
int ql_enable_qmi_wwan_rawip_mode(PROFILE_T *profile) {
char filename[256];
char buf[4];
size_t n;
FILE *fp;
if (!qmidev_is_qmiwwan(profile->qmichannel))
return 0;
snprintf(filename, sizeof(filename), "/sys/class/net/%s/qmi/rawip", profile->usbnet_adapter);
n = ql_fread(filename, buf, sizeof(buf));
if (n == 0)
return 0;
if (buf[0] == '1' || buf[0] == 'Y')
return 0;
fp = fopen(filename , "w");
if (fp == NULL) {
dbg_time("Fail to fopen(%s, \"w\"), errno: %d (%s)", filename, errno, strerror(errno));
return 1;
}
buf[0] = 'Y';
n = fwrite(buf, 1, 1, fp);
if (n != 1) {
dbg_time("Fail to fwrite(%s), errno: %d (%s)", filename, errno, strerror(errno));
fclose(fp);
return 1;
}
fclose(fp);
return 0;
}
int ql_driver_type_detect(PROFILE_T *profile) {
if (qmidev_is_gobinet(profile->qmichannel)) {
profile->qmi_ops = &gobi_qmidev_ops;
}
else {
profile->qmi_ops = &qmiwwan_qmidev_ops;
}
qmidev_send = profile->qmi_ops->send;
return 0;
}
void ql_set_driver_bridge_mode(PROFILE_T *profile) {
char enable[16];
char filename[256];
if(profile->qmap_mode)
snprintf(filename, sizeof(filename), "/sys/class/net/%s/bridge_mode", profile->qmapnet_adapter);
else
snprintf(filename, sizeof(filename), "/sys/class/net/%s/bridge_mode", profile->usbnet_adapter);
    snprintf(enable, sizeof(enable), "%02d\n", profile->enable_bridge);
    ql_fwrite(filename, enable, strlen(enable)); /* write only the formatted string, not the whole buffer */
}
static int ql_qmi_qmap_mode_detect(PROFILE_T *profile) {
char buf[128];
int n;
struct {
char filename[255 * 2];
char linkname[255 * 2];
} *pl;
pl = (typeof(pl)) malloc(sizeof(*pl));
snprintf(pl->linkname, sizeof(pl->linkname), "/sys/class/net/%s/device/driver", profile->usbnet_adapter);
    n = readlink(pl->linkname, pl->filename, sizeof(pl->filename) - 1);
    if (n <= 0) {
        free(pl);
        return 0;
    }
    pl->filename[n] = '\0';
while (pl->filename[n] != '/')
n--;
strncpy(profile->driver_name, &pl->filename[n+1], sizeof(profile->driver_name) - 1);
ql_get_driver_rmnet_info(profile, &profile->rmnet_info);
if (profile->rmnet_info.size) {
profile->qmap_mode = profile->rmnet_info.qmap_mode;
if (profile->qmap_mode) {
int offset_id = (profile->muxid == 0)? profile->pdp - 1 : profile->muxid - 0x81;
if (profile->qmap_mode == 1)
offset_id = 0;
profile->muxid = profile->rmnet_info.mux_id[offset_id];
strncpy(profile->qmapnet_adapter, profile->rmnet_info.ifname[offset_id], sizeof(profile->qmapnet_adapter) - 1);
profile->qmap_size = profile->rmnet_info.rx_urb_size;
profile->qmap_version = profile->rmnet_info.qmap_version;
}
goto _out;
}
snprintf(pl->filename, sizeof(pl->filename), "/sys/class/net/%s/qmap_mode", profile->usbnet_adapter);
if (access(pl->filename, R_OK)) {
if (errno != ENOENT) {
dbg_time("fail to access %s, errno: %d (%s)", pl->filename, errno, strerror(errno));
goto _out;
}
snprintf(pl->filename, sizeof(pl->filename), "/sys/module/%s/parameters/qmap_mode", profile->driver_name);
if (access(pl->filename, R_OK)) {
if (errno != ENOENT) {
dbg_time("fail to access %s, errno: %d (%s)", pl->filename, errno, strerror(errno));
goto _out;
}
snprintf(pl->filename, sizeof(pl->filename), "/sys/class/net/%s/device/driver/module/parameters/qmap_mode", profile->usbnet_adapter);
if (access(pl->filename, R_OK)) {
if (errno != ENOENT) {
dbg_time("fail to access %s, errno: %d (%s)", pl->filename, errno, strerror(errno));
goto _out;
}
}
}
}
if (!access(pl->filename, R_OK)) {
n = ql_fread(pl->filename, buf, sizeof(buf));
if (n > 0) {
profile->qmap_mode = atoi(buf);
if (profile->qmap_mode > 1) {
if(!profile->muxid)
                    profile->muxid = profile->pdp + 0x80; //muxid is 0x8X for PDN-X
snprintf(profile->qmapnet_adapter, sizeof(profile->qmapnet_adapter),
"%.16s.%d", profile->usbnet_adapter, profile->muxid - 0x80);
            } else if (profile->qmap_mode == 1) {
profile->muxid = 0x81;
strncpy(profile->qmapnet_adapter, profile->usbnet_adapter, sizeof(profile->qmapnet_adapter));
}
}
}
else if (qmidev_is_qmiwwan(profile->qmichannel)) {
snprintf(pl->filename, sizeof(pl->filename), "/sys/class/net/qmimux%d", profile->pdp - 1);
if (access(pl->filename, R_OK)) {
if (errno != ENOENT) {
dbg_time("fail to access %s, errno: %d (%s)", pl->filename, errno, strerror(errno));
}
goto _out;
}
//upstream Kernel Style QMAP qmi_wwan.c
snprintf(pl->filename, sizeof(pl->filename), "/sys/class/net/%s/qmi/add_mux", profile->usbnet_adapter);
n = ql_fread(pl->filename, buf, sizeof(buf));
if (n >= 5) {
dbg_time("If use QMAP by /sys/class/net/%s/qmi/add_mux", profile->usbnet_adapter);
#if 1
dbg_time("Please set mtu of wwan0 >= max dl qmap packet size");
#else
dbg_time("File:%s Line:%d Please make sure add next patch to qmi_wwan.c", __func__, __LINE__);
/*
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 74bebbd..db8a777 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -379,6 +379,24 @@ static ssize_t add_mux_store(struct device *d, struct device_attribute *attr, c
if (!ret) {
info->flags |= QMI_WWAN_FLAG_MUX;
ret = len;
+#if 1 //Add by Quectel
+ if (le16_to_cpu(dev->udev->descriptor.idVendor) == 0x2c7c) {
+ int idProduct = le16_to_cpu(dev->udev->descriptor.idProduct);
+
+ if (idProduct == 0x0121 || idProduct == 0x0125 || idProduct == 0x0435) //MDM9x07
+ dev->rx_urb_size = 4*1024;
+ else if (idProduct == 0x0306) //MDM9x40
+ dev->rx_urb_size = 16*1024;
+ else if (idProduct == 0x0512) //SDX20
+ dev->rx_urb_size = 32*1024;
+ else if (idProduct == 0x0620) //SDX24
+ dev->rx_urb_size = 32*1024;
+ else if (idProduct == 0x0800) //SDX55
+ dev->rx_urb_size = 32*1024;
+ else
+ dev->rx_urb_size = 32*1024;
+ }
+#endif
}
err:
rtnl_unlock();
*/
#endif
profile->qmap_mode = n/5; //0x11\n0x22\n0x33\n
if (profile->qmap_mode > 1) {
//PDN-X map to qmimux-X
if(!profile->muxid) {
profile->muxid = (buf[5*(profile->pdp - 1) + 2] - '0')*16 + (buf[5*(profile->pdp - 1) + 3] - '0');
snprintf(profile->qmapnet_adapter, sizeof(profile->qmapnet_adapter), "qmimux%d", profile->pdp - 1);
} else {
profile->muxid = (buf[5*(profile->muxid - 0x81) + 2] - '0')*16 + (buf[5*(profile->muxid - 0x81) + 3] - '0');
snprintf(profile->qmapnet_adapter, sizeof(profile->qmapnet_adapter), "qmimux%d", profile->muxid - 0x81);
}
} else if (profile->qmap_mode == 1) {
profile->muxid = (buf[5*0 + 2] - '0')*16 + (buf[5*0 + 3] - '0');
snprintf(profile->qmapnet_adapter, sizeof(profile->qmapnet_adapter),
"qmimux%d", 0);
}
}
}
_out:
if (profile->qmap_mode) {
if (profile->qmap_size == 0) {
profile->qmap_size = 16*1024;
snprintf(pl->filename, sizeof(pl->filename), "/sys/class/net/%s/qmap_size", profile->usbnet_adapter);
if (!access(pl->filename, R_OK)) {
size_t n;
char buf[32];
n = ql_fread(pl->filename, buf, sizeof(buf));
if (n > 0) {
profile->qmap_size = atoi(buf);
}
}
}
if (profile->qmap_version == 0) {
profile->qmap_version = WDA_DL_DATA_AGG_QMAP_ENABLED;
}
dbg_time("qmap_mode = %d, qmap_version = %d, qmap_size = %d, muxid = 0x%02x, qmap_netcard = %s",
profile->qmap_mode, profile->qmap_version, profile->qmap_size, profile->muxid, profile->qmapnet_adapter);
}
ql_set_driver_bridge_mode(profile);
free(pl);
return 0;
}
static int ql_mbim_usb_vlan_mode_detect(PROFILE_T *profile) {
char tmp[128];
snprintf(tmp, sizeof(tmp), "/sys/class/net/%s.%d", profile->usbnet_adapter, profile->pdp);
if (!access(tmp, F_OK)) {
profile->qmap_mode = 4;
profile->muxid = profile->pdp;
no_trunc_strncpy(profile->qmapnet_adapter, tmp + strlen("/sys/class/net/"), sizeof(profile->qmapnet_adapter) - 1);
dbg_time("mbim_qmap_mode = %d, vlan_id = 0x%02x, qmap_netcard = %s",
profile->qmap_mode, profile->muxid, profile->qmapnet_adapter);
}
return 0;
}
static int ql_mbim_mhi_qmap_mode_detect(PROFILE_T *profile) {
ql_get_driver_rmnet_info(profile, &profile->rmnet_info);
if (profile->rmnet_info.size) {
profile->qmap_mode = profile->rmnet_info.qmap_mode;
if (profile->qmap_mode) {
int offset_id = profile->pdp - 1;
if (profile->qmap_mode == 1)
offset_id = 0;
profile->muxid = profile->pdp;
strncpy(profile->qmapnet_adapter, profile->rmnet_info.ifname[offset_id], sizeof(profile->qmapnet_adapter) - 1);
profile->qmap_size = profile->rmnet_info.rx_urb_size;
profile->qmap_version = profile->rmnet_info.qmap_version;
dbg_time("mbim_qmap_mode = %d, vlan_id = 0x%02x, qmap_netcard = %s",
profile->qmap_mode, profile->muxid, profile->qmapnet_adapter);
}
goto _out;
}
_out:
return 0;
}
int ql_qmap_mode_detect(PROFILE_T *profile) {
if (profile->software_interface == SOFTWARE_MBIM) {
if (profile->hardware_interface == HARDWARE_USB)
return ql_mbim_usb_vlan_mode_detect(profile);
else if (profile->hardware_interface == HARDWARE_PCIE)
return ql_mbim_mhi_qmap_mode_detect(profile);
} else if (profile->software_interface == SOFTWARE_QMI) {
return ql_qmi_qmap_mode_detect(profile);
}
#ifdef CONFIG_QRTR
else if(profile->software_interface == SOFTWARE_QRTR) {
char tmp[128];
profile->qmap_mode = 4;
profile->qmap_version = WDA_DL_DATA_AGG_QMAP_V5_ENABLED;
profile->qmap_size = 31*1024;
profile->muxid = 0x80 | profile->pdp;
snprintf(profile->qmapnet_adapter, sizeof(profile->qmapnet_adapter), "rmnet_data%d", profile->muxid&0xF);
snprintf(tmp, sizeof(tmp), "/sys/class/net/%s", profile->qmapnet_adapter);
if (access(tmp, F_OK)) {
rtrmnet_ctl_create_vnd(profile->usbnet_adapter, profile->qmapnet_adapter,
profile->muxid, profile->qmap_version, 11, 4096);
}
}
#endif
return 0;
}
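
For readers following the detection logic above, here is a minimal illustrative sketch (not part of the CM sources) of the naming convention the QRTR branch relies on: the QMAP mux id is 0x80 | PDN index and the data netdev is rmnet_data&lt;muxid &amp; 0xF&gt;. All values below are hypothetical.

```
/* Illustrative only: mirrors ql_qmap_mode_detect()'s QRTR branch above
 * (muxid = 0x80 | pdp, netdev "rmnet_data%d" from the low nibble of muxid). */
#include <stdio.h>

int main(void)
{
    for (int pdp = 1; pdp <= 4; pdp++) {
        unsigned muxid = 0x80 | (unsigned)pdp;
        char ifname[16];
        snprintf(ifname, sizeof(ifname), "rmnet_data%u", muxid & 0xF);
        printf("PDN %d -> muxid 0x%02x -> %s\n", pdp, muxid, ifname);
    }
    return 0;
}
```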

View File

@ -0,0 +1,657 @@
//https://github.com/andersson/qrtr
/******************************************************************************
@file QrtrCM.c
@brief QMI-over-QRTR / MSM IPC router transport.
DESCRIPTION
Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules.
INITIALIZATION AND SEQUENCING REQUIREMENTS
None.
---------------------------------------------------------------------------
Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved.
Quectel Wireless Solution Proprietary and Confidential.
---------------------------------------------------------------------------
******************************************************************************/
#include <stdio.h>
#include <string.h>
#include <termios.h>
#include <stdio.h>
#include <ctype.h>
#include "QMIThread.h"
typedef struct {
uint32_t service;
uint32_t version;
uint32_t instance;
uint32_t node;
uint32_t port;
} QrtrService;
#define QRTR_MAX (QMUX_TYPE_WDS_ADMIN + 1)
static QrtrService service_list[QRTR_MAX];
static int qmiclientId[QRTR_MAX];
static int get_client(UCHAR QMIType);
static uint32_t node_modem = 3; //IPQ ~ 3, QCM ~ 0
#ifdef USE_LINUX_MSM_IPC
#include <linux/msm_ipc.h>
struct xport_ipc_router_server_addr {
uint32_t service;
uint32_t instance;
uint32_t node_id;
uint32_t port_id;
};
union ctl_msg {
uint32_t cmd;
struct {
uint32_t cmd;
uint32_t service;
uint32_t instance;
uint32_t node_id;
uint32_t port_id;
} srv;
struct {
uint32_t cmd;
uint32_t node_id;
uint32_t port_id;
} cli;
};
#define CTL_CMD_NEW_SERVER 4
#define CTL_CMD_REMOVE_SERVER 5
#define VERSION_MASK 0xff
#define GET_VERSION(x) (x & 0xff)
#define GET_XPORT_SVC_INSTANCE(x) GET_VERSION(x)
#define GET_INSTANCE(x) ((x & 0xff00) >> 8)
static int msm_ipc_socket(const char *name)
{
int sock;
int flags;
sock = socket(AF_MSM_IPC, SOCK_DGRAM, 0);
if (sock < 0) {
dbg_time("%s(%s) errno: %d (%s)\n", __func__, name, errno, strerror(errno));
return -1;
}
fcntl(sock, F_SETFD, FD_CLOEXEC);
flags = fcntl(sock, F_GETFL, 0);
fcntl(sock, F_SETFL, flags | O_NONBLOCK);
return sock;
}
static uint32_t xport_lookup
(
int lookup_sock_fd,
uint32_t service_id,
uint32_t version
)
{
uint32_t num_servers_found = 0;
uint32_t num_entries_to_fill = 4;
struct server_lookup_args *lookup_arg;
int i;
lookup_arg = (struct server_lookup_args *)malloc(sizeof(*lookup_arg)
+ (num_entries_to_fill * sizeof(struct msm_ipc_server_info)));
if (!lookup_arg)
{
dbg_time("%s: Malloc failed\n", __func__);
return 0;
}
lookup_arg->port_name.service = service_id;
lookup_arg->port_name.instance = GET_XPORT_SVC_INSTANCE(version);
lookup_arg->num_entries_in_array = num_entries_to_fill;
lookup_arg->lookup_mask = VERSION_MASK;
lookup_arg->num_entries_found = 0;
if (ioctl(lookup_sock_fd, IPC_ROUTER_IOCTL_LOOKUP_SERVER, lookup_arg) < 0)
{
dbg_time("%s: Lookup failed for %08x: %08x\n", __func__, service_id, version);
free(lookup_arg);
return 0;
}
dbg_time("%s: num_entries_found %d for type=%d instance=%d", __func__,
lookup_arg->num_entries_found, service_id, version);
num_servers_found = 0;
for (i = 0; ((i < (int)num_entries_to_fill) && (i < lookup_arg->num_entries_found)); i++)
{
QrtrService service_info[1];
if (lookup_arg->srv_info[i].node_id != node_modem)
continue;
num_servers_found++;
service_info[0].service = lookup_arg->srv_info[i].service;
service_info[0].version = GET_VERSION(lookup_arg->srv_info[i].instance);
service_info[0].instance = GET_INSTANCE(lookup_arg->srv_info[i].instance);
service_info[0].node = lookup_arg->srv_info[i].node_id;
service_info[0].port = lookup_arg->srv_info[i].port_id;
service_list[service_id] = service_info[0];
qmiclientId[service_id] = get_client(service_id);
}
free(lookup_arg);
return num_servers_found;
}
static int xport_send(int sock, uint32_t node, uint32_t port, const void *data, unsigned int sz)
{
struct sockaddr_msm_ipc addr = {};
int rc;
addr.family = AF_MSM_IPC;
addr.address.addrtype = MSM_IPC_ADDR_ID;
addr.address.addr.port_addr.node_id = node;
addr.address.addr.port_addr.port_id = port;
rc = sendto(sock, data, sz, MSG_DONTWAIT, (void *)&addr, sizeof(addr));
if (rc < 0) {
dbg_time("xport_send errno: %d (%s)\n", errno, strerror(errno));
return -1;
}
return 0;
}
static int xport_recv(int sock, void *data, unsigned int sz, uint32_t *node, uint32_t *port)
{
struct sockaddr_msm_ipc addr = {};
socklen_t addr_size = sizeof(struct sockaddr_msm_ipc);
int rc;
rc = recvfrom(sock, data, sz, MSG_DONTWAIT, (void *)&addr, &addr_size);
if (rc < 0) {
dbg_time("xport_recv errno: %d (%s)\n", errno, strerror(errno));
}
else if (addr.address.addrtype != MSM_IPC_ADDR_ID) {
dbg_time("xport_recv addrtype is NOT MSM_IPC_ADDR_ID\n");
rc = -1;
}
*node = addr.address.addr.port_addr.node_id;
*port = addr.address.addr.port_addr.port_id;
return rc;
}
#define qmi_recv xport_recv
static int xport_ctrl_init(void)
{
int ctrl_sock;
int rc;
uint32_t instance = 1; //modem
uint32_t version;
ctrl_sock = msm_ipc_socket("ctrl_port");
if (ctrl_sock == -1)
return -1;
rc = ioctl(ctrl_sock, IPC_ROUTER_IOCTL_GET_VERSION, &version);
if (rc < 0) {
dbg_time("%s: failed to get ipc version\n", __func__);
goto init_close_ctrl_fd;
}
dbg_time("%s ipc_version = %d", __func__, version);
rc = ioctl(ctrl_sock, IPC_ROUTER_IOCTL_BIND_CONTROL_PORT, NULL);
if (rc < 0) {
dbg_time("%s: failed to bind as control port\n", __func__);
goto init_close_ctrl_fd;
}
//cat /sys/kernel/debug/msm_ipc_router/dump_servers
rc = 0;
rc += xport_lookup(ctrl_sock, QMUX_TYPE_WDS, instance);
if (service_list[QMUX_TYPE_WDS].port) {
qmiclientId[QMUX_TYPE_WDS_IPV6] = get_client(QMUX_TYPE_WDS);
}
rc += xport_lookup(ctrl_sock, QMUX_TYPE_NAS, instance);
rc += xport_lookup(ctrl_sock, QMUX_TYPE_UIM, instance);
rc += xport_lookup(ctrl_sock, QMUX_TYPE_DMS, instance);
rc += xport_lookup(ctrl_sock, QMUX_TYPE_WDS_ADMIN, instance);
if (rc == 0) {
dbg_time("%s: failed to lookup qmi service\n", __func__);
goto init_close_ctrl_fd;
}
return ctrl_sock;
init_close_ctrl_fd:
close(ctrl_sock);
return -1;
}
static void handle_ctrl_pkt(int sock) {
union ctl_msg pkt;
uint32_t type;
int rc;
rc = recvfrom(sock, &pkt, sizeof(pkt), 0, NULL, NULL);
if (rc < 0)
return;
type = le32toh(pkt.cmd);
if (CTL_CMD_NEW_SERVER == type || CTL_CMD_REMOVE_SERVER == type) {
QrtrService s;
s.service = le32toh(pkt.srv.service);
s.version = le32toh(pkt.srv.instance) & 0xff;
s.instance = le32toh(pkt.srv.instance) >> 8;
s.node = le32toh(pkt.srv.node_id);
s.port = le32toh(pkt.srv.port_id);
if (debug_qmi)
dbg_time ("[qrtr] %s server on %u:%u -> service %u, version %u, instance %u",
CTL_CMD_NEW_SERVER == type ? "add" : "remove",
s.node, s.port, s.service, s.version, s.instance);
if (CTL_CMD_NEW_SERVER == type) {
if (s.service < QRTR_MAX) {
service_list[s.service] = s;
}
}
else if (CTL_CMD_REMOVE_SERVER == type) {
if (s.service < QRTR_MAX) {
memset(&service_list[s.service], 0, sizeof(QrtrService));
}
}
}
}
#else
#include <linux/socket.h>
#include "qrtr.h"
#endif
static int qrtr_socket(void)
{
struct sockaddr_qrtr sq;
socklen_t sl = sizeof(sq);
int sock;
int rc;
sock = socket(AF_QIPCRTR, SOCK_DGRAM, 0);
if (sock < 0) {
dbg_time("qrtr_socket errno: %d (%s)\n", errno, strerror(errno));
return -1;
}
rc = getsockname(sock, (void *)&sq, &sl);
if (rc || sq.sq_family != AF_QIPCRTR || sl != sizeof(sq)) {
dbg_time("getsockname: %d (%s)\n", errno, strerror(errno));
close(sock);
return -1;
}
return sock;
}
static int qrtr_send(int sock, uint32_t node, uint32_t port, const void *data, unsigned int sz)
{
struct sockaddr_qrtr sq = {};
int rc;
sq.sq_family = AF_QIPCRTR;
sq.sq_node = node;
sq.sq_port = port;
rc = sendto(sock, data, sz, MSG_DONTWAIT, (void *)&sq, sizeof(sq));
if (rc < 0) {
dbg_time("sendto errno: %d (%s)\n", errno, strerror(errno));
return -1;
}
return 0;
}
static int qrtr_recv(int sock, void *data, unsigned int sz, uint32_t *node, uint32_t *port)
{
struct sockaddr_qrtr sq = {};
socklen_t sl = sizeof(sq);
int rc;
rc = recvfrom(sock, data, sz, MSG_DONTWAIT, (void *)&sq, &sl);
if (rc < 0) {
dbg_time("qrtr_recv errno: %d (%s)\n", errno, strerror(errno));
}
*node = sq.sq_node;
*port = sq.sq_port;
return rc;
}
#define qmi_recv qrtr_recv
static int qrtr_ctrl_init(void) {
int sock;
int rc;
struct qrtr_ctrl_pkt pkt;
struct sockaddr_qrtr sq;
socklen_t sl = sizeof(sq);
sock = qrtr_socket();
if (sock == -1)
return -1;
memset(&pkt, 0, sizeof(pkt));
pkt.cmd = htole32(QRTR_TYPE_NEW_LOOKUP);
getsockname(sock, (void *)&sq, &sl);
rc = qrtr_send(sock, sq.sq_node, QRTR_PORT_CTRL, &pkt, sizeof(pkt));
if (rc == -1) {
dbg_time("qrtr_send errno: %d (%s)\n", errno, strerror(errno));
close(sock);
return -1;
}
return sock;
}
static void handle_server_change(uint32_t type, struct qrtr_ctrl_pkt *ppkt) {
struct qrtr_ctrl_pkt pkt = *ppkt;
QrtrService s;
s.service = le32toh(pkt.server.service);
s.version = le32toh(pkt.server.instance) & 0xff;
s.instance = le32toh(pkt.server.instance) >> 8;
s.node = le32toh(pkt.server.node);
s.port = le32toh(pkt.server.port);
if (debug_qmi)
dbg_time ("[qrtr] %s server on %u:%u -> service %u, version %u, instance %u",
QRTR_TYPE_NEW_SERVER == type ? "add" : "remove",
s.node, s.port, s.service, s.version, s.instance);
if (s.node != node_modem)
return; //we only care about the modem node
if (QRTR_TYPE_NEW_SERVER == type) {
if (s.service < QRTR_MAX) {
service_list[s.service] = s;
}
}
else if (QRTR_TYPE_DEL_SERVER == type) {
if (s.service < QRTR_MAX) {
memset(&service_list[s.service], 0, sizeof(QrtrService));
}
}
}
static void handle_ctrl_pkt(int sock) {
struct qrtr_ctrl_pkt pkt;
struct sockaddr_qrtr sq;
socklen_t sl = sizeof(sq);
uint32_t type;
int rc;
rc = recvfrom(sock, &pkt, sizeof(pkt), 0, (void *)&sq, &sl);
if (rc < 0)
return;
type = le32toh(pkt.cmd);
if (debug_qmi)
dbg_time("type %u, node %u, sq.port %x, len: %d", type, sq.sq_node, sq.sq_port, rc);
if (sq.sq_port != QRTR_PORT_CTRL)
return;
if (QRTR_TYPE_NEW_SERVER == type || QRTR_TYPE_DEL_SERVER == type) {
handle_server_change(type, &pkt);
}
}
static int get_client(UCHAR QMIType) {
int ClientId;
QrtrService *s = &service_list[QMIType];
if (!s->service) {
dbg_time("%s service: %d for QMIType: %d", __func__, s->service, QMIType);
return -ENODEV;
}
#ifdef USE_LINUX_MSM_IPC
ClientId = msm_ipc_socket("xport");
#else
ClientId = qrtr_socket();
#endif
if (ClientId == -1) {
return 0;
}
switch (QMIType) {
case QMUX_TYPE_WDS: dbg_time("Get clientWDS = %d", ClientId); break;
case QMUX_TYPE_DMS: dbg_time("Get clientDMS = %d", ClientId); break;
case QMUX_TYPE_NAS: dbg_time("Get clientNAS = %d", ClientId); break;
case QMUX_TYPE_QOS: dbg_time("Get clientQOS = %d", ClientId); break;
case QMUX_TYPE_WMS: dbg_time("Get clientWMS = %d", ClientId); break;
case QMUX_TYPE_PDS: dbg_time("Get clientPDS = %d", ClientId); break;
case QMUX_TYPE_UIM: dbg_time("Get clientUIM = %d", ClientId); break;
case QMUX_TYPE_WDS_ADMIN: dbg_time("Get clientWDA = %d", ClientId);
break;
default: break;
}
return ClientId;
}
static void handle_alloc_client(PROFILE_T *profile) {
int srv_list[] = {QMUX_TYPE_WDS, QMUX_TYPE_NAS, QMUX_TYPE_UIM, QMUX_TYPE_DMS, QMUX_TYPE_WDS_ADMIN};
size_t i = 0, srv_ready = 0;
static int report = -1;
if (report != -1)
return;
for(i = 0; i < sizeof(srv_list)/sizeof(srv_list[0]); i++) {
int srv = srv_list[i];
if (service_list[srv].service)
srv_ready++;
else
continue;
if (qmiclientId[srv] == 0) {
qmiclientId[srv] = get_client(srv);
if (qmiclientId[srv] != 0) {
if (srv == QMUX_TYPE_WDS) {
qmiclientId[QMUX_TYPE_WDS_IPV6] = get_client(QMUX_TYPE_WDS);
}
else if (srv == QMUX_TYPE_WDS_ADMIN) {
profile->wda_client = qmiclientId[QMUX_TYPE_WDS_ADMIN];
}
}
}
}
if (srv_ready == sizeof(srv_list)/sizeof(srv_list[0])) {
if (qmiclientId[QMUX_TYPE_WDS]) {
qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_CONNECTED);
} else {
qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_DISCONNECTED);
}
report = 1;
}
}
static int qmi_send(PQCQMIMSG pRequest) {
uint8_t QMIType = pRequest->QMIHdr.QMIType;
int sock;
QrtrService *s = &service_list[QMIType == QMUX_TYPE_WDS_IPV6 ? QMUX_TYPE_WDS: QMIType];
sock = qmiclientId[QMIType];
pRequest->QMIHdr.ClientId = 0xaa;
if (!s->service || !sock) {
dbg_time("%s service: %d, sock: %d for QMIType: %d", __func__, s->service, sock, QMIType);
return -ENODEV;
}
#ifdef USE_LINUX_MSM_IPC
return xport_send(sock, s->node, s->port, &pRequest->MUXMsg,
le16_to_cpu(pRequest->QMIHdr.Length) + 1 - sizeof(QCQMI_HDR));
#else
return qrtr_send(sock, s->node, s->port, &pRequest->MUXMsg,
le16_to_cpu(pRequest->QMIHdr.Length) + 1 - sizeof(QCQMI_HDR));
#endif
}
static int qmi_deinit(void) {
unsigned int i;
for (i = 0; i < sizeof(qmiclientId)/sizeof(qmiclientId[0]); i++)
{
if (qmiclientId[i] != 0)
{
close(qmiclientId[i]);
qmiclientId[i] = 0;
}
}
return 0;
}
static void * qmi_read(void *pData) {
PROFILE_T *profile = (PROFILE_T *)pData;
int ctrl_sock;
int wait_for_request_quit = 0;
#ifdef USE_LINUX_MSM_IPC
ctrl_sock = xport_ctrl_init();
if (ctrl_sock != -1)
qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_CONNECTED);
#else
ctrl_sock = qrtr_ctrl_init();
#endif
if (ctrl_sock == -1)
goto _quit;
while (1) {
struct pollfd pollfds[16] = {{qmidevice_control_fd[1], POLLIN, 0}, {ctrl_sock, POLLIN, 0}};
int ne, ret, nevents = 2;
unsigned int i;
for (i = 0; i < sizeof(qmiclientId)/sizeof(qmiclientId[0]); i++)
{
if (qmiclientId[i] != 0)
{
pollfds[nevents].fd = qmiclientId[i];
pollfds[nevents].events = POLLIN;
pollfds[nevents].revents = 0;
nevents++;
}
}
do {
ret = poll(pollfds, nevents, wait_for_request_quit ? 1000 : -1);
} while ((ret < 0) && (errno == EINTR));
if (ret == 0 && wait_for_request_quit) {
QmiThreadRecvQMI(NULL); //main thread may be pending on QmiThreadSendQMI()
continue;
}
if (ret <= 0) {
dbg_time("%s poll=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno));
break;
}
for (ne = 0; ne < nevents; ne++) {
int fd = pollfds[ne].fd;
short revents = pollfds[ne].revents;
if (revents & (POLLERR | POLLHUP | POLLNVAL)) {
dbg_time("%s poll err/hup/inval", __func__);
dbg_time("poll fd = %d, events = 0x%04x", fd, revents);
goto _quit;
}
if ((revents & POLLIN) == 0)
continue;
if (fd == qmidevice_control_fd[1]) {
int triger_event;
if (read(fd, &triger_event, sizeof(triger_event)) == sizeof(triger_event)) {
//DBG("triger_event = 0x%x", triger_event);
switch (triger_event) {
case RIL_REQUEST_QUIT:
goto _quit;
break;
case SIG_EVENT_STOP:
wait_for_request_quit = 1;
break;
default:
break;
}
}
}
else if (fd == ctrl_sock) {
handle_ctrl_pkt(ctrl_sock);
handle_alloc_client(profile);
}
else
{
PQCQMIMSG pResponse = (PQCQMIMSG)cm_recv_buf;
int rc;
uint32_t sq_node = 0;
uint32_t sq_port = 0;
rc = qmi_recv(fd, &pResponse->MUXMsg, sizeof(cm_recv_buf) - sizeof(QCQMI_HDR), &sq_node, &sq_port);
if (debug_qmi)
dbg_time("fd %d, node %u, port %x, len: %d", fd, sq_node, sq_port, rc);
if (rc <= 0)
{
dbg_time("%s read=%d errno: %d (%s)", __func__, rc, errno, strerror(errno));
break;
}
for (i = 0; i < sizeof(qmiclientId)/sizeof(qmiclientId[0]); i++)
{
if (qmiclientId[i] == fd)
{
pResponse->QMIHdr.QMIType = i;
if (service_list[i].node != sq_node || service_list[i].port != sq_port) {
continue;
}
}
}
pResponse->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI;
pResponse->QMIHdr.Length = cpu_to_le16(rc + sizeof(QCQMI_HDR) - 1);
pResponse->QMIHdr.CtlFlags = 0x00;
pResponse->QMIHdr.ClientId = 0xaa;
QmiThreadRecvQMI(pResponse);
}
}
}
_quit:
qmi_deinit();
close(ctrl_sock);
qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_DISCONNECTED);
QmiThreadRecvQMI(NULL); //main thread may be pending on QmiThreadSendQMI()
dbg_time("%s exit", __func__);
pthread_exit(NULL);
return NULL;
}
const struct qmi_device_ops qrtr_qmidev_ops = {
.deinit = qmi_deinit,
.send = qmi_send,
.read = qmi_read,
};

View File

@ -0,0 +1,74 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _LINUX_QRTR_H
#define _LINUX_QRTR_H
#include <linux/socket.h>
#include <linux/types.h>
#ifndef AF_QIPCRTR
#define AF_QIPCRTR 42
#endif
#define QRTR_NODE_BCAST 0xffffffffu
#define QRTR_PORT_CTRL 0xfffffffeu
struct sockaddr_qrtr {
__kernel_sa_family_t sq_family;
__u32 sq_node;
__u32 sq_port;
};
enum qrtr_pkt_type {
QRTR_TYPE_DATA = 1,
QRTR_TYPE_HELLO = 2,
QRTR_TYPE_BYE = 3,
QRTR_TYPE_NEW_SERVER = 4,
QRTR_TYPE_DEL_SERVER = 5,
QRTR_TYPE_DEL_CLIENT = 6,
QRTR_TYPE_RESUME_TX = 7,
QRTR_TYPE_EXIT = 8,
QRTR_TYPE_PING = 9,
QRTR_TYPE_NEW_LOOKUP = 10,
QRTR_TYPE_DEL_LOOKUP = 11,
};
#define QRTR_TYPE_DEL_PROC 13
struct qrtr_ctrl_pkt {
__le32 cmd;
union {
struct {
__le32 service;
__le32 instance;
__le32 node;
__le32 port;
} server;
struct {
__le32 node;
__le32 port;
} client;
struct {
__le32 rsvd;
__le32 node;
} proc;
};
} __attribute__ ((packed));
#define QRTR_PROTO_VER_1 1
struct qrtr_hdr_v1 {
__le32 version;
__le32 type;
__le32 src_node_id;
__le32 src_port_id;
__le32 confirm_rx;
__le32 size;
__le32 dst_node_id;
__le32 dst_port_id;
} __attribute__ ((packed));
#endif /* _LINUX_QRTR_H */
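
As a rough, self-contained sketch of how the definitions in this header are used, the program below registers a control-port lookup the same way qrtr_ctrl_init() in QrtrCM.c above does, after which QRTR_TYPE_NEW_SERVER/QRTR_TYPE_DEL_SERVER packets can be read from the socket. It assumes a kernel with QRTR support and is purely illustrative, not part of the shipped sources.

```
/* Sketch only: open an AF_QIPCRTR socket and request service lookups,
 * mirroring qrtr_ctrl_init() in QrtrCM.c above. */
#include <endian.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include "qrtr.h" /* the header shown above */

int main(void)
{
    struct sockaddr_qrtr sq;
    socklen_t sl = sizeof(sq);
    struct qrtr_ctrl_pkt pkt;
    int sock = socket(AF_QIPCRTR, SOCK_DGRAM, 0);

    if (sock < 0 || getsockname(sock, (struct sockaddr *)&sq, &sl)) {
        perror("qrtr socket");
        return 1;
    }

    memset(&pkt, 0, sizeof(pkt));
    pkt.cmd = htole32(QRTR_TYPE_NEW_LOOKUP); /* subscribe to NEW/DEL_SERVER events */

    sq.sq_port = QRTR_PORT_CTRL; /* control port of the local node */
    if (sendto(sock, &pkt, sizeof(pkt), 0, (struct sockaddr *)&sq, sizeof(sq)) < 0) {
        perror("qrtr lookup");
        close(sock);
        return 1;
    }
    /* QRTR_TYPE_NEW_SERVER packets can now be read from 'sock' via recvfrom(). */
    close(sock);
    return 0;
}
```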

View File

@ -0,0 +1,506 @@
/******************************************************************************
@file quectel-atc-proxy.c
@brief atc proxy.
DESCRIPTION
Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules.
INITIALIZATION AND SEQUENCING REQUIREMENTS
None.
---------------------------------------------------------------------------
Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved.
Quectel Wireless Solution Proprietary and Confidential.
---------------------------------------------------------------------------
******************************************************************************/
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <fcntl.h>
#include <stddef.h>
#include <stdarg.h>
#include <pthread.h>
#include <errno.h>
#include <time.h>
#include <signal.h>
#include <getopt.h>
#include <poll.h>
#include <endian.h>
#include <inttypes.h>
#include <limits.h>
#include <dirent.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/un.h>
#include <linux/if.h>
#include "qlist.h"
#include "QMIThread.h"
#include "atchannel.h"
#include "at_tok.h"
#define dprintf(fmt, args...) do { fprintf(stdout, "%s " fmt, get_time(), ##args); } while(0);
#define SYSCHECK(c) do{if((c)<0) {dprintf("%s %d error: '%s' (code: %d)\n", __func__, __LINE__, strerror(errno), errno); return -1;}}while(0)
#define cfmakenoblock(fd) do{fcntl(fd, F_SETFL, fcntl(fd,F_GETFL) | O_NONBLOCK);}while(0)
#define safe_free(__x) do { if (__x) { free((void *)__x); __x = NULL;}} while(0)
#define safe_at_response_free(__x) { if (__x) { at_response_free(__x); __x = NULL;}}
#define at_response_error(err, p_response) \
(err \
|| p_response == NULL \
|| p_response->finalResponse == NULL \
|| p_response->success == 0)
typedef struct {
struct qlistnode qnode;
int ClientFd;
unsigned AccessTime;
} ATC_PROXY_CONNECTION;
static int atc_proxy_quit = 0;
static pthread_t thread_id = 0;
static int atc_dev_fd = -1;
static int atc_proxy_server_fd = -1;
static struct qlistnode atc_proxy_connection;
static int verbose_debug = 0;
static int modem_reset_flag = 0;
static uint8_t atc_buf[4096];
static int asr_style_atc = 0;
extern int asprintf(char **s, const char *fmt, ...);
static ATC_PROXY_CONNECTION *current_client_fd = NULL;
static void dump_atc(uint8_t *pATC, int fd,int size, const char flag)
{
if (verbose_debug) {
printf("%c %d:\n", flag, fd);
printf("%.*s\n", size, pATC);
}
}
static int send_atc_to_client(int clientFd, uint8_t *pATC, int size) {
struct pollfd pollfds[]= {{clientFd, POLLOUT, 0}};
ssize_t ret = 0;
do {
ret = poll(pollfds, sizeof(pollfds)/sizeof(pollfds[0]), 5000);
} while (ret == -1 && errno == EINTR && atc_proxy_quit == 0);
if (pollfds[0].revents & POLLOUT) {
ret = write(clientFd, pATC, size);
}
return ret;
}
static void onUnsolicited (const char *s, const char *sms_pdu)
{
struct qlistnode *con_node;
int ret;
char buf[1024];
if(s) {
strcpy(buf, s);
strcat(buf, "\r\n");
}
if(sms_pdu) {
strcat(buf, sms_pdu);
strcat(buf, "\r\n");
}
if(current_client_fd) {
ATC_PROXY_CONNECTION *atc_con = current_client_fd;
ret = send_atc_to_client(atc_con->ClientFd, (uint8_t *)buf, strlen(buf));
if(ret < 0) {
close(atc_con->ClientFd);
qlist_remove(&atc_con->qnode);
free(atc_con);
}
return;
}
qlist_for_each(con_node, &atc_proxy_connection) {
ATC_PROXY_CONNECTION *atc_con = qnode_to_item(con_node, ATC_PROXY_CONNECTION, qnode);
if(atc_con && atc_con->ClientFd > 0) {
ret = send_atc_to_client(atc_con->ClientFd, (uint8_t *)buf, strlen(buf));
if(ret < 0) {
close(atc_con->ClientFd);
con_node = con_node->prev;
qlist_remove(&atc_con->qnode);
free(atc_con);
continue;
}
}
}
}
static void onTimeout(void) {
dprintf("%s", __func__);
//TODO
}
static void onClose(void) {
dprintf("%s", __func__);
}
static int create_local_server(const char *name) {
int sockfd = -1;
int reuse_addr = 1;
struct sockaddr_un sockaddr;
socklen_t alen;
/*Create server socket*/
SYSCHECK(sockfd = socket(AF_LOCAL, SOCK_STREAM, 0));
memset(&sockaddr, 0, sizeof(sockaddr));
sockaddr.sun_family = AF_LOCAL;
sockaddr.sun_path[0] = 0;
memcpy(sockaddr.sun_path + 1, name, strlen(name) );
alen = strlen(name) + offsetof(struct sockaddr_un, sun_path) + 1;
SYSCHECK(setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &reuse_addr,sizeof(reuse_addr)));
if(bind(sockfd, (struct sockaddr *)&sockaddr, alen) < 0) {
close(sockfd);
dprintf("bind %s errno: %d (%s)\n", name, errno, strerror(errno));
return -1;
}
dprintf("local server: %s sockfd = %d\n", name, sockfd);
cfmakenoblock(sockfd);
listen(sockfd, 1);
return sockfd;
}
static void accept_atc_connection(int serverfd) {
int clientfd = -1;
unsigned char addr[128];
socklen_t alen = sizeof(addr);
ATC_PROXY_CONNECTION *atc_con;
clientfd = accept(serverfd, (struct sockaddr *)addr, &alen);
atc_con = (ATC_PROXY_CONNECTION *)malloc(sizeof(ATC_PROXY_CONNECTION));
if (atc_con) {
qlist_init(&atc_con->qnode);
atc_con->ClientFd= clientfd;
atc_con->AccessTime = 0;
dprintf("+++ ClientFd=%d\n", atc_con->ClientFd);
qlist_add_tail(&atc_proxy_connection, &atc_con->qnode);
}
cfmakenoblock(clientfd);
}
static void cleanup_atc_connection(int clientfd) {
struct qlistnode *con_node;
qlist_for_each(con_node, &atc_proxy_connection) {
ATC_PROXY_CONNECTION *atc_con = qnode_to_item(con_node, ATC_PROXY_CONNECTION, qnode);
if (atc_con->ClientFd == clientfd) {
dprintf("--- ClientFd=%d\n", atc_con->ClientFd);
close(atc_con->ClientFd);
qlist_remove(&atc_con->qnode);
if (current_client_fd == atc_con)
current_client_fd = NULL;
free(atc_con);
break;
}
}
}
static int atc_proxy_init(void) {
int err;
char *cmd;
ATResponse *p_response = NULL;
err = at_handshake();
if (err) {
dprintf("handshake fail, TODO ... ");
goto exit;
}
at_send_command_singleline("AT+QCFG=\"usbnet\"", "+QCFG:", NULL);
at_send_command_multiline("AT+QNETDEVCTL=?", "+QNETDEVCTL:", NULL);
at_send_command("AT+CGREG=2", NULL); //GPRS Network Registration Status
at_send_command("AT+CEREG=2", NULL); //EPS Network Registration Status
at_send_command("AT+C5GREG=2", NULL); //5GS Network Registration Status
at_send_command_singleline("AT+QNETDEVSTATUS=?", "+QNETDEVSTATUS:", &p_response);
if (at_response_error(err, p_response))
asr_style_atc = 1; //EC200T/EC100Y do not support this AT, but RG801/RG500U support
safe_at_response_free(p_response);
err = at_send_command_singleline("AT+QCFG=\"NAT\"", "+QCFG:", &p_response);
if (!at_response_error(err, p_response)) {
int old_nat, new_nat = asr_style_atc ? 1 : 0;
err = at_tok_scanf(p_response->p_intermediates->line, "%s%d", NULL, &old_nat);
if (err == 2 && old_nat != new_nat) {
safe_at_response_free(p_response);
asprintf(&cmd, "AT+QCFG=\"NAT\",%d", new_nat);
err = at_send_command(cmd, &p_response);
safe_free(cmd);
if (!at_response_error(err, p_response)) {
err = at_send_command("at+cfun=1,1",NULL);
}
safe_at_response_free(p_response);
}
err = 0;
}
safe_at_response_free(p_response);
exit:
return err;
}
static void atc_start_server(const char* servername) {
atc_proxy_server_fd = create_local_server(servername);
dprintf("atc_proxy_server_fd = %d\n", atc_proxy_server_fd);
if (atc_proxy_server_fd == -1) {
dprintf("Failed to create %s, errno: %d (%s)\n", servername, errno, strerror(errno));
}
}
static void atc_close_server(const char* servername) {
if (atc_proxy_server_fd != -1) {
dprintf("%s %s close server\n", __func__, servername);
close(atc_proxy_server_fd);
atc_proxy_server_fd = -1;
}
}
static void *atc_proxy_loop(void *param)
{
uint8_t *pATC = atc_buf;
struct qlistnode *con_node;
ATC_PROXY_CONNECTION *atc_con;
(void)param;
dprintf("%s enter thread_id %p\n", __func__, (void *)pthread_self());
qlist_init(&atc_proxy_connection);
while (atc_dev_fd > 0 && atc_proxy_quit == 0) {
struct pollfd pollfds[2+64];
int ne, ret, nevents = 0;
ssize_t nreads;
pollfds[nevents].fd = atc_dev_fd;
pollfds[nevents].events = POLLIN;
pollfds[nevents].revents= 0;
nevents++;
if (atc_proxy_server_fd > 0) {
pollfds[nevents].fd = atc_proxy_server_fd;
pollfds[nevents].events = POLLIN;
pollfds[nevents].revents= 0;
nevents++;
}
qlist_for_each(con_node, &atc_proxy_connection) {
atc_con = qnode_to_item(con_node, ATC_PROXY_CONNECTION, qnode);
pollfds[nevents].fd = atc_con->ClientFd;
pollfds[nevents].events = POLLIN;
pollfds[nevents].revents= 0;
nevents++;
if (nevents == (sizeof(pollfds)/sizeof(pollfds[0])))
break;
}
do {
ret = poll(pollfds, nevents, (atc_proxy_server_fd > 0) ? -1 : 200);
} while (ret == -1 && errno == EINTR && atc_proxy_quit == 0);
if (ret < 0) {
dprintf("%s poll=%d, errno: %d (%s)\n", __func__, ret, errno, strerror(errno));
goto atc_proxy_loop_exit;
}
for (ne = 0; ne < nevents; ne++) {
int fd = pollfds[ne].fd;
short revents = pollfds[ne].revents;
if (revents & (POLLERR | POLLHUP | POLLNVAL)) {
dprintf("%s poll fd = %d, revents = %04x\n", __func__, fd, revents);
if (fd == atc_dev_fd) {
goto atc_proxy_loop_exit;
} else if(fd == atc_proxy_server_fd) {
} else {
cleanup_atc_connection(fd);
}
continue;
}
if (!(pollfds[ne].revents & POLLIN)) {
continue;
}
if (fd == atc_proxy_server_fd) {
accept_atc_connection(fd);
}
else if (fd == atc_dev_fd) {
usleep(10*1000); //let atchannel.c read at response.
if (modem_reset_flag)
goto atc_proxy_loop_exit;
}
else {
memset(atc_buf, 0x0, sizeof(atc_buf));
nreads = read(fd, pATC, sizeof(atc_buf) - 1); /* keep room for the terminating NUL */
if (nreads <= 0) {
dprintf("%s read=%d errno: %d (%s)", __func__, (int)nreads, errno, strerror(errno));
cleanup_atc_connection(fd);
break;
}
dump_atc(pATC, fd, nreads, 'r');
qlist_for_each(con_node, &atc_proxy_connection) {
atc_con = qnode_to_item(con_node, ATC_PROXY_CONNECTION, qnode);
if (atc_con->ClientFd == fd) {
current_client_fd = atc_con;
break;
}
}
at_send_command ((const char *)pATC, NULL);
current_client_fd = NULL;
}
}
}
atc_proxy_loop_exit:
at_close();
while (!qlist_empty(&atc_proxy_connection)) {
ATC_PROXY_CONNECTION *atc_con = qnode_to_item(qlist_head(&atc_proxy_connection), ATC_PROXY_CONNECTION, qnode);
cleanup_atc_connection(atc_con->ClientFd);
}
dprintf("%s exit, thread_id %p\n", __func__, (void *)pthread_self());
return NULL;
}
static void usage(void) {
dprintf(" -d <device_name> A valid atc device\n"
" default /dev/ttyUSB2, but /dev/ttyUSB2 may be invalid\n"
" -i <netcard_name> netcard name\n"
" -v Will show all details\n");
}
static void sig_action(int sig) {
if (atc_proxy_quit == 0) {
atc_proxy_quit = 1;
if (thread_id)
pthread_kill(thread_id, sig);
}
}
int main(int argc, char *argv[]) {
int opt;
char atc_dev[32+1] = "/dev/ttyUSB2";
int retry_times = 0;
char servername[64] = {0};
optind = 1;
signal(SIGINT, sig_action);
while ( -1 != (opt = getopt(argc, argv, "d:i:vh"))) {
switch (opt) {
case 'd':
strcpy(atc_dev, optarg);
break;
case 'v':
verbose_debug = 1;
break;
default:
usage();
return 0;
}
}
if (access(atc_dev, R_OK | W_OK)) {
dprintf("Fail to access %s, errno: %d (%s). break\n", atc_dev, errno, strerror(errno));
return -1;
}
sprintf(servername, "quectel-atc-proxy%c", atc_dev[strlen(atc_dev) - 1]);
dprintf("Will use atc-dev='%s', proxy='%s'\n", atc_dev, servername);
while (atc_proxy_quit == 0) {
if (access(atc_dev, R_OK | W_OK)) {
dprintf("Fail to access %s, errno: %d (%s). continue\n", atc_dev, errno, strerror(errno));
// wait device
sleep(3);
continue;
}
atc_dev_fd = open(atc_dev, O_RDWR | O_NONBLOCK | O_NOCTTY);
if (atc_dev_fd == -1) {
dprintf("Failed to open %s, errno: %d (%s). break\n", atc_dev, errno, strerror(errno));
return -1;
}
cfmakenoblock(atc_dev_fd);
if (at_open(atc_dev_fd, onUnsolicited, 1)) {
close(atc_dev_fd);
atc_dev_fd = -1;
}
at_set_on_timeout(onTimeout);
at_set_on_reader_closed(onClose);
/* no atc_proxy_loop lives, create one */
pthread_create(&thread_id, NULL, atc_proxy_loop, NULL);
/* retry init if it fails; init must succeed before the proxy can serve clients */
while (atc_proxy_init() != 0) {
if (retry_times < 5) {
dprintf("failed to init proxy, trying again in 2 seconds.\n");
sleep(2);
retry_times++;
} else {
dprintf("failed too many times; the modem may need a reset, continuing anyway...\n");
break;
}
/* break loop if modem is detached */
if (access(atc_dev, F_OK|R_OK|W_OK))
break;
}
retry_times = 0;
atc_start_server(servername);
if (atc_proxy_server_fd == -1)
pthread_cancel(thread_id);
pthread_join(thread_id, NULL);
/* close local server at last */
atc_close_server(servername);
close(atc_dev_fd);
/* DO RESTART IN 20s IF MODEM RESET ITSELF */
if (modem_reset_flag) {
unsigned int time_to_wait = 20;
while (time_to_wait) {
time_to_wait = sleep(time_to_wait);
}
modem_reset_flag = 0;
}
}
return 0;
}
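
To illustrate how a client talks to this proxy, below is a hedged sketch (not shipped with the proxy) that connects to the abstract-namespace AF_LOCAL socket bound by create_local_server(). The socket name "quectel-atc-proxy2" is an assumption that holds when the proxy was started on /dev/ttyUSB2, per the sprintf() in main() above; the client writes one AT command and prints whatever the proxy forwards back.

```
/* Hedged client sketch. Assumption: the proxy runs on /dev/ttyUSB2,
 * so its abstract socket name is "quectel-atc-proxy2" (see main() above). */
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>

int main(void)
{
    const char *name = "quectel-atc-proxy2"; /* assumed, see sprintf() in main() */
    struct sockaddr_un addr;
    socklen_t alen;
    char buf[1024];
    ssize_t n;
    int fd = socket(AF_LOCAL, SOCK_STREAM, 0);

    if (fd < 0) { perror("socket"); return 1; }
    memset(&addr, 0, sizeof(addr));
    addr.sun_family = AF_LOCAL;
    memcpy(addr.sun_path + 1, name, strlen(name)); /* abstract namespace: leading '\0' */
    alen = strlen(name) + offsetof(struct sockaddr_un, sun_path) + 1;

    if (connect(fd, (struct sockaddr *)&addr, alen) < 0) { perror("connect"); return 1; }
    if (write(fd, "AT+CSQ\r\n", 8) != 8) { perror("write"); return 1; }
    n = read(fd, buf, sizeof(buf) - 1); /* response lines are relayed via onUnsolicited() */
    if (n > 0) { buf[n] = '\0'; printf("%s", buf); }
    close(fd);
    return 0;
}
```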

View File

@ -0,0 +1,453 @@
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdarg.h>
#include <stddef.h>
#include <fcntl.h>
#include <pthread.h>
#include <poll.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/ioctl.h>
#include <linux/un.h>
#include <linux/in.h>
#include <linux/if.h>
#include <dirent.h>
#include <signal.h>
#include <inttypes.h>
#include <getopt.h>
#include "qendian.h"
#define QUECTEL_MBIM_PROXY "quectel-mbim-proxy"
#define safe_close(_fd) do { if (_fd > 0) { close(_fd); _fd = -1; } } while(0)
#define CM_MAX_CLIENT 32
#define TID_MASK (0xFFFFFF)
#define TID_SHIFT (24)
typedef enum {
MBIM_OPEN_MSG = 1,
MBIM_CLOSE_MSG = 2,
MBIM_OPEN_DONE = 0x80000001,
MBIM_CLOSE_DONE = 0x80000002,
} MBIM_MSG;
typedef struct {
unsigned int MessageType;
unsigned int MessageLength;
unsigned int TransactionId;
} MBIM_MESSAGE_HEADER;
typedef struct {
MBIM_MESSAGE_HEADER MessageHeader;
unsigned int MaxControlTransfer;
} MBIM_OPEN_MSG_T;
typedef struct {
MBIM_MESSAGE_HEADER MessageHeader;
unsigned int Status;
} MBIM_OPEN_DONE_T;
typedef struct {
int client_fd;
int client_idx;
} CM_CLIENT_T;
static unsigned char cm_recv_buffer[4096];
static CM_CLIENT_T cm_clients[CM_MAX_CLIENT];
static int verbose = 0;
const char * get_time(void) {
static char time_buf[128];
struct timeval tv;
time_t time;
suseconds_t millitm;
struct tm *ti;
gettimeofday (&tv, NULL);
time= tv.tv_sec;
millitm = (tv.tv_usec + 500) / 1000;
if (millitm == 1000) {
++time;
millitm = 0;
}
ti = localtime(&time);
sprintf(time_buf, "[%02d-%02d_%02d:%02d:%02d:%03d]", ti->tm_mon+1, ti->tm_mday, ti->tm_hour, ti->tm_min, ti->tm_sec, (int)millitm);
return time_buf;
}
#define mbim_debug(fmt, args...) do { fprintf(stdout, "%s " fmt, get_time(), ##args); } while(0);
static int non_block_write(int fd, void *data, int len)
{
int ret;
struct pollfd pollfd = {fd, POLLOUT, 0};
ret = poll(&pollfd, 1, 3000);
if (ret <= 0) {
mbim_debug("%s poll ret=%d, errno: %d(%s)\n", __func__, ret, errno, strerror(errno));
}
ret = write (fd, data, len);
if (ret != len)
mbim_debug("%s write ret=%d, errno: %d(%s)\n", __func__, ret, errno, strerror(errno));
return len;
}
static int mbim_send_open_msg(int mbim_dev_fd, uint32_t MaxControlTransfer) {
MBIM_OPEN_MSG_T open_msg;
MBIM_OPEN_MSG_T *pRequest = &open_msg;
pRequest->MessageHeader.MessageType = htole32(MBIM_OPEN_MSG);
pRequest->MessageHeader.MessageLength = htole32(sizeof(MBIM_OPEN_MSG_T));
pRequest->MessageHeader.TransactionId = htole32(1);
pRequest->MaxControlTransfer = htole32(MaxControlTransfer);
mbim_debug("%s()\n", __func__);
return non_block_write(mbim_dev_fd, pRequest, sizeof(MBIM_OPEN_MSG_T));
}
/*
* parameter: proxy name
* return: local proxy server fd or -1
*/
static int proxy_make_server(const char *proxy_name)
{
int len, flag;
struct sockaddr_un sockaddr;
int mbim_server_fd;
mbim_server_fd = socket(AF_LOCAL, SOCK_STREAM, 0);
if (mbim_server_fd < 0) {
mbim_debug("socket failed: %s\n", strerror(errno));
return -1;
}
if (fcntl(mbim_server_fd, F_SETFL, fcntl(mbim_server_fd, F_GETFL) | O_NONBLOCK) < 0)
mbim_debug("fcntl set server(%d) NONBLOCK attribute failed: %s\n", mbim_server_fd, strerror(errno));
memset(&sockaddr, 0, sizeof(sockaddr));
sockaddr.sun_family = AF_LOCAL;
sockaddr.sun_path[0] = 0;
snprintf(sockaddr.sun_path, UNIX_PATH_MAX, "0%s", proxy_name);
sockaddr.sun_path[0] = '\0'; // string starts with leading '\0'
flag = 1;
if (setsockopt(mbim_server_fd, SOL_SOCKET, SO_REUSEADDR, &flag, sizeof(flag)) < 0) {
safe_close(mbim_server_fd);
mbim_debug("setsockopt failed\n");
}
len = strlen(proxy_name) + offsetof(struct sockaddr_un, sun_path) + 1;
if (bind(mbim_server_fd, (struct sockaddr*)&sockaddr, len) < 0) {
safe_close(mbim_server_fd);
mbim_debug("bind failed: %s\n", strerror(errno));
return -1;
}
listen(mbim_server_fd, 4);
return mbim_server_fd;
}
static int handle_client_connect(int server_fd)
{
int i, client_fd;
struct sockaddr_in cli_addr;
socklen_t len = sizeof(cli_addr);
client_fd = accept(server_fd, (struct sockaddr *)&cli_addr, &len);
if (client_fd < 0) {
mbim_debug("proxy accept failed: %s\n", strerror(errno));
return -1;
}
if (fcntl(client_fd, F_SETFL, fcntl(client_fd, F_GETFL) | O_NONBLOCK) < 0)
mbim_debug("fcntl set client(%d) NONBLOCK attribute failed: %s\n", client_fd, strerror(errno));
for (i = 0; i < CM_MAX_CLIENT; i++) {
if (cm_clients[i].client_fd <= 0) {
cm_clients[i].client_fd = client_fd;
cm_clients[i].client_idx= i+1;
mbim_debug("%s client_fd=%d, client_idx=%d\n", __func__, cm_clients[i].client_fd, cm_clients[i].client_idx);
return 0;
}
}
close(client_fd);
return -1;
}
static void handle_client_disconnect(int client_fd)
{
int i;
for (i = 0; i < CM_MAX_CLIENT; i++) {
if (cm_clients[i].client_fd == client_fd) {
mbim_debug("%s client_fd=%d, client_idx=%d\n", __func__, cm_clients[i].client_fd, cm_clients[i].client_idx);
safe_close(cm_clients[i].client_fd);
return;
}
}
}
static int handle_client_request(int mbim_dev_fd, int client_fd, void *pdata, int len)
{
int i;
int client_idx = -1;
int ret;
MBIM_MESSAGE_HEADER *pRequest = (MBIM_MESSAGE_HEADER *)pdata;
unsigned int TransactionId = le32toh(pRequest->TransactionId);
for (i = 0; i < CM_MAX_CLIENT; i++) {
if (cm_clients[i].client_fd == client_fd) {
client_idx = cm_clients[i].client_idx;
break;
}
}
if (client_idx == -1) {
goto error;
}
if (le32toh(pRequest->MessageType) == MBIM_OPEN_MSG
|| le32toh(pRequest->MessageType) == MBIM_CLOSE_MSG) {
MBIM_OPEN_DONE_T OpenDone;
OpenDone.MessageHeader.MessageType = htole32(le32toh(pRequest->MessageType) | 0x80000000);
OpenDone.MessageHeader.MessageLength = htole32(sizeof(OpenDone));
OpenDone.MessageHeader.TransactionId = htole32(TransactionId);
OpenDone.Status = htole32(0);
non_block_write (client_fd, &OpenDone, sizeof(OpenDone));
return 0;
}
/* transfer TransicationID to proxy transicationID and record in sender list */
pRequest->TransactionId = htole32(TransactionId | (client_idx << TID_SHIFT));
if (verbose) mbim_debug("REQ client_fd=%d, client_idx=%d, tid=%u\n",
cm_clients[client_idx].client_fd, cm_clients[client_idx].client_idx, TransactionId);
ret = non_block_write (mbim_dev_fd, pRequest, len);
if (ret == len)
return 0;
error:
return -1;
}
/*
* Read a message from the device and forward it to the owning client.
* Notice:
* unsolicited messages (TransactionId == 0) are sent to all clients
*/
static int handle_device_response(void *pdata, int len)
{
int i;
MBIM_MESSAGE_HEADER *pResponse = (MBIM_MESSAGE_HEADER *)pdata;
unsigned int TransactionId = le32toh(pResponse->TransactionId);
/* unsolicited / function error message */
if (TransactionId == 0) {
for (i = 0; i < CM_MAX_CLIENT; i++) {
if (cm_clients[i].client_fd > 0) {
non_block_write(cm_clients[i].client_fd, pResponse, len);
}
}
}
else {
/* try to find the sender */
int client_idx = (TransactionId >> TID_SHIFT);
for (i = 0; i < CM_MAX_CLIENT; i++) {
if (cm_clients[i].client_idx == client_idx && cm_clients[i].client_fd > 0) {
TransactionId &= TID_MASK;
pResponse->TransactionId = htole32(TransactionId);
if (verbose) mbim_debug("RSP client_fd=%d, client_idx=%d, tid=%u\n",
cm_clients[i].client_fd, cm_clients[i].client_idx, TransactionId);
non_block_write(cm_clients[i].client_fd, pResponse, len);
break;
}
}
if (i == CM_MAX_CLIENT) {
mbim_debug("%s nobody care tid=%u\n", __func__, TransactionId);
}
}
return 0;
}
static int proxy_loop(int mbim_dev_fd)
{
int i;
int mbim_server_fd = -1;
while (mbim_dev_fd > 0) {
struct pollfd pollfds[2+CM_MAX_CLIENT];
int ne, ret, nevents = 0;
pollfds[nevents].fd = mbim_dev_fd;
pollfds[nevents].events = POLLIN;
pollfds[nevents].revents= 0;
nevents++;
if (mbim_server_fd > 0) {
pollfds[nevents].fd = mbim_server_fd;
pollfds[nevents].events = POLLIN;
pollfds[nevents].revents= 0;
nevents++;
for (i = 0; i < CM_MAX_CLIENT; i++) {
if (cm_clients[i].client_fd > 0) {
pollfds[nevents].fd = cm_clients[i].client_fd;
pollfds[nevents].events = POLLIN;
pollfds[nevents].revents= 0;
nevents++;
}
}
}
ret = poll(pollfds, nevents, (mbim_server_fd > 0) ? -1 : (10*1000));
if (ret <= 0) {
goto error;
}
for (ne = 0; ne < nevents; ne++) {
int fd = pollfds[ne].fd;
short revents = pollfds[ne].revents;
if (revents & (POLLERR | POLLHUP | POLLNVAL)) {
mbim_debug("%s poll fd = %d, revents = %04x\n", __func__, fd, revents);
if (fd == mbim_dev_fd) {
goto error;
} else if(fd == mbim_server_fd) {
} else {
handle_client_disconnect(fd);
}
continue;
}
if (!(pollfds[ne].revents & POLLIN)) {
continue;
}
if (fd == mbim_server_fd) {
handle_client_connect(fd);
}
else {
int len = read(fd, cm_recv_buffer, sizeof(cm_recv_buffer));
if (len <= 0) {
mbim_debug("%s read fd=%d, len=%d, errno: %d(%s)\n", __func__, fd, len, errno, strerror(errno));
if (fd == mbim_dev_fd)
goto error;
else
handle_client_disconnect(fd);
continue;
}
if (fd == mbim_dev_fd) {
if (mbim_server_fd == -1) {
MBIM_OPEN_DONE_T *pOpenDone = (MBIM_OPEN_DONE_T *)cm_recv_buffer;
if (le32toh(pOpenDone->MessageHeader.MessageType) == MBIM_OPEN_DONE) {
mbim_debug("receive MBIM_OPEN_DONE, status=%d\n", htole32(pOpenDone->Status));
if (htole32(pOpenDone->Status))
goto error;
mbim_server_fd = proxy_make_server(QUECTEL_MBIM_PROXY);
mbim_debug("mbim_server_fd=%d\n", mbim_server_fd);
}
}
else {
handle_device_response(cm_recv_buffer, len);
}
}
else {
handle_client_request(mbim_dev_fd, fd, cm_recv_buffer, len);
}
}
}
}
error:
safe_close(mbim_server_fd);
for (i = 0; i < CM_MAX_CLIENT; i++) {
safe_close(cm_clients[i].client_fd);
}
mbim_debug("%s exit\n", __func__);
return 0;
}
/*
* How to use this proxy?
* 1. modprobe -a 8021q
* 2. Create network interface for channels:
* ip link add link wwan0 name wwan0.1 type vlan id 1
* ip link add link wwan0 name wwan0.2 type vlan id 2
* 3. Start './mbim-proxy' with -d 'device'
* 4. Start Clients: ./quectel-CM -n id1
* 5. Start Clients: ./quectel-CM -n id2
* ...
* Notice:
* mbim-proxy can run in the background as a daemon
* '-n' sessionID
* The modem may not support multi-PDN mode, and how many PDNs it supports is modem-specific.
* Besides, some modems may not support certain session IDs. For instance, EC20 doesn't support SessionId 1...
*/
int main(int argc, char **argv)
{
int optidx = 0;
int opt;
char *optstr = "d:vh";
const char *device = "/dev/cdc-wdm0";
struct option options[] = {
{"verbose", no_argument, NULL, 'v'},
{"device", required_argument, NULL, 'd'},
{0, 0, 0, 0},
};
while ((opt = getopt_long(argc, argv, optstr, options, &optidx)) != -1) {
switch (opt) {
case 'v':
verbose = 1;
break;
case 'd':
device = optarg;
break;
case 'h':
mbim_debug("-h Show this message\n");
mbim_debug("-v Verbose\n");
mbim_debug("-d [device] MBIM device\n");
return 0;
default:
mbim_debug("illegal argument\n");
return -1;
}
}
if (!device) {
mbim_debug("Missing parameter: device\n");
return -1;
}
while (1) {
int mbim_dev_fd = open(device, O_RDWR | O_NONBLOCK | O_NOCTTY);
if (mbim_dev_fd < 0) {
mbim_debug("cannot open mbim_device %s: %s\n", device, strerror(errno));
sleep(2);
continue;
}
mbim_debug ("mbim_dev_fd=%d\n", mbim_dev_fd);
memset(cm_clients, 0, sizeof(cm_clients));
mbim_send_open_msg(mbim_dev_fd, sizeof(cm_recv_buffer));
proxy_loop(mbim_dev_fd);
safe_close(mbim_dev_fd);
}
return -1;
}
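
The demultiplexing scheme above is worth spelling out: each client request's TransactionId is tagged with the owning client index in its top byte before it is written to the device, the tag is stripped again on the way back, and a TransactionId of 0 marks an unsolicited message that is broadcast to every client (which is what the session/VLAN setup in the usage comment relies on). A tiny sketch of just that arithmetic, with hypothetical values:

```
/* Sketch of the TransactionId tagging in handle_client_request() /
 * handle_device_response() above; values are hypothetical. */
#include <assert.h>
#include <stdio.h>

#define TID_MASK  (0xFFFFFF)
#define TID_SHIFT (24)

int main(void)
{
    unsigned client_idx = 3;        /* 1..CM_MAX_CLIENT, assigned on connect */
    unsigned client_tid = 0x001234; /* must fit in 24 bits, as the proxy assumes */

    /* request path: tag the id with the owning client */
    unsigned wire_tid = client_tid | (client_idx << TID_SHIFT);

    /* response path: recover the owner and the original id */
    assert((wire_tid >> TID_SHIFT) == client_idx);
    assert((wire_tid & TID_MASK) == client_tid);
    printf("wire tid 0x%08x -> client %u, tid 0x%06x\n",
           wire_tid, wire_tid >> TID_SHIFT, wire_tid & TID_MASK);
    return 0;
}
```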

View File

@ -0,0 +1,700 @@
/******************************************************************************
@file quectel-qmi-proxy.c
@brief The qmi proxy.
DESCRIPTION
Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules.
INITIALIZATION AND SEQUENCING REQUIREMENTS
None.
---------------------------------------------------------------------------
Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved.
Quectel Wireless Solution Proprietary and Confidential.
---------------------------------------------------------------------------
******************************************************************************/
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdarg.h>
#include <stddef.h>
#include <fcntl.h>
#include <pthread.h>
#include <poll.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/ioctl.h>
#include <linux/un.h>
#include <linux/if.h>
#include <dirent.h>
#include <signal.h>
#include <inttypes.h>
#include "qendian.h"
#include "qlist.h"
#include "QCQMI.h"
#include "QCQCTL.h"
#include "QCQMUX.h"
#ifndef MIN
#define MIN(a, b) ((a) < (b)? (a): (b))
#endif
const char * get_time(void) {
static char time_buf[128];
struct timeval tv;
time_t time;
suseconds_t millitm;
struct tm *ti;
gettimeofday (&tv, NULL);
time= tv.tv_sec;
millitm = (tv.tv_usec + 500) / 1000;
if (millitm == 1000) {
++time;
millitm = 0;
}
ti = localtime(&time);
sprintf(time_buf, "[%02d-%02d_%02d:%02d:%02d:%03d]", ti->tm_mon+1, ti->tm_mday, ti->tm_hour, ti->tm_min, ti->tm_sec, (int)millitm);
return time_buf;
}
#define dprintf(fmt, args...) do { fprintf(stdout, "%s " fmt, get_time(), ##args); } while(0);
#define SYSCHECK(c) do{if((c)<0) {dprintf("%s %d error: '%s' (code: %d)\n", __func__, __LINE__, strerror(errno), errno); return -1;}}while(0)
#define cfmakenoblock(fd) do{fcntl(fd, F_SETFL, fcntl(fd,F_GETFL) | O_NONBLOCK);}while(0)
typedef struct {
struct qlistnode qnode;
int ClientFd;
QCQMIMSG qmi[0];
} QMI_PROXY_MSG;
typedef struct {
struct qlistnode qnode;
uint8_t QMIType;
uint8_t ClientId;
unsigned AccessTime;
} QMI_PROXY_CLINET;
typedef struct {
struct qlistnode qnode;
struct qlistnode client_qnode;
int ClientFd;
unsigned AccessTime;
} QMI_PROXY_CONNECTION;
#ifdef QUECTEL_QMI_MERGE
#define MERGE_PACKET_IDENTITY 0x2c7c
#define MERGE_PACKET_VERSION 0x0001
#define MERGE_PACKET_MAX_PAYLOAD_SIZE 56
typedef struct __QMI_MSG_HEADER {
uint16_t idenity;
uint16_t version;
uint16_t cur_len;
uint16_t total_len;
} QMI_MSG_HEADER;
typedef struct __QMI_MSG_PACKET {
QMI_MSG_HEADER header;
uint16_t len;
char buf[4096];
} QMI_MSG_PACKET;
#endif
static int qmi_proxy_quit = 0;
static pthread_t thread_id = 0;
static int cdc_wdm_fd = -1;
static int qmi_proxy_server_fd = -1;
static struct qlistnode qmi_proxy_connection;
static struct qlistnode qmi_proxy_ctl_msg;
static int verbose_debug = 0;
static int modem_reset_flag = 0;
static int qmi_sync_done = 0;
static uint8_t qmi_buf[4096];
static int send_qmi_to_cdc_wdm(PQCQMIMSG pQMI);
#ifdef QUECTEL_QMI_MERGE
static int merge_qmi_rsp_packet(void *buf, ssize_t *src_size) {
static QMI_MSG_PACKET s_QMIPacket;
QMI_MSG_HEADER *header = NULL;
ssize_t size = *src_size;
if((uint16_t)size < sizeof(QMI_MSG_HEADER))
return -1;
header = (QMI_MSG_HEADER *)buf;
if(le16toh(header->idenity) != MERGE_PACKET_IDENTITY || le16toh(header->version) != MERGE_PACKET_VERSION || le16toh(header->cur_len) > le16toh(header->total_len))
return -1;
if(le16toh(header->cur_len) == le16toh(header->total_len)) {
*src_size = le16toh(header->total_len);
memcpy(buf, buf + sizeof(QMI_MSG_HEADER), *src_size);
s_QMIPacket.len = 0;
return 0;
}
memcpy(s_QMIPacket.buf + s_QMIPacket.len, buf + sizeof(QMI_MSG_HEADER), le16toh(header->cur_len));
s_QMIPacket.len += le16toh(header->cur_len);
if (le16toh(header->cur_len) < MERGE_PACKET_MAX_PAYLOAD_SIZE || s_QMIPacket.len >= le16toh(header->total_len)) {
memcpy(buf, s_QMIPacket.buf, s_QMIPacket.len);
*src_size = s_QMIPacket.len;
s_QMIPacket.len = 0;
return 0;
}
return -1;
}
#endif
static int create_local_server(const char *name) {
int sockfd = -1;
int reuse_addr = 1;
struct sockaddr_un sockaddr;
socklen_t alen;
/*Create server socket*/
SYSCHECK(sockfd = socket(AF_LOCAL, SOCK_STREAM, 0));
memset(&sockaddr, 0, sizeof(sockaddr));
sockaddr.sun_family = AF_LOCAL;
sockaddr.sun_path[0] = 0;
memcpy(sockaddr.sun_path + 1, name, strlen(name) );
alen = strlen(name) + offsetof(struct sockaddr_un, sun_path) + 1;
SYSCHECK(setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &reuse_addr,sizeof(reuse_addr)));
if(bind(sockfd, (struct sockaddr *)&sockaddr, alen) < 0) {
dprintf("bind %s errno: %d (%s)\n", name, errno, strerror(errno));
close(sockfd);
return -1;
}
dprintf("local server: %s sockfd = %d\n", name, sockfd);
cfmakenoblock(sockfd);
listen(sockfd, 1);
return sockfd;
}
static void accept_qmi_connection(int serverfd) {
int clientfd = -1;
unsigned char addr[128];
socklen_t alen = sizeof(addr);
QMI_PROXY_CONNECTION *qmi_con;
clientfd = accept(serverfd, (struct sockaddr *)addr, &alen);
qmi_con = (QMI_PROXY_CONNECTION *)malloc(sizeof(QMI_PROXY_CONNECTION));
if (qmi_con) {
qlist_init(&qmi_con->qnode);
qlist_init(&qmi_con->client_qnode);
qmi_con->ClientFd= clientfd;
qmi_con->AccessTime = 0;
dprintf("+++ ClientFd=%d\n", qmi_con->ClientFd);
qlist_add_tail(&qmi_proxy_connection, &qmi_con->qnode);
}
cfmakenoblock(clientfd);
}
static void cleanup_qmi_connection(int clientfd, int clientDisconnect) {
struct qlistnode *con_node, *qmi_node;
qlist_for_each(con_node, &qmi_proxy_connection) {
QMI_PROXY_CONNECTION *qmi_con = qnode_to_item(con_node, QMI_PROXY_CONNECTION, qnode);
if (qmi_con->ClientFd == clientfd) {
while (!qlist_empty(&qmi_con->client_qnode)) {
QMI_PROXY_CLINET *qmi_client = qnode_to_item(qlist_head(&qmi_con->client_qnode), QMI_PROXY_CLINET, qnode);
if (clientDisconnect) {
int size = 17;
QMI_PROXY_MSG *qmi_msg = malloc(sizeof(QMI_PROXY_MSG) + size);
PQCQMIMSG pQMI = &qmi_msg->qmi[0];
dprintf("xxx ClientFd=%d QMIType=%d ClientId=%d\n", qmi_con->ClientFd, qmi_client->QMIType, qmi_client->ClientId);
qlist_init(&qmi_msg->qnode);
qmi_msg->ClientFd = qmi_proxy_server_fd;
pQMI->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI;
pQMI->QMIHdr.Length = htole16(16);
pQMI->QMIHdr.CtlFlags = 0x00;
pQMI->QMIHdr.QMIType = QMUX_TYPE_CTL;
pQMI->QMIHdr.ClientId= 0x00;
pQMI->CTLMsg.ReleaseClientIdReq.CtlFlags = QMICTL_FLAG_REQUEST;
pQMI->CTLMsg.ReleaseClientIdReq.TransactionId = 255;
pQMI->CTLMsg.ReleaseClientIdReq.QMICTLType = htole16(QMICTL_RELEASE_CLIENT_ID_REQ);
pQMI->CTLMsg.ReleaseClientIdReq.Length = htole16(5);
pQMI->CTLMsg.ReleaseClientIdReq.TLVType = QCTLV_TYPE_REQUIRED_PARAMETER;
pQMI->CTLMsg.ReleaseClientIdReq.TLVLength = htole16(2);
pQMI->CTLMsg.ReleaseClientIdReq.QMIType = qmi_client->QMIType;
pQMI->CTLMsg.ReleaseClientIdReq.ClientId = qmi_client->ClientId;
if (qlist_empty(&qmi_proxy_ctl_msg))
send_qmi_to_cdc_wdm(pQMI);
qlist_add_tail(&qmi_proxy_ctl_msg, &qmi_msg->qnode);
}
qlist_remove(&qmi_client->qnode);
free(qmi_client);
}
qlist_for_each(qmi_node, &qmi_proxy_ctl_msg) {
QMI_PROXY_MSG *qmi_msg = qnode_to_item(qmi_node, QMI_PROXY_MSG, qnode);
if (qmi_msg->ClientFd == qmi_con->ClientFd) {
qlist_remove(&qmi_msg->qnode);
free(qmi_msg);
break;
}
}
dprintf("--- ClientFd=%d\n", qmi_con->ClientFd);
close(qmi_con->ClientFd);
qlist_remove(&qmi_con->qnode);
free(qmi_con);
break;
}
}
}
static void get_client_id(QMI_PROXY_CONNECTION *qmi_con, PQMICTL_GET_CLIENT_ID_RESP_MSG pClient) {
if (pClient->QMIResult == 0 && pClient->QMIError == 0) {
QMI_PROXY_CLINET *qmi_client = (QMI_PROXY_CLINET *)malloc(sizeof(QMI_PROXY_CLINET));
qlist_init(&qmi_client->qnode);
qmi_client->QMIType = pClient->QMIType;
qmi_client->ClientId = pClient->ClientId;
qmi_client->AccessTime = 0;
dprintf("+++ ClientFd=%d QMIType=%d ClientId=%d\n", qmi_con->ClientFd, qmi_client->QMIType, qmi_client->ClientId);
qlist_add_tail(&qmi_con->client_qnode, &qmi_client->qnode);
}
}
static void release_client_id(QMI_PROXY_CONNECTION *qmi_con, PQMICTL_RELEASE_CLIENT_ID_RESP_MSG pClient) {
struct qlistnode *client_node;
if (pClient->QMIResult == 0 && pClient->QMIError == 0) {
qlist_for_each (client_node, &qmi_con->client_qnode) {
QMI_PROXY_CLINET *qmi_client = qnode_to_item(client_node, QMI_PROXY_CLINET, qnode);
if (pClient->QMIType == qmi_client->QMIType && pClient->ClientId == qmi_client->ClientId) {
dprintf("--- ClientFd=%d QMIType=%d ClientId=%d\n", qmi_con->ClientFd, qmi_client->QMIType, qmi_client->ClientId);
qlist_remove(&qmi_client->qnode);
free(qmi_client);
break;
}
}
}
}
static void dump_qmi(PQCQMIMSG pQMI, int fd, const char flag)
{
if (verbose_debug)
{
unsigned i;
unsigned size = le16toh(pQMI->QMIHdr.Length) + 1;
char buf[128];
int cnt = 0;
cnt += snprintf(buf + cnt, sizeof(buf) - cnt, "%c %d %u: ", flag, fd, size);
for (i = 0; i < size && i < 24; i++)
cnt += snprintf(buf + cnt, sizeof(buf) - cnt, "%02x ", ((uint8_t *)pQMI)[i]);
dprintf("%s\n", buf)
}
}
static int send_qmi_to_cdc_wdm(PQCQMIMSG pQMI) {
struct pollfd pollfds[]= {{cdc_wdm_fd, POLLOUT, 0}};
ssize_t ret = 0;
do {
ret = poll(pollfds, sizeof(pollfds)/sizeof(pollfds[0]), 5000);
} while (ret == -1 && errno == EINTR && qmi_proxy_quit == 0);
if (pollfds[0].revents & POLLOUT) {
ssize_t size = le16toh(pQMI->QMIHdr.Length) + 1;
ret = write(cdc_wdm_fd, pQMI, size);
dump_qmi(pQMI, cdc_wdm_fd, 'w');
}
return ret;
}
static int send_qmi_to_client(PQCQMIMSG pQMI, int clientFd) {
struct pollfd pollfds[]= {{clientFd, POLLOUT, 0}};
ssize_t ret = 0;
do {
ret = poll(pollfds, sizeof(pollfds)/sizeof(pollfds[0]), 5000);
} while (ret == -1 && errno == EINTR && qmi_proxy_quit == 0);
if (pollfds[0].revents & POLLOUT) {
ssize_t size = le16toh(pQMI->QMIHdr.Length) + 1;
ret = write(clientFd, pQMI, size);
dump_qmi(pQMI, clientFd, 'w');
}
return ret;
}
static void recv_qmi_from_dev(PQCQMIMSG pQMI) {
struct qlistnode *con_node, *client_node;
if (qmi_proxy_server_fd == -1) {
qmi_sync_done = 1;
}
else if (pQMI->QMIHdr.QMIType == QMUX_TYPE_CTL) {
if (pQMI->CTLMsg.QMICTLMsgHdr.CtlFlags == QMICTL_CTL_FLAG_RSP) {
if (!qlist_empty(&qmi_proxy_ctl_msg)) {
QMI_PROXY_MSG *qmi_msg = qnode_to_item(qlist_head(&qmi_proxy_ctl_msg), QMI_PROXY_MSG, qnode);
if (qmi_msg->qmi[0].CTLMsg.QMICTLMsgHdrRsp.TransactionId != pQMI->CTLMsg.QMICTLMsgHdrRsp.TransactionId
|| qmi_msg->qmi[0].CTLMsg.QMICTLMsgHdrRsp.QMICTLType != pQMI->CTLMsg.QMICTLMsgHdrRsp.QMICTLType) {
dprintf("ERROR: ctl rsp tid:%d, type:%d - ctl req %d, %d\n",
pQMI->CTLMsg.QMICTLMsgHdrRsp.TransactionId, pQMI->CTLMsg.QMICTLMsgHdrRsp.QMICTLType,
qmi_msg->qmi[0].CTLMsg.QMICTLMsgHdrRsp.TransactionId, qmi_msg->qmi[0].CTLMsg.QMICTLMsgHdrRsp.QMICTLType);
}
else if (qmi_msg->ClientFd == qmi_proxy_server_fd) {
if (le16toh(pQMI->CTLMsg.QMICTLMsgHdrRsp.QMICTLType) == QMICTL_RELEASE_CLIENT_ID_RESP) {
dprintf("--- ClientFd=%d QMIType=%d ClientId=%d\n", qmi_proxy_server_fd,
pQMI->CTLMsg.ReleaseClientIdRsp.QMIType, pQMI->CTLMsg.ReleaseClientIdRsp.ClientId);
}
}
else {
qlist_for_each(con_node, &qmi_proxy_connection) {
QMI_PROXY_CONNECTION *qmi_con = qnode_to_item(con_node, QMI_PROXY_CONNECTION, qnode);
if (qmi_con->ClientFd == qmi_msg->ClientFd) {
send_qmi_to_client(pQMI, qmi_msg->ClientFd);
if (le16toh(pQMI->CTLMsg.QMICTLMsgHdrRsp.QMICTLType) == QMICTL_GET_CLIENT_ID_RESP) {
get_client_id(qmi_con, &pQMI->CTLMsg.GetClientIdRsp);
}
else if (le16toh(pQMI->CTLMsg.QMICTLMsgHdrRsp.QMICTLType) == QMICTL_RELEASE_CLIENT_ID_RESP) {
release_client_id(qmi_con, &pQMI->CTLMsg.ReleaseClientIdRsp);
}
else {
}
}
}
}
qlist_remove(&qmi_msg->qnode);
free(qmi_msg);
if (!qlist_empty(&qmi_proxy_ctl_msg)) {
QMI_PROXY_MSG *qmi_msg = qnode_to_item(qlist_head(&qmi_proxy_ctl_msg), QMI_PROXY_MSG, qnode);
send_qmi_to_cdc_wdm(qmi_msg->qmi);
}
}
}
else if (pQMI->CTLMsg.QMICTLMsgHdr.CtlFlags == QMICTL_CTL_FLAG_IND) {
if (le16toh(pQMI->CTLMsg.QMICTLMsgHdrRsp.QMICTLType) == QMICTL_REVOKE_CLIENT_ID_IND) {
modem_reset_flag = 1;
}
}
}
else {
qlist_for_each(con_node, &qmi_proxy_connection) {
QMI_PROXY_CONNECTION *qmi_con = qnode_to_item(con_node, QMI_PROXY_CONNECTION, qnode);
qlist_for_each(client_node, &qmi_con->client_qnode) {
QMI_PROXY_CLINET *qmi_client = qnode_to_item(client_node, QMI_PROXY_CLINET, qnode);
if (pQMI->QMIHdr.QMIType == qmi_client->QMIType) {
if (pQMI->QMIHdr.ClientId == 0 || pQMI->QMIHdr.ClientId == qmi_client->ClientId) {
send_qmi_to_client(pQMI, qmi_con->ClientFd);
}
}
}
}
}
}
static int recv_qmi_from_client(PQCQMIMSG pQMI, unsigned size, int clientfd) {
if (qmi_proxy_server_fd == -1)
return -1;
if (pQMI->QMIHdr.QMIType == QMUX_TYPE_CTL) {
QMI_PROXY_MSG *qmi_msg;
if (pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType == QMICTL_SYNC_REQ) {
dprintf("do not allow client send QMICTL_SYNC_REQ\n");
return 0;
}
qmi_msg = malloc(sizeof(QMI_PROXY_MSG) + size);
qlist_init(&qmi_msg->qnode);
qmi_msg->ClientFd = clientfd;
memcpy(qmi_msg->qmi, pQMI, size);
if (qlist_empty(&qmi_proxy_ctl_msg))
send_qmi_to_cdc_wdm(pQMI);
qlist_add_tail(&qmi_proxy_ctl_msg, &qmi_msg->qnode);
}
else {
send_qmi_to_cdc_wdm(pQMI);
}
return 0;
}
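/* Send QMICTL_SYNC_REQ up to 'retry' times; recv_qmi_from_dev() sets qmi_sync_done as soon as the modem answers
   while the local proxy server is not yet running. */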
static int qmi_proxy_init(unsigned retry) {
unsigned i;
QCQMIMSG _QMI;
PQCQMIMSG pQMI = &_QMI;
dprintf("%s enter\n", __func__);
pQMI->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI;
pQMI->QMIHdr.CtlFlags = 0x00;
pQMI->QMIHdr.QMIType = QMUX_TYPE_CTL;
pQMI->QMIHdr.ClientId= 0x00;
pQMI->CTLMsg.QMICTLMsgHdr.CtlFlags = QMICTL_FLAG_REQUEST;
qmi_sync_done = 0;
for (i = 0; i < retry; i++) {
pQMI->CTLMsg.SyncReq.TransactionId = i+1;
pQMI->CTLMsg.SyncReq.QMICTLType = htole16(QMICTL_SYNC_REQ);
pQMI->CTLMsg.SyncReq.Length = htole16(0);
pQMI->QMIHdr.Length =
htole16(le16toh(pQMI->CTLMsg.QMICTLMsgHdr.Length) + sizeof(QCQMI_HDR) + sizeof(QCQMICTL_MSG_HDR) - 1);
if (send_qmi_to_cdc_wdm(pQMI) <= 0)
break;
sleep(1);
if (qmi_sync_done)
break;
}
dprintf("%s %s\n", __func__, qmi_sync_done ? "succful" : "fail");
return qmi_sync_done ? 0 : -1;
}
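/* Main poll loop: watches the cdc-wdm device, the local proxy server socket and every connected client,
   and shuttles QMI messages between them until an error, EOF or a modem reset is detected. */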
static void *qmi_proxy_loop(void *param)
{
PQCQMIMSG pQMI = (PQCQMIMSG)qmi_buf;
struct qlistnode *con_node;
QMI_PROXY_CONNECTION *qmi_con;
(void)param;
dprintf("%s enter thread_id %p\n", __func__, (void *)pthread_self());
qlist_init(&qmi_proxy_connection);
qlist_init(&qmi_proxy_ctl_msg);
while (cdc_wdm_fd > 0 && qmi_proxy_quit == 0) {
struct pollfd pollfds[2+64];
int ne, ret, nevents = 0;
ssize_t nreads;
pollfds[nevents].fd = cdc_wdm_fd;
pollfds[nevents].events = POLLIN;
pollfds[nevents].revents= 0;
nevents++;
if (qmi_proxy_server_fd > 0) {
pollfds[nevents].fd = qmi_proxy_server_fd;
pollfds[nevents].events = POLLIN;
pollfds[nevents].revents= 0;
nevents++;
}
qlist_for_each(con_node, &qmi_proxy_connection) {
qmi_con = qnode_to_item(con_node, QMI_PROXY_CONNECTION, qnode);
pollfds[nevents].fd = qmi_con->ClientFd;
pollfds[nevents].events = POLLIN;
pollfds[nevents].revents= 0;
nevents++;
if (nevents == (sizeof(pollfds)/sizeof(pollfds[0])))
break;
}
#if 0
dprintf("poll ");
for (ne = 0; ne < nevents; ne++) {
dprintf("%d ", pollfds[ne].fd);
}
dprintf("\n");
#endif
do {
//ret = poll(pollfds, nevents, -1);
ret = poll(pollfds, nevents, (qmi_proxy_server_fd > 0) ? -1 : 200);
} while (ret == -1 && errno == EINTR && qmi_proxy_quit == 0);
if (ret < 0) {
dprintf("%s poll=%d, errno: %d (%s)\n", __func__, ret, errno, strerror(errno));
goto qmi_proxy_loop_exit;
}
for (ne = 0; ne < nevents; ne++) {
int fd = pollfds[ne].fd;
short revents = pollfds[ne].revents;
if (revents & (POLLERR | POLLHUP | POLLNVAL)) {
dprintf("%s poll fd = %d, revents = %04x\n", __func__, fd, revents);
if (fd == cdc_wdm_fd) {
goto qmi_proxy_loop_exit;
} else if(fd == qmi_proxy_server_fd) {
} else {
cleanup_qmi_connection(fd, 1);
}
continue;
}
if (!(pollfds[ne].revents & POLLIN)) {
continue;
}
if (fd == qmi_proxy_server_fd) {
accept_qmi_connection(fd);
}
else if (fd == cdc_wdm_fd) {
nreads = read(fd, pQMI, sizeof(qmi_buf));
if (nreads <= 0) {
dprintf("%s read=%d errno: %d (%s)\n", __func__, (int)nreads, errno, strerror(errno));
goto qmi_proxy_loop_exit;
}
#ifdef QUECTEL_QMI_MERGE
if(merge_qmi_rsp_packet(pQMI, &nreads))
continue;
#endif
if (nreads != (le16toh(pQMI->QMIHdr.Length) + 1)) {
dprintf("%s nreads=%d, pQCQMI->QMIHdr.Length = %d\n", __func__, (int)nreads, le16toh(pQMI->QMIHdr.Length));
continue;
}
dump_qmi(pQMI, fd, 'r');
recv_qmi_from_dev(pQMI);
if (modem_reset_flag)
goto qmi_proxy_loop_exit;
}
else {
nreads = read(fd, pQMI, sizeof(qmi_buf));
if (nreads <= 0) {
dprintf("%s read=%d errno: %d (%s)", __func__, (int)nreads, errno, strerror(errno));
cleanup_qmi_connection(fd, 1);
break;
}
if (nreads != (le16toh(pQMI->QMIHdr.Length) + 1)) {
dprintf("%s nreads=%d, pQCQMI->QMIHdr.Length = %d\n", __func__, (int)nreads, le16toh(pQMI->QMIHdr.Length));
continue;
}
dump_qmi(pQMI, fd, 'r');
recv_qmi_from_client(pQMI, nreads, fd);
}
}
}
qmi_proxy_loop_exit:
while (!qlist_empty(&qmi_proxy_connection)) {
QMI_PROXY_CONNECTION *qmi_con = qnode_to_item(qlist_head(&qmi_proxy_connection), QMI_PROXY_CONNECTION, qnode);
cleanup_qmi_connection(qmi_con->ClientFd, 0);
}
dprintf("%s exit, thread_id %p\n", __func__, (void *)pthread_self());
return NULL;
}
static void usage(void) {
dprintf(" -d <device_name> A valid qmi device\n"
" default /dev/cdc-wdm0, but cdc-wdm0 may be invalid\n"
" -i <netcard_name> netcard name\n"
" -v Will show all details\n");
}
static void sig_action(int sig) {
if (qmi_proxy_quit++ == 0) {
if (thread_id)
pthread_kill(thread_id, sig);
}
}
int main(int argc, char *argv[]) {
int opt;
char cdc_wdm[32+1] = "/dev/cdc-wdm0";
char servername[64] = {0};
optind = 1;
signal(SIGINT, sig_action);
while ( -1 != (opt = getopt(argc, argv, "d:i:vh"))) {
switch (opt) {
case 'd':
strcpy(cdc_wdm, optarg);
break;
case 'v':
verbose_debug = 1;
break;
default:
usage();
return 0;
}
}
sprintf(servername, "quectel-qmi-proxy%c", cdc_wdm[strlen(cdc_wdm)-1]);
dprintf("Will use cdc-wdm='%s', proxy='%s'\n", cdc_wdm, servername);
while (qmi_proxy_quit == 0) {
cdc_wdm_fd = open(cdc_wdm, O_RDWR | O_NONBLOCK | O_NOCTTY);
if (cdc_wdm_fd == -1) {
dprintf("Failed to open %s, errno: %d (%s)\n", cdc_wdm, errno, strerror(errno));
sleep(3);
continue;
}
cfmakenoblock(cdc_wdm_fd);
		/* no qmi_proxy_loop is running, create one */
pthread_create(&thread_id, NULL, qmi_proxy_loop, NULL);
if (qmi_proxy_init(60) == 0) {
qmi_proxy_server_fd = create_local_server(servername);
dprintf("qmi_proxy_server_fd = %d\n", qmi_proxy_server_fd);
if (qmi_proxy_server_fd == -1) {
dprintf("Failed to create %s, errno: %d (%s)\n", servername, errno, strerror(errno));
pthread_cancel(thread_id);
}
}
else {
pthread_cancel(thread_id);
}
pthread_join(thread_id, NULL);
thread_id = 0;
if (qmi_proxy_server_fd != -1) {
dprintf("close server %s\n", servername);
close(qmi_proxy_server_fd);
qmi_proxy_server_fd = -1;
}
close(cdc_wdm_fd);
cdc_wdm_fd = -1;
if (qmi_proxy_quit == 0)
sleep(modem_reset_flag ? 30 : 3);
modem_reset_flag = 0;
}
return 0;
}

View File

@ -0,0 +1,894 @@
/******************************************************************************
@file quectel-qrtr-proxy.c
@brief The qrtr proxy.
DESCRIPTION
Connectivity Management Tool for USB/PCIE network adapter of Quectel wireless cellular modules.
INITIALIZATION AND SEQUENCING REQUIREMENTS
None.
---------------------------------------------------------------------------
Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved.
Quectel Wireless Solution Proprietary and Confidential.
---------------------------------------------------------------------------
******************************************************************************/
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdarg.h>
#include <stddef.h>
#include <fcntl.h>
#include <pthread.h>
#include <poll.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/ioctl.h>
#include <linux/un.h>
#include <linux/if.h>
#include <dirent.h>
#include <signal.h>
#include <inttypes.h>
#include <linux/socket.h>
#include "qrtr.h"
#include "qendian.h"
#include "qlist.h"
#include "QCQMI.h"
#include "QCQCTL.h"
#include "QCQMUX.h"
static const char * get_time(void) {
static char time_buf[128];
struct timeval tv;
time_t time;
suseconds_t millitm;
struct tm *ti;
gettimeofday (&tv, NULL);
time= tv.tv_sec;
millitm = (tv.tv_usec + 500) / 1000;
if (millitm == 1000) {
++time;
millitm = 0;
}
ti = localtime(&time);
sprintf(time_buf, "[%02d-%02d_%02d:%02d:%02d:%03d]", ti->tm_mon+1, ti->tm_mday, ti->tm_hour, ti->tm_min, ti->tm_sec, (int)millitm);
return time_buf;
}
#define dprintf(fmt, args...) do { fprintf(stdout, "%s " fmt, get_time(), ##args); } while(0);
#define SYSCHECK(c) do{if((c)<0) {dprintf("%s %d error: '%s' (code: %d)\n", __func__, __LINE__, strerror(errno), errno); return -1;}}while(0)
#define cfmakenoblock(fd) do{fcntl(fd, F_SETFL, fcntl(fd,F_GETFL) | O_NONBLOCK);}while(0)
#define align_4(_len) (((_len) + 3) & ~3)
typedef struct {
struct qlistnode qnode;
int ClientFd;
QCQMIMSG qrtr[0];
} QRTR_PROXY_MSG;
typedef struct {
struct qlistnode qnode;
uint8_t QMIType;
uint8_t ClientId;
uint32_t node_id;
uint32_t port_id;
unsigned AccessTime;
} QRTR_PROXY_CLINET;
typedef struct {
struct qlistnode qnode;
struct qlistnode client_qnode;
int ClientFd;
unsigned AccessTime;
} QRTR_PROXY_CONNECTION;
typedef struct {
struct qlistnode qnode;
uint32_t service;
uint32_t version;
uint32_t instance;
uint32_t node;
uint32_t port;
__le32 src_node_id;
__le32 src_port_id;
} QRTR_SERVICE;
static int qrtr_proxy_quit = 0;
static pthread_t thread_id = 0;
static int cdc_wdm_fd = -1;
static int qrtr_proxy_server_fd = -1;
static struct qlistnode qrtr_proxy_connection;
static struct qlistnode qrtr_server_list;
static int verbose_debug = 0;
static uint32_t node_modem = 3; //IPQ ~ 3, QCM ~ 0
static uint32_t node_myself = 1;
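/* Look up a previously advertised QRTR service by its QMI service type. */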
static QRTR_SERVICE *find_qrtr_service(uint8_t QMIType)
{
struct qlistnode *node;
qlist_for_each (node, &qrtr_server_list) {
QRTR_SERVICE *srv = qnode_to_item(node, QRTR_SERVICE, qnode);
if (srv->service == QMIType)
return srv;
}
return NULL;
}
static uint8_t client_bitmap[0xf0];
static uint8_t port_bitmap[0xff0];
static int alloc_client_id(void) {
int id = 1;
for (id = 1; id < (int)sizeof(client_bitmap); id++) {
if (client_bitmap[id] == 0) {
client_bitmap[id] = id;
return id;
}
}
dprintf("NOT find %s()\n", __func__);
return 0;
}
static void free_client_id(int id) {
if (id < (int)sizeof(client_bitmap) && client_bitmap[id] == id) {
client_bitmap[id] = 0;
return;
}
dprintf("NOT find %s(id=%d)\n", __func__, id);
}
static int alloc_port_id(void) {
int id = 1;
for (id = 1; id < (int)sizeof(port_bitmap); id++) {
if (port_bitmap[id] == 0) {
port_bitmap[id] = id;
return id;
}
}
dprintf("NOT find %s()\n", __func__);
return 0;
}
static void free_port_id(int id) {
if (id < (int)sizeof(port_bitmap) && port_bitmap[id] == id) {
port_bitmap[id] = 0;
return;
}
dprintf("NOT find %s(id=%d)\n", __func__, id);
}
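/* Debug helper: hex-dump the first bytes of a QRTR frame and decode its v1 header. */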
static void dump_qrtr(void *buf, size_t len, char flag)
{
size_t i;
static char printf_buf[1024];
int cnt = 0, limit=1024;
unsigned char *d = (unsigned char *)buf;
struct qrtr_hdr_v1 *hdr = (struct qrtr_hdr_v1 *)buf;
const char *ctrl_pkt_strings[] = {
[QRTR_TYPE_DATA] = "data",
[QRTR_TYPE_HELLO] = "hello",
[QRTR_TYPE_BYE] = "bye",
[QRTR_TYPE_NEW_SERVER] = "new-server",
[QRTR_TYPE_DEL_SERVER] = "del-server",
[QRTR_TYPE_DEL_CLIENT] = "del-client",
[QRTR_TYPE_RESUME_TX] = "resume-tx",
[QRTR_TYPE_EXIT] = "exit",
[QRTR_TYPE_PING] = "ping",
[QRTR_TYPE_NEW_LOOKUP] = "new-lookup",
[QRTR_TYPE_DEL_LOOKUP] = "del-lookup",
};
for (i = 0; i < len && i < 64; i++) {
if (i%4 == 0)
cnt += snprintf(printf_buf+cnt, limit-cnt, " ");
cnt += snprintf(printf_buf+cnt, limit-cnt, "%02x", d[i]);
}
dprintf("%s\n", printf_buf);
dprintf("%c ver=%d, type=%d(%s), %x,%x -> %x,%x, confirm_rx=%d, size=%u\n",
flag,
le32toh(hdr->version), le32toh(hdr->type), ctrl_pkt_strings[le32toh(hdr->type)],
le32toh(hdr->src_node_id), le32toh(hdr->src_port_id), le32toh(hdr->dst_node_id), le32toh(hdr->dst_port_id),
le32toh(hdr->confirm_rx), le32toh(hdr->size));
}
static int send_qmi_to_client(PQCQMIMSG pQMI, int fd) {
struct pollfd pollfds[]= {{fd, POLLOUT, 0}};
ssize_t ret = 0;
ssize_t size = le16toh(pQMI->QMIHdr.Length) + 1;
do {
ret = poll(pollfds, sizeof(pollfds)/sizeof(pollfds[0]), 5000);
} while (ret == -1 && errno == EINTR && qrtr_proxy_quit == 0);
if (pollfds[0].revents & POLLOUT) {
ret = write(fd, pQMI, size);
}
return ret == size ? 0 : -1;
}
static int send_qrtr_to_dev(struct qrtr_hdr_v1 *hdr, int fd) {
struct pollfd pollfds[]= {{fd, POLLOUT, 0}};
ssize_t ret = 0;
ssize_t size = align_4(le32toh(hdr->size) + sizeof(*hdr));
do {
ret = poll(pollfds, sizeof(pollfds)/sizeof(pollfds[0]), 5000);
} while (ret == -1 && errno == EINTR && qrtr_proxy_quit == 0);
if (pollfds[0].revents & POLLOUT) {
ret = write(fd, hdr, size);
}
return ret == size ? 0 : -1;
}
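/* Wrap a payload in a qrtr_hdr_v1 (padded to 4-byte alignment) and write it to the IPCR device. */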
static int qrtr_node_enqueue(const void *data, size_t len,
int type, struct sockaddr_qrtr *from,
struct sockaddr_qrtr *to, unsigned int confirm_rx)
{
int rc = -1;
size_t size = sizeof(struct qrtr_hdr_v1) + len;
struct qrtr_hdr_v1 *hdr = (struct qrtr_hdr_v1 *)malloc(align_4(size));
if (hdr) {
hdr->version = htole32(QRTR_PROTO_VER_1);
hdr->type = htole32(type);
hdr->src_node_id = htole32(from->sq_node);
hdr->src_port_id = htole32(from->sq_port);
hdr->dst_node_id = htole32(to->sq_node);
hdr->dst_port_id = htole32(to->sq_port);
hdr->size = htole32(len);
hdr->confirm_rx = htole32(!!confirm_rx);
memcpy(hdr + 1, data, len);
dump_qrtr(hdr, size, '>');
		rc = send_qrtr_to_dev(hdr, cdc_wdm_fd);
free(hdr);
}
return rc;
}
static int send_ctrl_hello(__u32 sq_node, __u32 sq_port)
{
struct qrtr_ctrl_pkt pkt;
int rc;
struct sockaddr_qrtr to = {AF_QIPCRTR, sq_node, sq_port};
struct sockaddr_qrtr from = {AF_QIPCRTR, node_myself, QRTR_PORT_CTRL};
memset(&pkt, 0, sizeof(pkt));
pkt.cmd = htole32(QRTR_TYPE_HELLO);
rc = qrtr_node_enqueue(&pkt, sizeof(pkt), QRTR_TYPE_HELLO, &from, &to, 0);
if (rc < 0)
return rc;
return 0;
}
static int ctrl_cmd_del_client(__u32 sq_node, __u32 sq_port, uint8_t QMIType)
{
struct qrtr_ctrl_pkt pkt;
int rc;
struct sockaddr_qrtr to = {AF_QIPCRTR, QRTR_NODE_BCAST, QRTR_PORT_CTRL};
struct sockaddr_qrtr from = {AF_QIPCRTR, sq_node, sq_port};
QRTR_SERVICE *srv = find_qrtr_service(QMIType);
if (srv) {
to.sq_node = srv->src_node_id;
}
memset(&pkt, 0, sizeof(pkt));
pkt.cmd = htole32(QRTR_TYPE_DEL_CLIENT);
pkt.client.node = htole32(sq_node);
pkt.client.port = htole32(sq_port);
rc = qrtr_node_enqueue(&pkt, sizeof(pkt), QRTR_TYPE_DATA, &from, &to, 0);
if (rc < 0)
return rc;
return 0;
}
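/* Track QRTR_TYPE_NEW_SERVER / QRTR_TYPE_DEL_SERVER control packets from the modem node and keep qrtr_server_list in sync. */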
static void handle_server_change(struct qrtr_hdr_v1 *hdr) {
	struct qrtr_ctrl_pkt *pkt = (struct qrtr_ctrl_pkt *)(hdr + 1);
	uint32_t type = le32toh(hdr->type);
	QRTR_SERVICE *s;
	dprintf ("[qrtr] %s server on %u:%u(%u:%u) -> service %u, instance %x\n",
		QRTR_TYPE_NEW_SERVER == type ? "add" : "remove",
		le32toh(pkt->server.node), le32toh(pkt->server.port),
		le32toh(hdr->src_node_id), le32toh(hdr->src_port_id),
		le32toh(pkt->server.service), le32toh(pkt->server.instance));
	if (le32toh(pkt->server.node) != node_modem) {
		return; //we only care about services on the modem node
	}
	if (QRTR_TYPE_NEW_SERVER == type) {
		s = (QRTR_SERVICE *)malloc(sizeof(QRTR_SERVICE));
		if (!s)
			return;
		qlist_init(&s->qnode);
		s->service = le32toh(pkt->server.service);
		s->version = le32toh(pkt->server.instance) & 0xff;
		s->instance = le32toh(pkt->server.instance) >> 8;
		s->node = le32toh(pkt->server.node);
		s->port = le32toh(pkt->server.port);
		s->src_node_id = le32toh(hdr->src_node_id);
		s->src_port_id = le32toh(hdr->src_port_id);
		qlist_add_tail(&qrtr_server_list, &s->qnode);
	}
	else if (QRTR_TYPE_DEL_SERVER == type) {
		struct qlistnode *node;
		//remove and free the list entry that matches the deleted service
		qlist_for_each (node, &qrtr_server_list) {
			s = qnode_to_item(node, QRTR_SERVICE, qnode);
			if (s->service == le32toh(pkt->server.service)
				&& s->port == le32toh(pkt->server.port)) {
				qlist_remove(&s->qnode);
				free(s);
				break;
			}
		}
	}
}
static int create_local_server(const char *name) {
int sockfd = -1;
int reuse_addr = 1;
struct sockaddr_un sockaddr;
socklen_t alen;
/*Create server socket*/
SYSCHECK(sockfd = socket(AF_LOCAL, SOCK_STREAM, 0));
memset(&sockaddr, 0, sizeof(sockaddr));
sockaddr.sun_family = AF_LOCAL;
sockaddr.sun_path[0] = 0;
memcpy(sockaddr.sun_path + 1, name, strlen(name) );
alen = strlen(name) + offsetof(struct sockaddr_un, sun_path) + 1;
SYSCHECK(setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &reuse_addr,sizeof(reuse_addr)));
if(bind(sockfd, (struct sockaddr *)&sockaddr, alen) < 0) {
close(sockfd);
dprintf("bind %s errno: %d (%s)\n", name, errno, strerror(errno));
return -1;
}
dprintf("local server: %s sockfd = %d\n", name, sockfd);
cfmakenoblock(sockfd);
listen(sockfd, 1);
return sockfd;
}
static uint8_t alloc_qrtr_client_id(QRTR_PROXY_CONNECTION *qrtr_con, uint8_t QMIType) {
QRTR_PROXY_CLINET *qrtr_client = (QRTR_PROXY_CLINET *)malloc(sizeof(QRTR_PROXY_CLINET));
qlist_init(&qrtr_client->qnode);
qrtr_client->QMIType = QMIType;
qrtr_client->ClientId = alloc_client_id();
qrtr_client->node_id = 1;
qrtr_client->port_id = alloc_port_id();
qrtr_client->AccessTime = 0;
dprintf("+++ ClientFd=%d QMIType=%d ClientId=%d, node_id=%d, port_id=%d\n",
qrtr_con->ClientFd, qrtr_client->QMIType, qrtr_client->ClientId,
qrtr_client->node_id, qrtr_client->port_id);
qlist_add_tail(&qrtr_con->client_qnode, &qrtr_client->qnode);
return qrtr_client->ClientId;
}
static void release_qrtr_client_id(QRTR_PROXY_CONNECTION *qrtr_con, uint8_t QMIType, uint8_t ClientId) {
struct qlistnode *client_node;
int find = 0;
qlist_for_each (client_node, &qrtr_con->client_qnode) {
QRTR_PROXY_CLINET *qrtr_client = qnode_to_item(client_node, QRTR_PROXY_CLINET, qnode);
if (QMIType == qrtr_client->QMIType && ClientId == qrtr_client->ClientId) {
dprintf("--- ClientFd=%d QMIType=%d ClientId=%d, node_id=%d, port_id=%d\n",
qrtr_con->ClientFd, qrtr_client->QMIType, qrtr_client->ClientId,
qrtr_client->node_id, qrtr_client->port_id);
ctrl_cmd_del_client(qrtr_client->node_id, qrtr_client->port_id, qrtr_client->QMIType);
free_client_id(qrtr_client->ClientId);
free_port_id(qrtr_client->port_id);
qlist_remove(&qrtr_client->qnode);
free(qrtr_client);
find++;
break;
}
}
if (!find) {
dprintf("NOT find on %s(ClientFd=%d, QMIType=%d, ClientId=%d)\n",
__func__, qrtr_con->ClientFd, QMIType, ClientId);
}
}
static void accept_qrtr_connection(int serverfd) {
int clientfd = -1;
unsigned char addr[128];
socklen_t alen = sizeof(addr);
QRTR_PROXY_CONNECTION *qrtr_con;
clientfd = accept(serverfd, (struct sockaddr *)addr, &alen);
qrtr_con = (QRTR_PROXY_CONNECTION *)malloc(sizeof(QRTR_PROXY_CONNECTION));
if (qrtr_con) {
qlist_init(&qrtr_con->qnode);
qlist_init(&qrtr_con->client_qnode);
qrtr_con->ClientFd= clientfd;
qrtr_con->AccessTime = 0;
dprintf("+++ ClientFd=%d\n", qrtr_con->ClientFd);
qlist_add_tail(&qrtr_proxy_connection, &qrtr_con->qnode);
}
cfmakenoblock(clientfd);
}
static void cleanup_qrtr_connection(int clientfd) {
struct qlistnode *con_node;
int find = 0;
qlist_for_each(con_node, &qrtr_proxy_connection) {
QRTR_PROXY_CONNECTION *qrtr_con = qnode_to_item(con_node, QRTR_PROXY_CONNECTION, qnode);
if (qrtr_con->ClientFd == clientfd) {
while (!qlist_empty(&qrtr_con->client_qnode)) {
QRTR_PROXY_CLINET *qrtr_client = qnode_to_item(qlist_head(&qrtr_con->client_qnode), QRTR_PROXY_CLINET, qnode);
release_qrtr_client_id(qrtr_con, qrtr_client->QMIType, qrtr_client->ClientId);
}
dprintf("--- ClientFd=%d\n", qrtr_con->ClientFd);
close(qrtr_con->ClientFd);
qlist_remove(&qrtr_con->qnode);
free(qrtr_con);
find = 1;
break;
}
}
if (!find) {
dprintf("NOT find on %s(ClientFd=%d)\n", __func__, clientfd);
}
}
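/* Handle a QRTR frame from the device: answer HELLO, track server add/remove, convert DATA frames back into
   QMI messages for the owning client, and acknowledge confirm_rx with RESUME_TX. */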
static void recv_qrtr_from_dev(struct qrtr_hdr_v1 *hdr) {
int find = 0;
uint32_t type = le32toh(hdr->type);
if (type == QRTR_TYPE_HELLO) {
send_ctrl_hello(le32toh(hdr->src_node_id), le32toh(hdr->src_port_id));
find++;
}
else if (type == QRTR_TYPE_NEW_SERVER || type == QRTR_TYPE_DEL_SERVER) {
handle_server_change(hdr);
find++;
}
else if (type == QRTR_TYPE_DATA) {
struct qlistnode *con_node, *client_node;
qlist_for_each(con_node, &qrtr_proxy_connection) {
QRTR_PROXY_CONNECTION *qrtr_con = qnode_to_item(con_node, QRTR_PROXY_CONNECTION, qnode);
qlist_for_each(client_node, &qrtr_con->client_qnode) {
QRTR_PROXY_CLINET *qrtr_client = qnode_to_item(client_node, QRTR_PROXY_CLINET, qnode);
if (qrtr_client->node_id == le32toh(hdr->dst_node_id) && qrtr_client->port_id == le32toh(hdr->dst_port_id)) {
PQCQMIMSG pQMI = (PQCQMIMSG)malloc(hdr->size + sizeof(QCQMI_HDR));
if (pQMI) {
pQMI->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI;
pQMI->QMIHdr.Length = htole16(hdr->size + sizeof(QCQMI_HDR) - 1);
pQMI->QMIHdr.CtlFlags = 0x00;
pQMI->QMIHdr.QMIType = qrtr_client->QMIType;
pQMI->QMIHdr.ClientId = qrtr_client->ClientId;
memcpy(&pQMI->MUXMsg, hdr + 1, hdr->size);
send_qmi_to_client(pQMI, qrtr_con->ClientFd);
free(pQMI);
find++;
}
}
}
}
if (hdr->confirm_rx) {
struct qrtr_ctrl_pkt pkt;
struct sockaddr_qrtr from = {AF_QIPCRTR, le32toh(hdr->dst_node_id), le32toh(hdr->dst_port_id)};
struct sockaddr_qrtr to = {AF_QIPCRTR, le32toh(hdr->src_node_id), le32toh(hdr->src_port_id)};
memset(&pkt, 0, sizeof(pkt));
pkt.cmd = htole32(QRTR_TYPE_RESUME_TX);
pkt.client.node = hdr->dst_node_id;
pkt.client.port = hdr->dst_port_id;
qrtr_node_enqueue(&pkt, sizeof(pkt), QRTR_TYPE_RESUME_TX, &from, &to, 0);
}
}
else if (type == QRTR_TYPE_RESUME_TX) {
}
if (!find) {
dprintf("NOT find on %s()\n", __func__);
}
}
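/* Handle a QMI message from a local client: the proxy emulates the QMI CTL service (client id alloc/release,
   get version) and converts all other messages into QRTR DATA frames addressed to the matching service on the modem. */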
static int recv_qmi_from_client(PQCQMIMSG pQMI, int clientfd) {
QRTR_PROXY_CONNECTION *qrtr_con;
struct qlistnode *con_node, *client_node;
int find = 0;
qlist_for_each(con_node, &qrtr_proxy_connection) {
qrtr_con = qnode_to_item(con_node, QRTR_PROXY_CONNECTION, qnode);
if (qrtr_con->ClientFd == clientfd)
break;
qrtr_con = NULL;
}
if (!qrtr_con) {
return -1;
}
	if (pQMI->QMIHdr.QMIType == QMUX_TYPE_CTL) {
if (pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType == QMICTL_SYNC_REQ) {
dprintf("do not allow client send QMICTL_SYNC_REQ\n");
return 0;
}
else if (le16toh(pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType) == QMICTL_GET_CLIENT_ID_REQ) {
uint8_t QMIType = pQMI->CTLMsg.GetClientIdReq.QMIType;
PQCQMIMSG pRsp = (PQCQMIMSG)malloc(256);
if (pRsp) {
uint8_t ClientId = 0;
if (find_qrtr_service(QMIType)) {
ClientId = alloc_qrtr_client_id(qrtr_con, QMIType);
}
pRsp->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI;
pRsp->QMIHdr.Length = htole16(sizeof(pRsp->CTLMsg.GetClientIdRsp) + sizeof(pRsp->QMIHdr) - 1);
pRsp->QMIHdr.CtlFlags = 0x00;
pRsp->QMIHdr.QMIType = QMUX_TYPE_CTL;
pRsp->QMIHdr.ClientId = 0;
pRsp->CTLMsg.QMICTLMsgHdrRsp.CtlFlags = QMICTL_FLAG_RESPONSE;
pRsp->CTLMsg.QMICTLMsgHdrRsp.TransactionId = pQMI->CTLMsg.QMICTLMsgHdr.TransactionId;
pRsp->CTLMsg.QMICTLMsgHdrRsp.QMICTLType = pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType;
pRsp->CTLMsg.QMICTLMsgHdrRsp.Length = htole16(sizeof(pRsp->CTLMsg.GetClientIdRsp) - sizeof(pRsp->CTLMsg.QMICTLMsgHdr));
pRsp->CTLMsg.QMICTLMsgHdrRsp.TLVType = QCTLV_TYPE_RESULT_CODE;
pRsp->CTLMsg.QMICTLMsgHdrRsp.TLVLength = htole16(4);
pRsp->CTLMsg.QMICTLMsgHdrRsp.QMUXResult = htole16(ClientId ? 0 : QMI_RESULT_FAILURE);
pRsp->CTLMsg.QMICTLMsgHdrRsp.QMUXError = htole16(ClientId ? 0 : QMI_ERR_INTERNAL);
pRsp->CTLMsg.GetClientIdRsp.TLV2Type = QCTLV_TYPE_REQUIRED_PARAMETER;
pRsp->CTLMsg.GetClientIdRsp.TLV2Length = htole16(2);
pRsp->CTLMsg.GetClientIdRsp.QMIType = QMIType;
pRsp->CTLMsg.GetClientIdRsp.ClientId = ClientId;
send_qmi_to_client(pRsp, clientfd);
free(pRsp);
find++;
}
}
else if (le16toh(pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType) == QMICTL_RELEASE_CLIENT_ID_REQ) {
PQCQMIMSG pRsp = (PQCQMIMSG)malloc(256);
release_qrtr_client_id(qrtr_con, pQMI->CTLMsg.ReleaseClientIdReq.QMIType, pQMI->CTLMsg.ReleaseClientIdReq.ClientId);
if (pRsp) {
pRsp->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI;
pRsp->QMIHdr.Length = htole16(sizeof(pRsp->CTLMsg.ReleaseClientIdRsp) + sizeof(pRsp->QMIHdr) - 1);
pRsp->QMIHdr.CtlFlags = 0x00;
pRsp->QMIHdr.QMIType = QMUX_TYPE_CTL;
pRsp->QMIHdr.ClientId = 0;
pRsp->CTLMsg.QMICTLMsgHdrRsp.CtlFlags = QMICTL_FLAG_RESPONSE;
pRsp->CTLMsg.QMICTLMsgHdrRsp.TransactionId = pQMI->CTLMsg.QMICTLMsgHdr.TransactionId;
pRsp->CTLMsg.QMICTLMsgHdrRsp.QMICTLType = pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType;
pRsp->CTLMsg.QMICTLMsgHdrRsp.Length = htole16(sizeof(pRsp->CTLMsg.ReleaseClientIdRsp) - sizeof(pRsp->CTLMsg.QMICTLMsgHdr));
pRsp->CTLMsg.QMICTLMsgHdrRsp.TLVType = QCTLV_TYPE_RESULT_CODE;
pRsp->CTLMsg.QMICTLMsgHdrRsp.TLVLength = htole16(4);
pRsp->CTLMsg.QMICTLMsgHdrRsp.QMUXResult = htole16(0);
pRsp->CTLMsg.QMICTLMsgHdrRsp.QMUXError = htole16(0);
pRsp->CTLMsg.ReleaseClientIdRsp.TLV2Type = QCTLV_TYPE_REQUIRED_PARAMETER;
pRsp->CTLMsg.ReleaseClientIdRsp.TLV2Length = htole16(2);
pRsp->CTLMsg.ReleaseClientIdRsp.QMIType = pQMI->CTLMsg.ReleaseClientIdReq.QMIType;
pRsp->CTLMsg.ReleaseClientIdRsp.ClientId = pQMI->CTLMsg.ReleaseClientIdReq.ClientId;
send_qmi_to_client(pRsp, clientfd);
free(pRsp);
find++;
}
}
else if (le16toh(pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType) == QMICTL_GET_VERSION_REQ) {
PQCQMIMSG pRsp = (PQCQMIMSG)malloc(256);
if (pRsp) {
pRsp->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI;
pRsp->QMIHdr.Length = htole16(sizeof(pRsp->CTLMsg.GetVersionRsp) + sizeof(pRsp->QMIHdr) - 1);
pRsp->QMIHdr.CtlFlags = 0x00;
pRsp->QMIHdr.QMIType = QMUX_TYPE_CTL;
pRsp->QMIHdr.ClientId = 0;
pRsp->CTLMsg.QMICTLMsgHdrRsp.CtlFlags = QMICTL_FLAG_RESPONSE;
pRsp->CTLMsg.QMICTLMsgHdrRsp.TransactionId = pQMI->CTLMsg.QMICTLMsgHdr.TransactionId;
pRsp->CTLMsg.QMICTLMsgHdrRsp.QMICTLType = pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType;
pRsp->CTLMsg.QMICTLMsgHdrRsp.Length = htole16(sizeof(pRsp->CTLMsg.GetVersionRsp) - sizeof(pRsp->CTLMsg.QMICTLMsgHdr));
pRsp->CTLMsg.QMICTLMsgHdrRsp.TLVType = QCTLV_TYPE_RESULT_CODE;
pRsp->CTLMsg.QMICTLMsgHdrRsp.TLVLength = htole16(4);
pRsp->CTLMsg.QMICTLMsgHdrRsp.QMUXResult = htole16(0);
pRsp->CTLMsg.QMICTLMsgHdrRsp.QMUXError = htole16(0);
pRsp->CTLMsg.GetVersionRsp.TLV2Type = QCTLV_TYPE_REQUIRED_PARAMETER;
pRsp->CTLMsg.GetVersionRsp.TLV2Length = htole16(1);
pRsp->CTLMsg.GetVersionRsp.NumElements = 0;
send_qmi_to_client(pRsp, clientfd);
free(pRsp);
find++;
}
}
}
else {
qlist_for_each (client_node, &qrtr_con->client_qnode) {
QRTR_PROXY_CLINET *qrtr_client = qnode_to_item(client_node, QRTR_PROXY_CLINET, qnode);
if (pQMI->QMIHdr.QMIType == qrtr_client->QMIType && pQMI->QMIHdr.ClientId == qrtr_client->ClientId) {
QRTR_SERVICE *srv = find_qrtr_service(pQMI->QMIHdr.QMIType);
if (srv && srv->service) {
struct sockaddr_qrtr from = {AF_QIPCRTR, qrtr_client->node_id, qrtr_client->port_id};
struct sockaddr_qrtr to = {AF_QIPCRTR, srv->node, srv->port};
qrtr_node_enqueue(&pQMI->MUXMsg, le16toh(pQMI->QMIHdr.Length) + 1 - sizeof(QCQMI_HDR),
QRTR_TYPE_DATA, &from, &to, 0);
find++;
}
break;
}
}
}
if (!find) {
dprintf("NOT find on %s()\n", __func__);
}
return 0;
}
static int qrtr_proxy_init(void) {
unsigned i;
int qrtr_sync_done = 0;
dprintf("%s enter\n", __func__);
send_ctrl_hello(QRTR_NODE_BCAST, QRTR_PORT_CTRL);
for (i = 0; i < 10; i++) {
sleep(1);
qrtr_sync_done = !qlist_empty(&qrtr_server_list);
if (qrtr_sync_done)
break;
}
dprintf("%s %s\n", __func__, qrtr_sync_done ? "succful" : "fail");
return qrtr_sync_done ? 0 : -1;
}
static void qrtr_start_server(const char* servername) {
qrtr_proxy_server_fd = create_local_server(servername);
dprintf("qrtr_proxy_server_fd = %d\n", qrtr_proxy_server_fd);
if (qrtr_proxy_server_fd == -1) {
dprintf("Failed to create %s, errno: %d (%s)\n", servername, errno, strerror(errno));
}
}
static void qrtr_close_server(const char* servername) {
if (qrtr_proxy_server_fd != -1) {
dprintf("%s %s\n", __func__, servername);
close(qrtr_proxy_server_fd);
qrtr_proxy_server_fd = -1;
}
}
static void *qrtr_proxy_loop(void *param)
{
void *rx_buf;
struct qlistnode *con_node;
QRTR_PROXY_CONNECTION *qrtr_con;
(void)param;
dprintf("%s enter thread_id %p\n", __func__, (void *)pthread_self());
rx_buf = malloc(8192);
if (!rx_buf)
return NULL;
while (cdc_wdm_fd > 0 && qrtr_proxy_quit == 0) {
struct pollfd pollfds[32];
int ne, ret, nevents = 0;
ssize_t nreads;
pollfds[nevents].fd = cdc_wdm_fd;
pollfds[nevents].events = POLLIN;
pollfds[nevents].revents= 0;
nevents++;
if (qrtr_proxy_server_fd > 0) {
pollfds[nevents].fd = qrtr_proxy_server_fd;
pollfds[nevents].events = POLLIN;
pollfds[nevents].revents= 0;
nevents++;
}
qlist_for_each(con_node, &qrtr_proxy_connection) {
qrtr_con = qnode_to_item(con_node, QRTR_PROXY_CONNECTION, qnode);
pollfds[nevents].fd = qrtr_con->ClientFd;
pollfds[nevents].events = POLLIN;
pollfds[nevents].revents= 0;
nevents++;
if (nevents == (sizeof(pollfds)/sizeof(pollfds[0])))
break;
}
do {
//ret = poll(pollfds, nevents, -1);
ret = poll(pollfds, nevents, (qrtr_proxy_server_fd > 0) ? -1 : 200);
} while (ret == -1 && errno == EINTR && qrtr_proxy_quit == 0);
if (ret < 0) {
dprintf("%s poll=%d, errno: %d (%s)\n", __func__, ret, errno, strerror(errno));
goto qrtr_proxy_loop_exit;
}
for (ne = 0; ne < nevents; ne++) {
int fd = pollfds[ne].fd;
short revents = pollfds[ne].revents;
if (revents & (POLLERR | POLLHUP | POLLNVAL)) {
dprintf("%s poll fd = %d, revents = %04x\n", __func__, fd, revents);
if (fd == cdc_wdm_fd) {
goto qrtr_proxy_loop_exit;
}
else if (fd == qrtr_proxy_server_fd) {
}
else {
cleanup_qrtr_connection(fd);
}
continue;
}
if (!(pollfds[ne].revents & POLLIN)) {
continue;
}
if (fd == qrtr_proxy_server_fd) {
accept_qrtr_connection(fd);
}
else if (fd == cdc_wdm_fd) {
struct qrtr_hdr_v1 *hdr = (struct qrtr_hdr_v1 *)rx_buf;
nreads = read(fd, rx_buf, 8192);
if (nreads <= 0) {
dprintf("%s read=%d errno: %d (%s)\n", __func__, (int)nreads, errno, strerror(errno));
goto qrtr_proxy_loop_exit;
}
else if (nreads != (int)align_4(le32toh(hdr->size) + sizeof(*hdr))) {
dprintf("%s nreads=%d, hdr->size = %d\n", __func__, (int)nreads, le32toh(hdr->size));
continue;
}
dump_qrtr(hdr, nreads, '<');
recv_qrtr_from_dev(hdr);
}
else {
PQCQMIMSG pQMI = (PQCQMIMSG)rx_buf;
nreads = read(fd, rx_buf, 8192);
if (nreads <= 0) {
dprintf("%s read=%d errno: %d (%s)", __func__, (int)nreads, errno, strerror(errno));
cleanup_qrtr_connection(fd);
break;
}
else if (nreads != (le16toh(pQMI->QMIHdr.Length) + 1)) {
dprintf("%s nreads=%d, pQCQMI->QMIHdr.Length = %d\n", __func__, (int)nreads, le16toh(pQMI->QMIHdr.Length));
continue;
}
recv_qmi_from_client(pQMI, fd);
}
}
}
qrtr_proxy_loop_exit:
while (!qlist_empty(&qrtr_proxy_connection)) {
QRTR_PROXY_CONNECTION *qrtr_con = qnode_to_item(qlist_head(&qrtr_proxy_connection), QRTR_PROXY_CONNECTION, qnode);
cleanup_qrtr_connection(qrtr_con->ClientFd);
}
dprintf("%s exit, thread_id %p\n", __func__, (void *)pthread_self());
free(rx_buf);
return NULL;
}
static void usage(void) {
dprintf(" -d <device_name> A valid qrtr device\n"
" default /dev/mhi_IPCR, but mhi_IPCR may be invalid\n"
" -i <netcard_name> netcard name\n"
" -v Will show all details\n");
}
static void sig_action(int sig) {
if (qrtr_proxy_quit == 0) {
qrtr_proxy_quit = 1;
if (thread_id)
pthread_kill(thread_id, sig);
}
}
int main(int argc, char *argv[]) {
int opt;
char cdc_wdm[32+1] = "/dev/mhi_IPCR";
char servername[64] = {0};
signal(SIGINT, sig_action);
signal(SIGTERM, sig_action);
optind = 1;
while ( -1 != (opt = getopt(argc, argv, "d:i:vh"))) {
switch (opt) {
case 'd':
strcpy(cdc_wdm, optarg);
break;
case 'v':
verbose_debug = 1;
break;
default:
usage();
return 0;
}
}
sprintf(servername, "quectel-qrtr-proxy%c", cdc_wdm[strlen(cdc_wdm)-1]);
dprintf("Will use cdc-wdm='%s', proxy='%s'\n", cdc_wdm, servername);
while (qrtr_proxy_quit == 0) {
cdc_wdm_fd = open(cdc_wdm, O_RDWR | O_NONBLOCK | O_NOCTTY);
if (cdc_wdm_fd == -1) {
dprintf("Failed to open %s, errno: %d (%s)\n", cdc_wdm, errno, strerror(errno));
sleep(5);
continue;
}
cfmakenoblock(cdc_wdm_fd);
qlist_init(&qrtr_proxy_connection);
qlist_init(&qrtr_server_list);
pthread_create(&thread_id, NULL, qrtr_proxy_loop, NULL);
if (qrtr_proxy_init() == 0) {
qrtr_start_server(servername);
pthread_join(thread_id, NULL);
qrtr_close_server(servername);
}
else {
pthread_cancel(thread_id);
pthread_join(thread_id, NULL);
}
close(cdc_wdm_fd);
}
return 0;
}

View File

@ -0,0 +1,342 @@
//https://source.codeaurora.org/quic/la/platform/vendor/qcom-opensource/dataservices/tree/rmnetctl
#include <sys/socket.h>
#include <stdint.h>
#include <linux/netlink.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
#include <linux/rtnetlink.h>
#include <linux/gen_stats.h>
#include <net/if.h>
#include <asm/types.h>
#include <linux/rmnet_data.h>
#define RMNETCTL_SUCCESS 0
#define RMNETCTL_LIB_ERR 1
#define RMNETCTL_KERNEL_ERR 2
#define RMNETCTL_INVALID_ARG 3
enum rmnetctl_error_codes_e {
RMNETCTL_API_SUCCESS = 0,
RMNETCTL_API_FIRST_ERR = 1,
RMNETCTL_API_ERR_MESSAGE_SEND = 3,
RMNETCTL_API_ERR_MESSAGE_RECEIVE = 4,
RMNETCTL_INIT_FIRST_ERR = 5,
RMNETCTL_INIT_ERR_PROCESS_ID = RMNETCTL_INIT_FIRST_ERR,
RMNETCTL_INIT_ERR_NETLINK_FD = 6,
RMNETCTL_INIT_ERR_BIND = 7,
RMNETCTL_API_SECOND_ERR = 9,
RMNETCTL_API_ERR_HNDL_INVALID = RMNETCTL_API_SECOND_ERR,
RMNETCTL_API_ERR_RETURN_TYPE = 13,
};
struct rmnetctl_hndl_s {
uint32_t pid;
uint32_t transaction_id;
int netlink_fd;
struct sockaddr_nl src_addr, dest_addr;
};
typedef struct rmnetctl_hndl_s rmnetctl_hndl_t;
#define NLMSG_TAIL(nmsg) \
((struct rtattr *) (((char *)(nmsg)) + NLMSG_ALIGN((nmsg)->nlmsg_len)))
struct nlmsg {
struct nlmsghdr nl_addr;
struct ifinfomsg ifmsg;
char data[500];
};
#define MIN_VALID_PROCESS_ID 0
#define MIN_VALID_SOCKET_FD 0
#define KERNEL_PROCESS_ID 0
#define UNICAST 0
enum {
IFLA_RMNET_UL_AGG_PARAMS = __IFLA_RMNET_MAX,
__IFLA_RMNET_EXT_MAX,
};
struct rmnet_egress_agg_params {
uint16_t agg_size;
uint16_t agg_count;
uint32_t agg_time;
};
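/* Read the netlink ACK for the previous request; an NLMSG_ERROR payload with error == 0 means success. */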
static int rmnet_get_ack(rmnetctl_hndl_t *hndl, uint16_t *error_code)
{
struct nlack {
struct nlmsghdr ackheader;
struct nlmsgerr ackdata;
char data[256];
} ack;
int i;
if (!hndl || !error_code)
return RMNETCTL_INVALID_ARG;
if ((i = recv(hndl->netlink_fd, &ack, sizeof(ack), 0)) < 0) {
*error_code = errno;
return RMNETCTL_API_ERR_MESSAGE_RECEIVE;
}
/*Ack should always be NLMSG_ERROR type*/
if (ack.ackheader.nlmsg_type == NLMSG_ERROR) {
if (ack.ackdata.error == 0) {
*error_code = RMNETCTL_API_SUCCESS;
return RMNETCTL_SUCCESS;
} else {
*error_code = -ack.ackdata.error;
return RMNETCTL_KERNEL_ERR;
}
}
*error_code = RMNETCTL_API_ERR_RETURN_TYPE;
return RMNETCTL_API_FIRST_ERR;
}
static int rtrmnet_ctl_init(rmnetctl_hndl_t **hndl, uint16_t *error_code)
{
struct sockaddr_nl __attribute__((__may_alias__)) *saddr_ptr;
int netlink_fd = -1;
pid_t pid = 0;
if (!hndl || !error_code)
return RMNETCTL_INVALID_ARG;
*hndl = (rmnetctl_hndl_t *)malloc(sizeof(rmnetctl_hndl_t));
if (!*hndl) {
*error_code = RMNETCTL_API_ERR_HNDL_INVALID;
return RMNETCTL_LIB_ERR;
}
memset(*hndl, 0, sizeof(rmnetctl_hndl_t));
pid = getpid();
if (pid < MIN_VALID_PROCESS_ID) {
free(*hndl);
*error_code = RMNETCTL_INIT_ERR_PROCESS_ID;
return RMNETCTL_LIB_ERR;
}
(*hndl)->pid = KERNEL_PROCESS_ID;
netlink_fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
if (netlink_fd < MIN_VALID_SOCKET_FD) {
free(*hndl);
*error_code = RMNETCTL_INIT_ERR_NETLINK_FD;
return RMNETCTL_LIB_ERR;
}
(*hndl)->netlink_fd = netlink_fd;
memset(&(*hndl)->src_addr, 0, sizeof(struct sockaddr_nl));
(*hndl)->src_addr.nl_family = AF_NETLINK;
(*hndl)->src_addr.nl_pid = (*hndl)->pid;
saddr_ptr = &(*hndl)->src_addr;
if (bind((*hndl)->netlink_fd,
(struct sockaddr *)saddr_ptr,
sizeof(struct sockaddr_nl)) < 0) {
close((*hndl)->netlink_fd);
free(*hndl);
*error_code = RMNETCTL_INIT_ERR_BIND;
return RMNETCTL_LIB_ERR;
}
memset(&(*hndl)->dest_addr, 0, sizeof(struct sockaddr_nl));
(*hndl)->dest_addr.nl_family = AF_NETLINK;
(*hndl)->dest_addr.nl_pid = KERNEL_PROCESS_ID;
(*hndl)->dest_addr.nl_groups = UNICAST;
return RMNETCTL_SUCCESS;
}
static int rtrmnet_ctl_deinit(rmnetctl_hndl_t *hndl)
{
if (!hndl)
return RMNETCTL_SUCCESS;
close(hndl->netlink_fd);
free(hndl);
return RMNETCTL_SUCCESS;
}
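/* Build and send an RTM_NEWLINK request that creates an rmnet virtual device on top of 'devname':
   IFLA_LINK carries the parent ifindex, IFLA_IFNAME the new name, and the IFLA_LINKINFO/IFLA_INFO_DATA
   nest carries the mux id, ingress/egress flags and optional uplink aggregation parameters. */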
static int rtrmnet_ctl_newvnd(rmnetctl_hndl_t *hndl, char *devname, char *vndname,
uint16_t *error_code, uint8_t index,
uint32_t flagconfig, uint32_t ul_agg_cnt, uint32_t ul_agg_size)
{
struct rtattr *attrinfo, *datainfo, *linkinfo;
struct ifla_vlan_flags flags;
int devindex = 0, val = 0;
char *kind = "rmnet";
struct nlmsg req;
short id;
if (!hndl || !devname || !vndname || !error_code)
return RMNETCTL_INVALID_ARG;
memset(&req, 0, sizeof(req));
req.nl_addr.nlmsg_type = RTM_NEWLINK;
req.nl_addr.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
req.nl_addr.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL |
NLM_F_ACK;
req.nl_addr.nlmsg_seq = hndl->transaction_id;
hndl->transaction_id++;
/* Get index of devname*/
devindex = if_nametoindex(devname);
	if (devindex == 0) { /* if_nametoindex() returns 0, not a negative value, on failure */
*error_code = errno;
return RMNETCTL_KERNEL_ERR;
}
/* Setup link attr with devindex as data */
val = devindex;
attrinfo = (struct rtattr *)(((char *)&req) +
NLMSG_ALIGN(req.nl_addr.nlmsg_len));
attrinfo->rta_type = IFLA_LINK;
attrinfo->rta_len = RTA_ALIGN(RTA_LENGTH(sizeof(val)));
memcpy(RTA_DATA(attrinfo), &val, sizeof(val));
req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) +
RTA_ALIGN(RTA_LENGTH(sizeof(val)));
/* Set up IFLA info kind RMNET that has linkinfo and type */
attrinfo = (struct rtattr *)(((char *)&req) +
NLMSG_ALIGN(req.nl_addr.nlmsg_len));
attrinfo->rta_type = IFLA_IFNAME;
attrinfo->rta_len = RTA_ALIGN(RTA_LENGTH(strlen(vndname) + 1));
memcpy(RTA_DATA(attrinfo), vndname, strlen(vndname) + 1);
req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) +
RTA_ALIGN(RTA_LENGTH(strlen(vndname) + 1));
linkinfo = (struct rtattr *)(((char *)&req) +
NLMSG_ALIGN(req.nl_addr.nlmsg_len));
linkinfo->rta_type = IFLA_LINKINFO;
linkinfo->rta_len = RTA_ALIGN(RTA_LENGTH(0));
req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) +
RTA_ALIGN(RTA_LENGTH(0));
attrinfo = (struct rtattr *)(((char *)&req) +
NLMSG_ALIGN(req.nl_addr.nlmsg_len));
attrinfo->rta_type = IFLA_INFO_KIND;
attrinfo->rta_len = RTA_ALIGN(RTA_LENGTH(strlen(kind)));
memcpy(RTA_DATA(attrinfo), kind, strlen(kind));
req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) +
RTA_ALIGN(RTA_LENGTH(strlen(kind)));
datainfo = (struct rtattr *)(((char *)&req) +
NLMSG_ALIGN(req.nl_addr.nlmsg_len));
datainfo->rta_type = IFLA_INFO_DATA;
datainfo->rta_len = RTA_ALIGN(RTA_LENGTH(0));
req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) +
RTA_ALIGN(RTA_LENGTH(0));
id = index;
attrinfo = (struct rtattr *)(((char *)&req) +
NLMSG_ALIGN(req.nl_addr.nlmsg_len));
attrinfo->rta_type = IFLA_VLAN_ID;
attrinfo->rta_len = RTA_LENGTH(sizeof(id));
memcpy(RTA_DATA(attrinfo), &id, sizeof(id));
req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) +
RTA_ALIGN(RTA_LENGTH(sizeof(id)));
if (flagconfig != 0) {
flags.mask = flagconfig;
flags.flags = flagconfig;
attrinfo = (struct rtattr *)(((char *)&req) +
NLMSG_ALIGN(req.nl_addr.nlmsg_len));
attrinfo->rta_type = IFLA_VLAN_FLAGS;
attrinfo->rta_len = RTA_LENGTH(sizeof(flags));
memcpy(RTA_DATA(attrinfo), &flags, sizeof(flags));
req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) +
RTA_ALIGN(RTA_LENGTH(sizeof(flags)));
}
if (ul_agg_cnt > 1) {
struct rmnet_egress_agg_params agg_params;
agg_params.agg_size = ul_agg_size;
agg_params.agg_count = ul_agg_cnt;
agg_params.agg_time = 3000000;
attrinfo = (struct rtattr *)(((char *)&req) +
NLMSG_ALIGN(req.nl_addr.nlmsg_len));
attrinfo->rta_type = IFLA_RMNET_UL_AGG_PARAMS;
attrinfo->rta_len = RTA_LENGTH(sizeof(agg_params));
memcpy(RTA_DATA(attrinfo), &agg_params, sizeof(agg_params));
req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) +
RTA_ALIGN(RTA_LENGTH(sizeof(agg_params)));
}
datainfo->rta_len = (char *)NLMSG_TAIL(&req.nl_addr) - (char *)datainfo;
linkinfo->rta_len = (char *)NLMSG_TAIL(&req.nl_addr) - (char *)linkinfo;
if (send(hndl->netlink_fd, &req, req.nl_addr.nlmsg_len, 0) < 0) {
*error_code = RMNETCTL_API_ERR_MESSAGE_SEND;
return RMNETCTL_LIB_ERR;
}
return rmnet_get_ack(hndl, error_code);
}
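/* Public entry point: map the QMAP version to the matching rmnet checksum-offload flags
   (9 -> MAP checksum v5, 8 -> MAP checksum v4, 5 -> plain QMAPv1) and create the virtual netdev. */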
int rtrmnet_ctl_create_vnd(char *devname, char *vndname, uint8_t muxid,
uint32_t qmap_version, uint32_t ul_agg_cnt, uint32_t ul_agg_size)
{
struct rmnetctl_hndl_s *handle;
uint16_t error_code;
int return_code;
uint32_t flagconfig = RMNET_FLAGS_INGRESS_DEAGGREGATION;
printf("%s devname: %s, vndname: %s, muxid: %d, qmap_version: %d\n",
__func__, devname, vndname, muxid, qmap_version);
ul_agg_cnt = 0; //TODO
if (ul_agg_cnt > 1)
flagconfig |= RMNET_EGRESS_FORMAT_AGGREGATION;
if (qmap_version == 9) { //QMAPV5
#ifdef RMNET_FLAGS_INGRESS_MAP_CKSUMV5
flagconfig |= RMNET_FLAGS_INGRESS_MAP_CKSUMV5;
flagconfig |= RMNET_FLAGS_EGRESS_MAP_CKSUMV5;
#else
return -1001;
#endif
}
else if (qmap_version == 8) { //QMAPV4
flagconfig |= RMNET_FLAGS_INGRESS_MAP_CKSUMV4;
flagconfig |= RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
}
else if (qmap_version == 5) { //QMAPV1
}
else {
flagconfig = 0;
}
return_code = rtrmnet_ctl_init(&handle, &error_code);
if (return_code) {
printf("rtrmnet_ctl_init error_code: %d, return_code: %d, errno: %d (%s)\n",
error_code, return_code, errno, strerror(errno));
}
if (return_code == RMNETCTL_SUCCESS) {
return_code = rtrmnet_ctl_newvnd(handle, devname, vndname, &error_code,
muxid, flagconfig, ul_agg_cnt, ul_agg_size);
if (return_code) {
printf("rtrmnet_ctl_newvnd error_code: %d, return_code: %d, errno: %d (%s)\n",
error_code, return_code, errno, strerror(errno));
}
rtrmnet_ctl_deinit(handle);
}
return return_code;
}

View File

@ -0,0 +1,745 @@
/******************************************************************************
@file udhcpc.c
@brief call DHCP tools to obtain IP address.
DESCRIPTION
Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules.
INITIALIZATION AND SEQUENCING REQUIREMENTS
None.
---------------------------------------------------------------------------
Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved.
Quectel Wireless Solution Proprietary and Confidential.
---------------------------------------------------------------------------
******************************************************************************/
#include <sys/socket.h>
#include <sys/select.h>
#include <sys/types.h>
#include <net/if.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <endian.h>
#include "util.h"
#include "QMIThread.h"
extern int ql_get_netcard_carrier_state(const char *devname);
static __inline in_addr_t qmi2addr(uint32_t __x) {
return (__x>>24) | (__x>>8&0xff00) | (__x<<8&0xff0000) | (__x<<24);
}
static int ql_system(const char *shell_cmd) {
dbg_time("%s", shell_cmd);
return system(shell_cmd);
}
static void ifc_init_ifr(const char *name, struct ifreq *ifr)
{
memset(ifr, 0, sizeof(struct ifreq));
no_trunc_strncpy(ifr->ifr_name, name, IFNAMSIZ);
ifr->ifr_name[IFNAMSIZ - 1] = 0;
}
static void ql_set_mtu(const char *ifname, int ifru_mtu) {
int inet_sock;
struct ifreq ifr;
inet_sock = socket(AF_INET, SOCK_DGRAM, 0);
if (inet_sock > 0) {
ifc_init_ifr(ifname, &ifr);
if (!ioctl(inet_sock, SIOCGIFMTU, &ifr)) {
if (ifr.ifr_ifru.ifru_mtu != ifru_mtu) {
dbg_time("change mtu %d -> %d", ifr.ifr_ifru.ifru_mtu , ifru_mtu);
ifr.ifr_ifru.ifru_mtu = ifru_mtu;
ioctl(inet_sock, SIOCSIFMTU, &ifr);
}
}
close(inet_sock);
}
}
static int ifc_get_addr(const char *name, in_addr_t *addr)
{
int inet_sock;
struct ifreq ifr;
int ret = 0;
inet_sock = socket(AF_INET, SOCK_DGRAM, 0);
ifc_init_ifr(name, &ifr);
if (addr != NULL) {
ret = ioctl(inet_sock, SIOCGIFADDR, &ifr);
if (ret < 0) {
*addr = 0;
} else {
*addr = ((struct sockaddr_in*) &ifr.ifr_addr)->sin_addr.s_addr;
}
}
close(inet_sock);
return ret;
}
static short ifc_get_flags(const char *ifname)
{
int inet_sock;
struct ifreq ifr;
int ret = 0;
inet_sock = socket(AF_INET, SOCK_DGRAM, 0);
if (inet_sock > 0) {
ifc_init_ifr(ifname, &ifr);
if (!ioctl(inet_sock, SIOCGIFFLAGS, &ifr)) {
ret = ifr.ifr_ifru.ifru_flags;
}
close(inet_sock);
}
return ret;
}
static void ifc_set_state(const char *ifname, int state) {
char shell_cmd[128];
if (!access("/sbin/ip", X_OK)) {
snprintf(shell_cmd, sizeof(shell_cmd), "ip link set dev %s %s", ifname, state ? "up" : "down");
} else {
snprintf(shell_cmd, sizeof(shell_cmd), "ifconfig %s %s", ifname, state ? "up" : "down");
}
ql_system(shell_cmd);
}
static int ql_netcard_ipv4_address_check(const char *ifname, in_addr_t ip) {
in_addr_t addr = 0;
ifc_get_addr(ifname, &addr);
return addr == ip;
}
static int ql_raw_ip_mode_check(const char *ifname, uint32_t ip) {
int fd;
char raw_ip[128];
char mode[2] = "X";
int mode_change = 0;
if (ql_netcard_ipv4_address_check(ifname, qmi2addr(ip)))
return 0;
snprintf(raw_ip, sizeof(raw_ip), "/sys/class/net/%s/qmi/raw_ip", ifname);
if (access(raw_ip, F_OK))
return 0;
fd = open(raw_ip, O_RDWR | O_NONBLOCK | O_NOCTTY);
if (fd < 0) {
dbg_time("%s %d fail to open(%s), errno:%d (%s)", __FILE__, __LINE__, raw_ip, errno, strerror(errno));
return 0;
}
if (read(fd, mode, 2) == -1) {};
if (mode[0] == '0' || mode[0] == 'N') {
dbg_time("File:%s Line:%d udhcpc fail to get ip address, try next:", __func__, __LINE__);
ifc_set_state(ifname, 0);
dbg_time("echo Y > /sys/class/net/%s/qmi/raw_ip", ifname);
mode[0] = 'Y';
if (write(fd, mode, 2) == -1) {};
mode_change = 1;
ifc_set_state(ifname, 1);
}
close(fd);
return mode_change;
}
static void* udhcpc_thread_function(void* arg) {
FILE * udhcpc_fp;
char *udhcpc_cmd = (char *)arg;
if (udhcpc_cmd == NULL)
return NULL;
dbg_time("%s", udhcpc_cmd);
udhcpc_fp = popen(udhcpc_cmd, "r");
free(udhcpc_cmd);
if (udhcpc_fp) {
char buf[0xff];
buf[sizeof(buf)-1] = '\0';
while((fgets(buf, sizeof(buf)-1, udhcpc_fp)) != NULL) {
if ((strlen(buf) > 1) && (buf[strlen(buf) - 1] == '\n'))
buf[strlen(buf) - 1] = '\0';
dbg_time("%s", buf);
}
pclose(udhcpc_fp);
}
return NULL;
}
//#define USE_DHCLIENT
#ifdef USE_DHCLIENT
static int dhclient_alive = 0;
#endif
static int dibbler_client_alive = 0;
void ql_set_driver_link_state(PROFILE_T *profile, int link_state) {
char link_file[128];
int fd;
int new_state = 0;
snprintf(link_file, sizeof(link_file), "/sys/class/net/%s/link_state", profile->usbnet_adapter);
fd = open(link_file, O_RDWR | O_NONBLOCK | O_NOCTTY);
if (fd == -1) {
if (errno != ENOENT)
dbg_time("Fail to access %s, errno: %d (%s)", link_file, errno, strerror(errno));
return;
}
if (profile->qmap_mode <= 1)
new_state = !!link_state;
else {
		//0x80 means link down for this PDP
new_state = (link_state ? 0x00 : 0x80) + (profile->muxid & 0x7F);
}
snprintf(link_file, sizeof(link_file), "%d\n", new_state);
if (write(fd, link_file, sizeof(link_file)) == -1) {};
if (link_state == 0 && profile->qmapnet_adapter[0]
&& strcmp(profile->qmapnet_adapter, profile->usbnet_adapter)) {
size_t rc;
lseek(fd, 0, SEEK_SET);
rc = read(fd, link_file, sizeof(link_file));
if (rc > 1 && (!strncasecmp(link_file, "0\n", 2) || !strncasecmp(link_file, "0x0\n", 4))) {
ifc_set_state(profile->usbnet_adapter, 0);
}
}
close(fd);
}
static const char *ipv4Str(const uint32_t Address) {
static char str[] = {"255.225.255.255"};
uint8_t *ip = (uint8_t *)&Address;
snprintf(str, sizeof(str), "%d.%d.%d.%d", ip[3], ip[2], ip[1], ip[0]);
return str;
}
static const char *ipv6Str(const UCHAR Address[16]) {
static char str[64];
uint16_t ip[8];
int i;
for (i = 0; i < 8; i++) {
ip[i] = (Address[i*2]<<8) + Address[i*2+1];
}
snprintf(str, sizeof(str), "%x:%x:%x:%x:%x:%x:%x:%x",
ip[0], ip[1], ip[2], ip[3], ip[4], ip[5], ip[6], ip[7]);
return str;
}
void update_ipv4_address(const char *ifname, const char *ip, const char *gw, unsigned prefix)
{
char shell_cmd[128];
if (!ifname)
return;
if (!access("/sbin/ip", X_OK)) {
snprintf(shell_cmd, sizeof(shell_cmd), "ip -%d address flush dev %s", 4, ifname);
ql_system(shell_cmd);
snprintf(shell_cmd, sizeof(shell_cmd), "ip -%d address add %s/%u dev %s", 4, ip, prefix, ifname);
ql_system(shell_cmd);
//ping6 www.qq.com
snprintf(shell_cmd, sizeof(shell_cmd), "ip -%d route add default via %s dev %s", 4, gw, ifname);
ql_system(shell_cmd);
} else {
unsigned n = (0xFFFFFFFF >> (32 - prefix)) << (32 - prefix);
// n = (n>>24) | (n>>8&0xff00) | (n<<8&0xff0000) | (n<<24);
snprintf(shell_cmd, sizeof(shell_cmd), "ifconfig %s %s netmask %s", ifname, ip, ipv4Str(n));
ql_system(shell_cmd);
//Resetting default routes
snprintf(shell_cmd, sizeof(shell_cmd), "route del default dev %s", ifname);
while(!system(shell_cmd));
snprintf(shell_cmd, sizeof(shell_cmd), "route add default gw %s dev %s", gw, ifname);
ql_system(shell_cmd);
}
}
void update_ipv6_address(const char *ifname, const char *ip, const char *gw, unsigned prefix) {
char shell_cmd[128];
(void)gw;
if (!access("/sbin/ip", X_OK)) {
snprintf(shell_cmd, sizeof(shell_cmd), "ip -%d address flush dev %s", 6, ifname);
ql_system(shell_cmd);
snprintf(shell_cmd, sizeof(shell_cmd), "ip -%d address add %s/%u dev %s", 6, ip, prefix, ifname);
ql_system(shell_cmd);
//ping6 www.qq.com
snprintf(shell_cmd, sizeof(shell_cmd), "ip -%d route add default dev %s", 6, ifname);
ql_system(shell_cmd);
} else {
snprintf(shell_cmd, sizeof(shell_cmd), "ifconfig %s %s/%d", ifname, ip, prefix);
ql_system(shell_cmd);
snprintf(shell_cmd, sizeof(shell_cmd), "route -A inet6 add default dev %s", ifname);
ql_system(shell_cmd);
}
}
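/* Apply the IPv4/IPv6 configuration reported over QMI directly to the interface
   (address, default route and resolv.conf) without running a DHCP client. */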
static void update_ip_address_by_qmi(const char *ifname, const IPV4_T *ipv4, const IPV6_T *ipv6) {
char *d1, *d2;
if (ipv4 && ipv4->Address) {
d1 = strdup(ipv4Str(ipv4->Address));
d2 = strdup(ipv4Str(ipv4->Gateway));
unsigned prefix = 0;
unsigned n = 0;
for (n = 0; n < 32; n++) {
if (ipv4->SubnetMask&((unsigned)1<<n)) {
prefix++;
}
}
update_ipv4_address(ifname, d1, d2, prefix);
free(d1); free(d2);
//Adding DNS
if (ipv4->DnsPrimary) {
d1 = strdup(ipv4Str(ipv4->DnsPrimary));
d2 = strdup(ipv4Str(ipv4->DnsSecondary ? ipv4->DnsSecondary : ipv4->DnsPrimary));
update_resolv_conf(4, ifname, d1, d2);
free(d1); free(d2);
}
}
if (ipv6 && ipv6->Address[0] && ipv6->PrefixLengthIPAddr) {
d1 = strdup(ipv6Str(ipv6->Address));
d2 = strdup(ipv6Str(ipv6->Gateway));
update_ipv6_address(ifname, d1, d2, ipv6->PrefixLengthIPAddr);
free(d1); free(d2);
//Adding DNS
if (ipv6->DnsPrimary[0]) {
d1 = strdup(ipv6Str(ipv6->DnsPrimary));
d2 = strdup(ipv6Str(ipv6->DnsSecondary[0] ? ipv6->DnsSecondary : ipv6->DnsPrimary));
update_resolv_conf(6, ifname, d1, d2);
free(d1); free(d2);
}
}
}
//#define QL_OPENWER_NETWORK_SETUP
#ifdef QL_OPENWER_NETWORK_SETUP
static const char *openwrt_lan = "br-lan";
static const char *openwrt_wan = "wwan0";
static int ql_openwrt_system(const char *cmd) {
int i;
int ret = 1;
char shell_cmd[128];
snprintf(shell_cmd, sizeof(shell_cmd), "%s 2>1 > /dev/null", cmd);
for (i = 0; i < 15; i++) {
dbg_time("%s", cmd);
ret = system(shell_cmd);
if (!ret)
break;
sleep(1);
}
return ret;
}
static int ql_openwrt_is_wan(const char *ifname) {
if (openwrt_lan == NULL) {
system("uci show network.wan.ifname");
}
if (strcmp(ifname, openwrt_wan))
return 0;
return 1;
}
static void ql_openwrt_setup_wan(const char *ifname, const IPV4_T *ipv4) {
FILE *fp = NULL;
char config[64];
snprintf(config, sizeof(config), "/tmp/rmnet_%s_ipv4config", ifname);
if (ipv4 == NULL) {
if (ql_openwrt_is_wan(ifname))
ql_openwrt_system("ifdown wan");
return;
}
fp = fopen(config, "w");
if (fp == NULL)
return;
fprintf(fp, "IFNAME=\"%s\"\n", ifname);
fprintf(fp, "PUBLIC_IP=\"%s\"\n", ipv4Str(ipv4->Address));
fprintf(fp, "NETMASK=\"%s\"\n", ipv4Str(ipv4->SubnetMask));
fprintf(fp, "GATEWAY=\"%s\"\n", ipv4Str(ipv4->Gateway));
fprintf(fp, "DNSSERVERS=\"%s", ipv4Str(ipv4->DnsPrimary));
if (ipv4->DnsSecondary != 0)
fprintf(fp, " %s", ipv4Str(ipv4->DnsSecondary));
fprintf(fp, "\"\n");
fclose(fp);
if (!ql_openwrt_is_wan(ifname))
return;
ql_openwrt_system("ifup wan");
}
static void ql_openwrt_setup_wan6(const char *ifname, const IPV6_T *ipv6) {
FILE *fp = NULL;
char config[64];
int first_ifup;
snprintf(config, sizeof(config), "/tmp/rmnet_%s_ipv6config", ifname);
if (ipv6 == NULL) {
if (ql_openwrt_is_wan(ifname))
ql_openwrt_system("ifdown wan6");
return;
}
first_ifup = (access(config, F_OK) != 0);
fp = fopen(config, "w");
if (fp == NULL)
return;
fprintf(fp, "IFNAME=\"%s\"\n", ifname);
fprintf(fp, "PUBLIC_IP=\"%s\"\n", ipv6Str(ipv6->Address));
fprintf(fp, "NETMASK=\"%s\"\n", ipv6Str(ipv6->SubnetMask));
fprintf(fp, "GATEWAY=\"%s\"\n", ipv6Str(ipv6->Gateway));
fprintf(fp, "PrefixLength=\"%d\"\n", ipv6->PrefixLengthIPAddr);
fprintf(fp, "DNSSERVERS=\"%s", ipv6Str(ipv6->DnsPrimary));
if (ipv6->DnsSecondary[0])
fprintf(fp, " %s", ipv6Str(ipv6->DnsSecondary));
fprintf(fp, "\"\n");
fclose(fp);
if (!ql_openwrt_is_wan(ifname))
return;
if (first_ifup)
ql_openwrt_system("ifup wan6");
else
ql_openwrt_system("/etc/init.d/network restart"); //make PC to release old IPV6 address, and RS new IPV6 address
#if 1 //TODO: why is this needed?
if (openwrt_lan) {
int i;
char shell_cmd[128];
UCHAR Address[16] = {0};
ql_openwrt_system(("ifstatus lan"));
for (i = 0; i < (ipv6->PrefixLengthIPAddr/8); i++)
Address[i] = ipv6->Address[i];
snprintf(shell_cmd, sizeof(shell_cmd), "ip route del %s/%u dev %s", ipv6Str(Address), ipv6->PrefixLengthIPAddr, ifname);
ql_openwrt_system(shell_cmd);
snprintf(shell_cmd, sizeof(shell_cmd), "ip route add %s/%u dev %s", ipv6Str(Address), ipv6->PrefixLengthIPAddr, openwrt_lan);
ql_system(shell_cmd);
}
#endif
}
#endif
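/* Bring the data call's network interface up: set the driver link state and MTU, then configure addresses either
   directly from the QMI-provided values or by running udhcpc/dhclient; IPv6 is applied statically and relies on
   Router Solicitation rather than DHCPv6. */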
void udhcpc_start(PROFILE_T *profile) {
char *ifname = profile->usbnet_adapter;
ql_set_driver_link_state(profile, 1);
if (profile->qmapnet_adapter[0]) {
ifname = profile->qmapnet_adapter;
}
if (profile->rawIP && profile->ipv4.Address && profile->ipv4.Mtu) {
ql_set_mtu(ifname, (profile->ipv4.Mtu));
}
if (strcmp(ifname, profile->usbnet_adapter)) {
ifc_set_state(profile->usbnet_adapter, 1);
if (ifc_get_flags(ifname)&IFF_UP) {
ifc_set_state(ifname, 0);
}
}
ifc_set_state(ifname, 1);
if (profile->ipv4.Address) {
if (profile->PCSCFIpv4Addr1)
dbg_time("pcscf1: %s", ipv4Str(profile->PCSCFIpv4Addr1));
if (profile->PCSCFIpv4Addr2)
dbg_time("pcscf2: %s", ipv4Str(profile->PCSCFIpv4Addr2));
}
if (profile->ipv6.Address[0] && profile->ipv6.PrefixLengthIPAddr) {
if (profile->PCSCFIpv6Addr1[0])
dbg_time("pcscf1: %s", ipv6Str(profile->PCSCFIpv6Addr1));
if (profile->PCSCFIpv6Addr2[0])
dbg_time("pcscf2: %s", ipv6Str(profile->PCSCFIpv6Addr2));
}
#if 1 //for bridge mode, only one public IP, so do udhcpc manually
if (ql_bridge_mode_detect(profile)) {
return;
}
#endif
	//because udhcpc must be used to obtain the IP when working in ETH mode,
	//it is better to also use udhcpc when working in IP mode,
	//so the same policy is used for all modules
#if 0
if (profile->rawIP != 0) //mdm9x07/ec25,ec20 R2.0
{
update_ip_address_by_qmi(ifname, &profile->ipv4, &profile->ipv6);
return;
}
#endif
if (profile->ipv4.Address == 0)
goto set_ipv6;
if (profile->no_dhcp || profile->request_ops == &mbim_request_ops) { //lots of mbim modem do not support DHCP
update_ip_address_by_qmi(ifname, &profile->ipv4, NULL);
}
else
/* Do DHCP using busybox tools */
{
char udhcpc_cmd[128];
pthread_attr_t udhcpc_thread_attr;
pthread_t udhcpc_thread_id;
pthread_attr_init(&udhcpc_thread_attr);
pthread_attr_setdetachstate(&udhcpc_thread_attr, PTHREAD_CREATE_DETACHED);
#ifdef USE_DHCLIENT
snprintf(udhcpc_cmd, sizeof(udhcpc_cmd), "dhclient -4 -d --no-pid %s", ifname);
dhclient_alive++;
#else
if (access("/usr/share/udhcpc/default.script", X_OK)
&& access("/etc//udhcpc/default.script", X_OK)) {
dbg_time("No default.script found, it should be in '/usr/share/udhcpc/' or '/etc//udhcpc' depend on your udhcpc version!");
}
//-f,--foreground Run in foreground
//-b,--background Background if lease is not obtained
//-n,--now Exit if lease is not obtained
//-q,--quit Exit after obtaining lease
//-t,--retries N Send up to N discover packets (default 3)
snprintf(udhcpc_cmd, sizeof(udhcpc_cmd), "busybox udhcpc -f -n -q -t 5 -i %s", ifname);
#endif
#if 1 //for OpenWrt
if (!access("/lib/netifd/dhcp.script", X_OK) && !access("/sbin/ifup", X_OK) && !access("/sbin/ifstatus", X_OK)) {
#if 0 //20210415 do not print these messages
			dbg_time("are you using OpenWrt?");
			dbg_time("you should not call udhcpc manually;");
			dbg_time("instead modify /etc/config/network as below:");
			dbg_time("config interface wan");
			dbg_time("\toption ifname %s", ifname);
			dbg_time("\toption proto dhcp");
			dbg_time("then use \"/sbin/ifstatus wan\" to check %s's status.", ifname);
#endif
}
#endif
#ifdef USE_DHCLIENT
pthread_create(&udhcpc_thread_id, &udhcpc_thread_attr, udhcpc_thread_function, (void*)strdup(udhcpc_cmd));
sleep(1);
#else
pthread_create(&udhcpc_thread_id, NULL, udhcpc_thread_function, (void*)strdup(udhcpc_cmd));
pthread_join(udhcpc_thread_id, NULL);
if (profile->request_ops == &atc_request_ops) {
profile->udhcpc_ip = 0;
ifc_get_addr(ifname, &profile->udhcpc_ip);
if (profile->udhcpc_ip != profile->ipv4.Address) {
unsigned char *l = (unsigned char *)&profile->udhcpc_ip;
unsigned char *r = (unsigned char *)&profile->ipv4.Address;
dbg_time("ERROR: IP from udhcpc (%d.%d.%d.%d) is different to IP from ATC (%d.%d.%d.%d)!",
l[0], l[1], l[2], l[3], r[0], r[1], r[2], r[3]);
ql_get_netcard_carrier_state(ifname); //miss udhcpc default.script or modem not report usb-net-cdc-linkup
}
}
if (profile->request_ops != &qmi_request_ops) { //only QMI modem support next fixup!
goto set_ipv6;
}
if (ql_raw_ip_mode_check(ifname, profile->ipv4.Address)) {
pthread_create(&udhcpc_thread_id, NULL, udhcpc_thread_function, (void*)strdup(udhcpc_cmd));
pthread_join(udhcpc_thread_id, NULL);
}
if (!ql_netcard_ipv4_address_check(ifname, qmi2addr(profile->ipv4.Address))) {
//no udhcpc's default.script exist, directly set ip and dns
update_ip_address_by_qmi(ifname, &profile->ipv4, NULL);
}
//Add by Demon. check default route
FILE *rt_fp = NULL;
char rt_cmd[128] = {0};
//Check if there is a default route.
snprintf(rt_cmd, sizeof(rt_cmd), "route -n | grep %s | awk '{print $1}' | grep 0.0.0.0", ifname);
rt_fp = popen((const char *)rt_cmd, "r");
if (rt_fp != NULL) {
char buf[20] = {0};
int found_default_rt = 0;
if (fgets(buf, sizeof(buf), rt_fp) != NULL) {
//Find the specified interface
found_default_rt = 1;
}
if (1 == found_default_rt) {
//dbg_time("Route items found for %s", ifname);
}
else {
dbg_time("Warning: No route items found for %s", ifname);
}
pclose(rt_fp);
}
//End by Demon.
#endif
}
#ifdef QL_OPENWER_NETWORK_SETUP
ql_openwrt_setup_wan(ifname, &profile->ipv4);
#endif
set_ipv6:
if (profile->ipv6.Address[0] && profile->ipv6.PrefixLengthIPAddr) {
#if 1
		//the module does not support DHCPv6, it only supports 'Router Solicit',
		//and it seems that if /proc/sys/net/ipv6/conf/all/forwarding is enabled, the kernel does not send RS
const char *forward_file = "/proc/sys/net/ipv6/conf/all/forwarding";
int forward_fd = open(forward_file, O_RDONLY);
if (forward_fd > 0) {
char forward_state[2];
if (read(forward_fd, forward_state, 2) == -1) {};
if (forward_state[0] == '1') {
//dbg_time("%s enabled, kernel maybe donot send 'Router Solicit'", forward_file);
}
close(forward_fd);
}
update_ip_address_by_qmi(ifname, NULL, &profile->ipv6);
if (profile->ipv6.DnsPrimary[0] || profile->ipv6.DnsSecondary[0]) {
char dns1str[64], dns2str[64];
if (profile->ipv6.DnsPrimary[0]) {
strcpy(dns1str, ipv6Str(profile->ipv6.DnsPrimary));
}
if (profile->ipv6.DnsSecondary[0]) {
strcpy(dns2str, ipv6Str(profile->ipv6.DnsSecondary));
}
update_resolv_conf(6, ifname, profile->ipv6.DnsPrimary[0] ? dns1str : NULL,
profile->ipv6.DnsSecondary[0] != '\0' ? dns2str : NULL);
}
#ifdef QL_OPENWER_NETWORK_SETUP
ql_openwrt_setup_wan6(ifname, &profile->ipv6);
#endif
#else
#ifdef USE_DHCLIENT
snprintf(udhcpc_cmd, sizeof(udhcpc_cmd), "dhclient -6 -d --no-pid %s", ifname);
dhclient_alive++;
#else
/*
DHCPv6: Dibbler - a portable DHCPv6
1. download from http://klub.com.pl/dhcpv6/
2. cross-compile
2.1 ./configure --host=arm-linux-gnueabihf
2.2 copy dibbler-client to your board
3. mkdir -p /var/log/dibbler/ /var/lib/ on your board
4. create /etc/dibbler/client.conf on your board, the content is
log-mode short
log-level 7
iface wwan0 {
ia
option dns-server
}
5. run "dibbler-client start" to get ipV6 address
6. run "route -A inet6 add default dev wwan0" to add default route
*/
snprintf(shell_cmd, sizeof(shell_cmd), "route -A inet6 add default %s", ifname);
ql_system(shell_cmd);
snprintf(udhcpc_cmd, sizeof(udhcpc_cmd), "dibbler-client run");
dibbler_client_alive++;
#endif
pthread_create(&udhcpc_thread_id, &udhcpc_thread_attr, udhcpc_thread_function, (void*)strdup(udhcpc_cmd));
#endif
}
}
void udhcpc_stop(PROFILE_T *profile) {
char *ifname = profile->usbnet_adapter;
char shell_cmd[128];
ql_set_driver_link_state(profile, 0);
if (profile->qmapnet_adapter[0]) {
ifname = profile->qmapnet_adapter;
}
#ifdef USE_DHCLIENT
if (dhclient_alive) {
system("killall dhclient");
dhclient_alive = 0;
}
#endif
if (dibbler_client_alive) {
if (system("killall dibbler-client")) {};
dibbler_client_alive = 0;
}
profile->udhcpc_ip = 0;
//it seems that calling netif_carrier_on() while the netcard's IP is "0.0.0.0" causes netif_queue_stopped()
if (!access("/sbin/ip", X_OK))
snprintf(shell_cmd, sizeof(shell_cmd), "ip addr flush dev %s", ifname);
else
snprintf(shell_cmd, sizeof(shell_cmd), "ifconfig %s 0.0.0.0", ifname);
ql_system(shell_cmd);
ifc_set_state(ifname, 0);
#ifdef QL_OPENWER_NETWORK_SETUP
ql_openwrt_setup_wan(ifname, NULL);
ql_openwrt_setup_wan6(ifname, NULL);
#endif
}

View File

@ -0,0 +1,179 @@
#include <sys/socket.h>
#include <sys/select.h>
#include <sys/types.h>
#include <net/if.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <endian.h>
#include "libmnl/ifutils.h"
#include "libmnl/dhcp/dhcp.h"
#include "util.h"
#include "QMIThread.h"
static int ql_raw_ip_mode_check(const char *ifname)
{
int fd;
char raw_ip[128];
char mode[2] = "X";
int mode_change = 0;
snprintf(raw_ip, sizeof(raw_ip), "/sys/class/net/%s/qmi/raw_ip", ifname);
if (access(raw_ip, F_OK))
return 0;
fd = open(raw_ip, O_RDWR | O_NONBLOCK | O_NOCTTY);
if (fd < 0)
{
dbg_time("%s %d fail to open(%s), errno:%d (%s)", __FILE__, __LINE__, raw_ip, errno, strerror(errno));
return 0;
}
read(fd, mode, 2);
if (mode[0] == '0' || mode[0] == 'N')
{
if_link_down(ifname);
dbg_time("echo Y > /sys/class/net/%s/qmi/raw_ip", ifname);
mode[0] = 'Y';
write(fd, mode, 2);
mode_change = 1;
if_link_up(ifname);
}
close(fd);
return mode_change;
}
void ql_set_driver_link_state(PROFILE_T *profile, int link_state)
{
char link_file[128];
int fd;
int new_state = 0;
snprintf(link_file, sizeof(link_file), "/sys/class/net/%s/link_state", profile->usbnet_adapter);
fd = open(link_file, O_RDWR | O_NONBLOCK | O_NOCTTY);
if (fd == -1)
{
if (errno != ENOENT)
dbg_time("Fail to access %s, errno: %d (%s)", link_file, errno, strerror(errno));
return;
}
if (profile->qmap_mode <= 1)
new_state = !!link_state;
else
{
//0x80 means link off this pdp
new_state = (link_state ? 0x00 : 0x80) + profile->pdp;
}
snprintf(link_file, sizeof(link_file), "%d\n", new_state);
write(fd, link_file, strlen(link_file));
if (link_state == 0 && profile->qmap_mode > 1)
{
size_t rc;
lseek(fd, 0, SEEK_SET);
rc = read(fd, link_file, sizeof(link_file));
if (rc > 1 && (!strcasecmp(link_file, "0\n") || !strcasecmp(link_file, "0x0\n")))
{
if_link_down(profile->usbnet_adapter);
}
}
close(fd);
}
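/*
 * Worked example (illustrative, not from the original source): with
 * qmap_mode = 4 and pdp = 2, bringing the link up writes "2\n" to link_state
 * and bringing it down writes "130\n" (0x80 | 2); with qmap_mode <= 1 the
 * value written is simply "1\n" or "0\n".
 */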
void udhcpc_start(PROFILE_T *profile)
{
char *ifname = profile->usbnet_adapter;
ql_set_driver_link_state(profile, 1);
ql_raw_ip_mode_check(ifname);
if (profile->qmapnet_adapter)
{
ifname = profile->qmapnet_adapter;
}
if (profile->rawIP && profile->ipv4.Address && profile->ipv4.Mtu)
{
if_set_mtu(ifname, (profile->ipv4.Mtu));
}
if (strcmp(ifname, profile->usbnet_adapter))
{
if_link_up(profile->usbnet_adapter);
}
if_link_up(ifname);
#if 1 //for bridge mode, only one public IP, so do udhcpc manually
if (ql_bridge_mode_detect(profile))
{
return;
}
#endif
// if using DHCP (must be built with the ${DHCP} source files)
// do_dhcp(ifname);
// return 0;
/* IPv4 Addr Info */
if (profile->ipv4.Address)
{
dbg_time("IPv4 MTU: %d", profile->ipv4.Mtu);
dbg_time("IPv4 Address: %s", ipaddr_to_string_v4(ntohl(profile->ipv4.Address)));
dbg_time("IPv4 Netmask: %d", mask_to_prefix_v4(ntohl(profile->ipv4.SubnetMask)));
dbg_time("IPv4 Gateway: %s", ipaddr_to_string_v4(ntohl(profile->ipv4.Gateway)));
dbg_time("IPv4 DNS1: %s", ipaddr_to_string_v4(ntohl(profile->ipv4.DnsPrimary)));
dbg_time("IPv4 DNS2: %s", ipaddr_to_string_v4(ntohl(profile->ipv4.DnsSecondary)));
if_set_network_v4(ifname, ntohl(profile->ipv4.Address),
mask_to_prefix_v4(profile->ipv4.SubnetMask),
ntohl(profile->ipv4.Gateway),
ntohl(profile->ipv4.DnsPrimary),
ntohl(profile->ipv4.DnsSecondary));
}
if (profile->ipv6.Address[0] && profile->ipv6.PrefixLengthIPAddr)
{
//the module does not support DHCPv6, only 'Router Solicit'
//and it seems that if /proc/sys/net/ipv6/conf/all/forwarding is enabled, the kernel does not send RS
const char *forward_file = "/proc/sys/net/ipv6/conf/all/forwarding";
int forward_fd = open(forward_file, O_RDONLY);
if (forward_fd > 0)
{
char forward_state[2];
read(forward_fd, forward_state, 2);
if (forward_state[0] == '1')
{
dbg_time("%s enabled, kernel maybe donot send 'Router Solicit'", forward_file);
}
close(forward_fd);
}
dbg_time("IPv6 MTU: %d", profile->ipv6.Mtu);
dbg_time("IPv6 Address: %s", ipaddr_to_string_v6(profile->ipv6.Address));
dbg_time("IPv6 Netmask: %d", profile->ipv6.PrefixLengthIPAddr);
dbg_time("IPv6 Gateway: %s", ipaddr_to_string_v6(profile->ipv6.Gateway));
dbg_time("IPv6 DNS1: %s", ipaddr_to_string_v6(profile->ipv6.DnsPrimary));
dbg_time("IPv6 DNS2: %s", ipaddr_to_string_v6(profile->ipv6.DnsSecondary));
if_set_network_v6(ifname, profile->ipv6.Address, profile->ipv6.PrefixLengthIPAddr,
profile->ipv6.Gateway, profile->ipv6.DnsPrimary, profile->ipv6.DnsSecondary);
}
}
void udhcpc_stop(PROFILE_T *profile)
{
char *ifname = profile->usbnet_adapter;
ql_set_driver_link_state(profile, 0);
if (profile->qmapnet_adapter)
{
ifname = profile->qmapnet_adapter;
}
if_link_down(ifname);
if_flush_v4_addr(ifname);
if_flush_v6_addr(ifname);
}

View File

@ -0,0 +1,132 @@
/******************************************************************************
@file udhcpc.c
@brief call DHCP tools to obtain IP address.
DESCRIPTION
Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules.
INITIALIZATION AND SEQUENCING REQUIREMENTS
None.
---------------------------------------------------------------------------
Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved.
Quectel Wireless Solution Proprietary and Confidential.
---------------------------------------------------------------------------
******************************************************************************/
#include <sys/socket.h>
#include <sys/select.h>
#include <sys/types.h>
#include <net/if.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <endian.h>
#include "util.h"
#include "QMIThread.h"
#define IFDOWN_SCRIPT "/etc/quectel/ifdown.sh"
#define IFUP_SCRIPT "/etc/quectel/ifup.sh"
static int ql_system(const char *shell_cmd)
{
dbg_time("%s", shell_cmd);
return system(shell_cmd);
}
uint32_t mask_to_prefix_v4(uint32_t mask)
{
uint32_t prefix = 0;
while (mask)
{
mask = mask & (mask - 1);
prefix++;
}
return prefix;
}
uint32_t mask_from_prefix_v4(uint32_t prefix)
{
return ~((1 << (32 - prefix)) - 1);
}
/* mask in int */
uint32_t broadcast_from_mask(uint32_t ip, uint32_t mask)
{
return (ip & mask) | (~mask);
}
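/*
 * Worked examples (illustrative): mask_to_prefix_v4(0xFFFFFF00) == 24,
 * mask_from_prefix_v4(24) == 0xFFFFFF00, and
 * broadcast_from_mask(0xC0A80001, 0xFFFFFF00) == 0xC0A800FF, i.e. 192.168.0.1
 * with a /24 mask yields the broadcast address 192.168.0.255.
 * mask_to_prefix_v4() and broadcast_from_mask() are byte-order agnostic since
 * they only use bitwise operations.
 */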
const char *ipaddr_to_string_v4(in_addr_t ipaddr, char *buf, size_t size)
{
// static char buf[INET6_ADDRSTRLEN] = {'\0'};
buf[0] = '\0';
uint32_t addr = ipaddr;
return inet_ntop(AF_INET, &addr, buf, size);
}
const char *ipaddr_to_string_v6(uint8_t *ipaddr, char *buf, size_t size)
{
buf[0] = '\0';
return inet_ntop(AF_INET6, ipaddr, buf, size);
}
/**
* For more details see default.script
*
* The main aim of this function is to offload IP management to the script; the CM itself is not interested in managing IP addresses.
* It just passes the script all the info about IP, mask, router, DNS, ...
*/
void udhcpc_start(PROFILE_T *profile)
{
char shell_cmd[1024];
char ip[128];
char subnet[128];
char broadcast[128];
char router[128];
char domain1[128];
char domain2[128];
if (NULL == getenv(IFUP_SCRIPT))
return;
// manage IPv4???
// check rawip ???
snprintf(shell_cmd, sizeof(shell_cmd),
" netiface=%s interface=%s mtu=%u ip=%s subnet=%s broadcast=%s router=%s"
" domain=\"%s %s\" %s",
profile->usbnet_adapter,
profile->qmapnet_adapter ? profile->qmapnet_adapter : profile->usbnet_adapter,
profile->ipv4.Mtu,
ipaddr_to_string_v4(ntohl(profile->ipv4.Address), ip, sizeof(ip)),
ipaddr_to_string_v4(ntohl(profile->ipv4.SubnetMask), subnet, sizeof(subnet)),
ipaddr_to_string_v4(ntohl(broadcast_from_mask(profile->ipv4.Address, profile->ipv4.SubnetMask)),
broadcast, sizeof(broadcast)),
ipaddr_to_string_v4(ntohl(profile->ipv4.Gateway), router, sizeof(router)),
ipaddr_to_string_v4(ntohl(profile->ipv4.DnsPrimary), domain1, sizeof(domain1)),
ipaddr_to_string_v4(ntohl(profile->ipv4.DnsSecondary), domain2, sizeof(domain2)),
getenv(IFUP_SCRIPT));
ql_system(shell_cmd);
// manage IPv6???
}
/**
* For more details see default.script
*
* The main aim of this function is to offload IP management to the script; the CM itself is not interested in managing IP addresses.
* It just passes the script all the info about IP, mask, router, DNS, ...
*/
void udhcpc_stop(PROFILE_T *profile)
{
char shell_cmd[1024];
if (NULL == getenv(IFDOWN_SCRIPT))
return;
snprintf(shell_cmd, sizeof(shell_cmd),
"netiface=%s interface=%s %s",
profile->usbnet_adapter,
profile->qmapnet_adapter ? profile->qmapnet_adapter : profile->usbnet_adapter,
getenv(IFDOWN_SCRIPT));
ql_system(shell_cmd);
}
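/*
 * Editorial note: in this variant, udhcpc_start() and udhcpc_stop() do not
 * configure the interface themselves. udhcpc_start() exports netiface,
 * interface, mtu, ip, subnet, broadcast, router and domain, udhcpc_stop()
 * exports netiface and interface, and both then run the command taken from
 * the environment variable named by IFUP_SCRIPT / IFDOWN_SCRIPT. The
 * referenced script is expected to apply (or tear down) the settings, much
 * like udhcpc's default.script; the script itself is provided separately.
 */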

View File

@ -0,0 +1,361 @@
/******************************************************************************
@file util.c
@brief some utils for this QCM tool.
DESCRIPTION
Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules.
INITIALIZATION AND SEQUENCING REQUIREMENTS
None.
---------------------------------------------------------------------------
Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved.
Quectel Wireless Solution Proprietary and Confidential.
---------------------------------------------------------------------------
******************************************************************************/
#include <sys/time.h>
#include <net/if.h>
typedef unsigned short sa_family_t;
#include <linux/un.h>
#if defined(__STDC__)
#include <stdarg.h>
#define __V(x) x
#else
#include <varargs.h>
#define __V(x) (va_alist) va_dcl
#define const
#define volatile
#endif
#include <syslog.h>
#include "QMIThread.h"
pthread_mutex_t cm_command_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t cm_command_cond = PTHREAD_COND_INITIALIZER;
unsigned int cm_recv_buf[1024];
int cm_open_dev(const char *dev) {
int fd;
fd = open(dev, O_RDWR | O_NONBLOCK | O_NOCTTY);
if (fd != -1) {
fcntl(fd, F_SETFL, fcntl(fd,F_GETFL) | O_NONBLOCK);
fcntl(fd, F_SETFD, FD_CLOEXEC);
if (!strncmp(dev, "/dev/tty", strlen("/dev/tty")))
{
//disable echo on serial ports
struct termios ios;
memset(&ios, 0, sizeof(ios));
tcgetattr( fd, &ios );
cfmakeraw(&ios);
cfsetispeed(&ios, B115200);
cfsetospeed(&ios, B115200);
tcsetattr( fd, TCSANOW, &ios );
tcflush(fd, TCIOFLUSH);
}
} else {
dbg_time("Failed to open %s, errno: %d (%s)", dev, errno, strerror(errno));
}
return fd;
}
int cm_open_proxy(const char *name) {
int sockfd = -1;
int reuse_addr = 1;
struct sockaddr_un sockaddr;
socklen_t alen;
/*Create server socket*/
sockfd = socket(AF_LOCAL, SOCK_STREAM, 0);
if (sockfd < 0)
return sockfd;
memset(&sockaddr, 0, sizeof(sockaddr));
sockaddr.sun_family = AF_LOCAL;
sockaddr.sun_path[0] = 0;
memcpy(sockaddr.sun_path + 1, name, strlen(name) );
alen = strlen(name) + offsetof(struct sockaddr_un, sun_path) + 1;
if(connect(sockfd, (struct sockaddr *)&sockaddr, alen) < 0) {
close(sockfd);
dbg_time("connect %s errno: %d (%s)", name, errno, strerror(errno));
return -1;
}
setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &reuse_addr,sizeof(reuse_addr));
fcntl(sockfd, F_SETFL, fcntl(sockfd,F_GETFL) | O_NONBLOCK);
fcntl(sockfd, F_SETFD, FD_CLOEXEC);
dbg_time("connect to %s sockfd = %d", name, sockfd);
return sockfd;
}
static void setTimespecRelative(struct timespec *p_ts, long long msec)
{
struct timeval tv;
gettimeofday(&tv, (struct timezone *) NULL);
/* what's really funny about this is that I know
pthread_cond_timedwait just turns around and makes this
a relative time again */
p_ts->tv_sec = tv.tv_sec + (msec / 1000);
p_ts->tv_nsec = (tv.tv_usec + (msec % 1000) * 1000L ) * 1000L;
if ((unsigned long)p_ts->tv_nsec >= 1000000000UL) {
p_ts->tv_sec += 1;
p_ts->tv_nsec -= 1000000000UL;
}
}
int pthread_cond_timeout_np(pthread_cond_t *cond, pthread_mutex_t * mutex, unsigned msecs) {
if (msecs != 0) {
unsigned i;
unsigned t = msecs/4;
int ret = 0;
if (t == 0)
t = 1;
for (i = 0; i < msecs; i += t) {
struct timespec ts;
setTimespecRelative(&ts, t);
//very old uClibc does not support pthread_condattr_setclock(CLOCK_MONOTONIC)
ret = pthread_cond_timedwait(cond, mutex, &ts); //to avoid problems if the system time changes
if (ret != ETIMEDOUT) {
if(ret) dbg_time("ret=%d, msecs=%u, t=%u", ret, msecs, t);
break;
}
}
return ret;
} else {
return pthread_cond_wait(cond, mutex);
}
}
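/*
 * Usage sketch (illustrative, not called from this file): wait up to one
 * second for another thread to signal cm_command_cond while holding
 * cm_command_mutex.
 *
 *   pthread_mutex_lock(&cm_command_mutex);
 *   if (pthread_cond_timeout_np(&cm_command_cond, &cm_command_mutex, 1000) == ETIMEDOUT)
 *       dbg_time("no response within 1000 msecs");
 *   pthread_mutex_unlock(&cm_command_mutex);
 */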
const char * get_time(void) {
static char time_buf[128];
struct timeval tv;
time_t time;
suseconds_t millitm;
struct tm *ti;
gettimeofday (&tv, NULL);
time= tv.tv_sec;
millitm = (tv.tv_usec + 500) / 1000;
if (millitm == 1000) {
++time;
millitm = 0;
}
ti = localtime(&time);
sprintf(time_buf, "%02d-%02d_%02d:%02d:%02d:%03d", ti->tm_mon+1, ti->tm_mday, ti->tm_hour, ti->tm_min, ti->tm_sec, (int)millitm);
return time_buf;
}
unsigned long clock_msec(void)
{
struct timespec tm;
clock_gettime( CLOCK_MONOTONIC, &tm);
return (unsigned long)(tm.tv_sec*1000 + (tm.tv_nsec/1000000));
}
FILE *logfilefp = NULL;
void update_resolv_conf(int iptype, const char *ifname, const char *dns1, const char *dns2) {
const char *dns_file = "/etc/resolv.conf";
FILE *dns_fp;
char dns_line[256];
#define MAX_DNS 16
char *dns_info[MAX_DNS];
char dns_tag[64];
int dns_match = 0;
int i;
snprintf(dns_tag, sizeof(dns_tag), "# IPV%d %s", iptype, ifname);
for (i = 0; i < MAX_DNS; i++)
dns_info[i] = NULL;
dns_fp = fopen(dns_file, "r");
if (dns_fp) {
i = 0;
dns_line[sizeof(dns_line)-1] = '\0';
while((fgets(dns_line, sizeof(dns_line)-1, dns_fp)) != NULL) {
if ((strlen(dns_line) > 1) && (dns_line[strlen(dns_line) - 1] == '\n'))
dns_line[strlen(dns_line) - 1] = '\0';
//dbg_time("%s", dns_line);
if (strstr(dns_line, dns_tag)) {
dns_match++;
continue;
}
dns_info[i++] = strdup(dns_line);
if (i == MAX_DNS)
break;
}
fclose(dns_fp);
}
else if (errno != ENOENT) {
dbg_time("fopen %s fail, errno:%d (%s)", dns_file, errno, strerror(errno));
return;
}
if (dns1 == NULL && dns_match == 0)
return;
dns_fp = fopen(dns_file, "w");
if (dns_fp) {
if (dns1)
fprintf(dns_fp, "nameserver %s %s\n", dns1, dns_tag);
if (dns2)
fprintf(dns_fp, "nameserver %s %s\n", dns2, dns_tag);
for (i = 0; i < MAX_DNS && dns_info[i]; i++)
fprintf(dns_fp, "%s\n", dns_info[i]);
fclose(dns_fp);
}
else {
dbg_time("fopen %s fail, errno:%d (%s)", dns_file, errno, strerror(errno));
}
for (i = 0; i < MAX_DNS && dns_info[i]; i++)
free(dns_info[i]);
}
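/*
 * Example (illustrative): update_resolv_conf(4, "wwan0", "8.8.8.8", "8.8.4.4")
 * rewrites /etc/resolv.conf so that it starts with
 *   nameserver 8.8.8.8 # IPV4 wwan0
 *   nameserver 8.8.4.4 # IPV4 wwan0
 * while keeping any previously existing lines that do not carry the
 * "# IPV4 wwan0" tag. Calling it later with dns1 == NULL (and dns2 == NULL)
 * removes the tagged lines again.
 */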
pid_t getpid_by_pdp(int pdp, const char* program_name)
{
glob_t gt;
int ret;
char filter[16];
pid_t pid;
snprintf(filter, sizeof(filter), "-n %d", pdp);
ret = glob("/proc/*/cmdline", GLOB_NOSORT, NULL, &gt);
if (ret != 0) {
dbg_time("glob error, errno = %d(%s)", errno, strerror(errno));
return -1;
} else {
int i = 0, fd = -1;
ssize_t nreads;
char cmdline[512] = {0};
for (i = 0; i < (int)gt.gl_pathc; i++) {
fd = open(gt.gl_pathv[i], O_RDONLY);
if (fd == -1) {
dbg_time("open %s failed, errno = %d(%s)", gt.gl_pathv[i], errno, strerror(errno));
globfree(&gt);
return -1;
}
nreads = read(fd, cmdline, sizeof(cmdline));
if (nreads > 0) {
int pos = 0;
while (pos < nreads-1) {
if (cmdline[pos] == '\0')
cmdline[pos] = ' '; // space
pos++;
}
// printf("%s\n", cmdline);
}
if (strstr(cmdline, program_name) && strstr(cmdline, filter)) {
char path[64] = {0};
char pidstr[64] = {0};
char *p;
dbg_time("%s: %s", gt.gl_pathv[i], cmdline);
strcpy(path, gt.gl_pathv[i]);
p = strstr(gt.gl_pathv[i], "/cmdline");
*p = '\0';
while (*(--p) != '/') ;
strcpy(pidstr, p+1);
pid = atoi(pidstr);
globfree(&gt);
return pid;
}
}
}
globfree(&gt);
return -1;
}
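/*
 * Example (illustrative): getpid_by_pdp(1, "quectel-CM") returns the PID of a
 * running "quectel-CM ... -n 1" instance found via /proc/<pid>/cmdline, or -1
 * if none is found.
 */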
void ql_get_driver_rmnet_info(PROFILE_T *profile, RMNET_INFO *rmnet_info) {
int ifc_ctl_sock;
struct ifreq ifr;
int rc;
int request = 0x89F3;
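/* 0x89F3 is SIOCDEVPRIVATE + 3, a driver-private ioctl (presumably handled by the Quectel rmnet/qmi_wwan_q network driver) that fills in RMNET_INFO. */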
unsigned char data[512];
memset(rmnet_info, 0x00, sizeof(*rmnet_info));
ifc_ctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
if (ifc_ctl_sock <= 0) {
dbg_time("socket() failed: %s\n", strerror(errno));
return;
}
memset(&ifr, 0, sizeof(struct ifreq));
strncpy(ifr.ifr_name, profile->usbnet_adapter, IFNAMSIZ);
ifr.ifr_name[IFNAMSIZ - 1] = 0;
ifr.ifr_ifru.ifru_data = (void *)data;
rc = ioctl(ifc_ctl_sock, request, &ifr);
if (rc < 0) {
if (errno != ENOTSUP)
dbg_time("ioctl(0x%x, qmap_settings) errno:%d (%s), rc=%d", request, errno, strerror(errno), rc);
}
else {
memcpy(rmnet_info, data, sizeof(*rmnet_info));
}
close(ifc_ctl_sock);
}
void ql_set_driver_qmap_setting(PROFILE_T *profile, QMAP_SETTING *qmap_settings) {
int ifc_ctl_sock;
struct ifreq ifr;
int rc;
int request = 0x89F2;
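/* 0x89F2 is SIOCDEVPRIVATE + 2, the driver-private ioctl through which QMAP_SETTING is handed to the (presumed Quectel) network driver. */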
ifc_ctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
if (ifc_ctl_sock <= 0) {
dbg_time("socket() failed: %s\n", strerror(errno));
return;
}
memset(&ifr, 0, sizeof(struct ifreq));
strncpy(ifr.ifr_name, profile->usbnet_adapter, IFNAMSIZ);
ifr.ifr_name[IFNAMSIZ - 1] = 0;
ifr.ifr_ifru.ifru_data = (void *)qmap_settings;
rc = ioctl(ifc_ctl_sock, request, &ifr);
if (rc < 0) {
dbg_time("ioctl(0x%x, qmap_settings) failed: %s, rc=%d", request, strerror(errno), rc);
}
close(ifc_ctl_sock);
}
void no_trunc_strncpy(char *dest, const char *src, size_t dest_size)
{
size_t i = 0;
for (i = 0; i < dest_size && *src; i++) {
*dest++ = *src++;
}
*dest = 0;
}

View File

@ -0,0 +1,52 @@
/**
@file
util.h
@brief
This file provides the definitions, and declares some common APIs for list-algorithm.
*/
#ifndef _UTILS_H_
#define _UTILS_H_
#include <stddef.h>
#include <glob.h>
struct listnode
{
struct listnode *next;
struct listnode *prev;
};
#define node_to_item(node, container, member) \
(container *) (((char*) (node)) - offsetof(container, member))
#define list_declare(name) \
struct listnode name = { \
.next = &name, \
.prev = &name, \
}
#define list_for_each(node, list) \
for (node = (list)->next; node != (list); node = node->next)
#define list_for_each_reverse(node, list) \
for (node = (list)->prev; node != (list); node = node->prev)
void list_init(struct listnode *list);
void list_add_tail(struct listnode *list, struct listnode *item);
void list_add_head(struct listnode *head, struct listnode *item);
void list_remove(struct listnode *item);
#define list_empty(list) ((list) == (list)->next)
#define list_head(list) ((list)->next)
#define list_tail(list) ((list)->prev)
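/*
 * Usage sketch (illustrative; "struct qmi_msg" is a hypothetical type, not
 * defined in this project):
 *
 *   struct qmi_msg { struct listnode node; int id; };
 *   list_declare(pending_msgs);
 *
 *   struct qmi_msg msg = { .id = 1 };
 *   list_add_tail(&pending_msgs, &msg.node);
 *
 *   struct listnode *n;
 *   list_for_each(n, &pending_msgs) {
 *       struct qmi_msg *m = node_to_item(n, struct qmi_msg, node);
 *       // m->id == 1
 *   }
 */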
int epoll_register(int epoll_fd, int fd, unsigned int events);
int epoll_deregister(int epoll_fd, int fd);
const char * get_time(void);
unsigned long clock_msec(void);
pid_t getpid_by_pdp(int, const char*);
#endif

View File

@ -0,0 +1,51 @@
#
# Copyright (C) 2015 OpenWrt.org
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
include $(TOPDIR)/rules.mk
PKG_NAME:=pcie_mhi
PKG_VERSION:=1.3.6
PKG_RELEASE:=2
include $(INCLUDE_DIR)/kernel.mk
include $(INCLUDE_DIR)/package.mk
PKG_BUILD_PARALLEL:=1
PKG_BUILD_FLAGS:=gc-sections lto
define KernelPackage/pcie_mhi
SUBMENU:=WWAN Support
TITLE:=Kernel pcie driver for MHI device
DEPENDS:=+pciids +pciutils +quectel-CM-5G +kmod-qmi_wwan_q
FILES:=$(PKG_BUILD_DIR)/pcie_mhi.ko
AUTOLOAD:=$(call AutoLoad,90,pcie_mhi)
endef
define KernelPackage/pcie_mhi/description
Kernel module for registering a custom pciemhi platform device.
endef
EXTRA_CFLAGS+= \
-I$(STAGING_DIR)/usr/include/qca-nss-drv \
-Wno-unused-function
MAKE_OPTS:= \
ARCH="$(LINUX_KARCH)" \
CROSS_COMPILE="$(TARGET_CROSS)" \
EXTRA_CFLAGS="$(EXTRA_CFLAGS)" \
CXXFLAGS="$(TARGET_CXXFLAGS)" \
M="$(PKG_BUILD_DIR)" \
$(EXTRA_KCONFIG)
define Build/Compile
+$(MAKE) -C "$(LINUX_DIR)" $(strip $(MAKE_OPTS)) \
$(KERNEL_MAKE_FLAGS) \
$(PKG_JOBS) \
modules
endef
$(eval $(call KernelPackage,pcie_mhi))

View File

@ -0,0 +1,34 @@
#ccflags-y += -g
obj-m += pcie_mhi.o
pcie_mhi-objs := core/mhi_init.o core/mhi_main.o core/mhi_pm.o core/mhi_boot.o core/mhi_dtr.o controllers/mhi_qti.o
pcie_mhi-objs += devices/mhi_uci.o
ifeq (1,1)
pcie_mhi-objs += devices/mhi_netdev_quectel.o
else
pcie_mhi-objs += devices/mhi_netdev.o
pcie_mhi-objs += devices/rmnet_handler.o
endif
PWD := $(shell pwd)
ifeq ($(ARCH),)
ARCH := $(shell uname -m)
endif
ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE :=
endif
ifeq ($(KDIR),)
KDIR := /lib/modules/$(shell uname -r)/build
endif
pcie_mhi: clean
$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) modules
#cp pcie_mhi.ko /tftpboot/
clean:
$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) clean
find . -name '*.o.ur-safe' | xargs rm -f
install: pcie_mhi
sudo cp pcie_mhi.ko /lib/modules/${shell uname -r}/kernel/drivers/pci/
sudo depmod

View File

@ -0,0 +1,36 @@
1. port the pcie_mhi driver as follows
$ git diff drivers/Makefile
diff --git a/drivers/Makefile b/drivers/Makefile
index 77fbc52..e45837e 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -184,3 +184,4 @@ obj-$(CONFIG_FPGA) += fpga/
obj-$(CONFIG_FSI) += fsi/
obj-$(CONFIG_TEE) += tee/
obj-$(CONFIG_MULTIPLEXER) += mux/
+obj-y += pcie_mhi/
$ tree drivers/pcie_mhi/ -L 1
drivers/pcie_mhi/
controllers
core
devices
Makefile
2. check that the RG500 attaches to the pcie_mhi driver successfully
root@OpenWrt:/# lspci
00:00.0 Class 0604: 17cb:0302
01:00.0 Class ff00: 17cb:0306
root@OpenWrt:~# dmesg | grep mhi
[ 138.483252] mhi_init Quectel_Linux_PCIE_MHI_Driver_V1.3.0.6
[ 138.492350] mhi_pci_probe pci_dev->name = 0000:01:00.0, domain=0, bus=1, slot=0, vendor=17CB, device=0306
3. for usage, see the following logs
log/QXDM_OVER_PCIE.txt
log/AT_OVER_PCIE.txt
log/MBIM_OVER_PCIE.txt
log/QMI_OVER_PCIE.txt

View File

@ -0,0 +1,103 @@
Release Notes
[V1.3.4]
Date: 12/8/2022
enhancement:
1. only allow autosuspend to be enabled when the module is in MHI_EE_AMSS
2. show PCIe link speed and width at driver probe
3. check PCIe link status by reading the PCIe VID and PID at driver probe;
if the PCIe link is down, return -EIO
4. support RM520 (1eac:1004)
5. support QMAP command packets
fix:
1. fix the TX queue being wrongly stopped during uplink throughput tests
2. fix the module occasionally failing to boot up after QFirehose
3. mhi uci: add a mutex lock for concurrent reads/writes
[V1.3.3]
Date: 30/6/2022
enhancement:
1. remove an unnecessary kmalloc during QFirehose
2. support mhi monitor (like usbmon), usage: cat /sys/kernel/debug/mhi_q/0306_00\:01.00/mhimon
3. set the ring size of event 0 to 256 (from 1024), required by X6x
4. support the PCIe local network card mhi_swip0 (chan 46/47), disabled by default
5. port the IPQ5018 MHI rate control code from SPF11.5
6. set the PCIe rmnet download max QMAP packet size to 15KB (same as the IPQ MHI driver)
7. support setting different MAC addresses for rmnet net cards
8. when mhi netdev fails to allocate memory, use delayed work instead of immediate work
9. optimize code for the case where the modem is still in MHI_EE_PTHRU when the driver loads
fix:
1. Fix unsynchronized access to rp/wp when mhi_queue_xxx and mhi_process_xxx_ring run on different CPUs
2. set the DMA mask at driver probe; some SoCs like rpi_4 need it
[V1.3.2]
Date: 12/16/2021
enhancement:
1. support Linux Kernel V5.14
2. mhi_netdev_quectel.c: do not print logs in softirq context
[V1.3.1]
Date: 9/26/2021
enhancement:
fix:
[V1.3.0.19]
Date: 9/18/2021
enhancement:
1. support sdx62 (17cb:0308)
2. support IPQ5018's NSS
3. use 'qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c' instead of the bundled rmnet_nss.c,
and pcie_mhi.ko must then be loaded after rmnet_nss.ko
4. allow the BHI IRQ to be non-zero (for ipq5018)
fix:
[V1.3.0.18]
Date: 4/14/2021
enhancement:
1. support multiple MBIM calls; usage:
# insmod pcie_mhi.ko mhi_mbim_enabeld=1 qmap_mode=4
# quectel-mbim-proxy -d /dev/mhi_MBIM &
# quectel-CM -n X
fix:
[V1.3.0.17]
Date: 3/11/2021
enhancement:
fix:
1. fix very high CPU load during throughput tests when only one MSI interrupt is available
2. fix an error on the latest X24 modem
[V1.3.0.16]
Date: 11/18/2020
enhancement:
fix:
1. set the ring size to 32; for in-bound channels, if one ring is full the modem will not generate MSI interrupts for any channel
[V1.3.0.15]
Date: 10/30/2020
enhancement:
1. support multiple modems, named /dev/mhi_<chan_name>X
fix:
1. fix compile error on kernel v5.8
[V1.3.0.14]
Date: 10/9/2020
enhancement:
1. support EM120 & EM160
fix:
1. fix compile error on kernel v5.6
2. support runtime suspend
[V1.3.0.13]
Date: 9/7/2020
enhancement:
1. support EM120 & EM160
fix:
1. fix an error on X55 + PCIe 2.0 (e.g. IPQ4019)
2. support runtime suspend
[V1.3.0.12]
Date: 7/7/2020
enhancement:
1. support creating only one net card (enabled by the macro MHI_NETDEV_ONE_CARD_MODE),
fix:

View File

@ -0,0 +1,13 @@
menu "MHI controllers"
config MHI_QTI
tristate "MHI QTI"
depends on MHI_BUS
help
If you say yes to this option, MHI bus support for QTI modem chipsets
will be enabled. QTI PCIe based modems use MHI as the communication
protocol. The MHI control driver is the bus master for such modems. As the
bus master driver, it oversees power management operations such as
suspend, resume, and powering the device on and off.
endmenu

View File

@ -0,0 +1 @@
obj-$(CONFIG_MHI_QTI) += mhi_qti.o mhi_arch_qti.o

View File

@ -0,0 +1,275 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
#include <linux/async.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/msm-bus.h>
#include <linux/msm_pcie.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "../core/mhi.h"
#include "mhi_qti.h"
struct arch_info {
struct mhi_dev *mhi_dev;
struct msm_bus_scale_pdata *msm_bus_pdata;
u32 bus_client;
struct pci_saved_state *pcie_state;
struct pci_saved_state *ref_pcie_state;
struct dma_iommu_mapping *mapping;
};
struct mhi_bl_info {
struct mhi_device *mhi_device;
async_cookie_t cookie;
void *ipc_log;
};
/* ipc log markings */
#define DLOG "Dev->Host: "
#define HLOG "Host: "
#ifdef CONFIG_MHI_DEBUG
#define MHI_IPC_LOG_PAGES (100)
enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_LVL_VERBOSE;
#else
#define MHI_IPC_LOG_PAGES (10)
enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_LVL_ERROR;
#endif
static int mhi_arch_set_bus_request(struct mhi_controller *mhi_cntrl, int index)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct arch_info *arch_info = mhi_dev->arch_info;
MHI_LOG("Setting bus request to index %d\n", index);
if (arch_info->bus_client)
return msm_bus_scale_client_update_request(
arch_info->bus_client,
index);
/* default return success */
return 0;
}
static void mhi_bl_dl_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct mhi_bl_info *mhi_bl_info = mhi_device_get_devdata(mhi_dev);
char *buf = mhi_result->buf_addr;
/* force a null at last character */
buf[mhi_result->bytes_xferd - 1] = 0;
ipc_log_string(mhi_bl_info->ipc_log, "%s %s", DLOG, buf);
}
static void mhi_bl_dummy_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
}
static void mhi_bl_remove(struct mhi_device *mhi_dev)
{
struct mhi_bl_info *mhi_bl_info = mhi_device_get_devdata(mhi_dev);
ipc_log_string(mhi_bl_info->ipc_log, HLOG "Received Remove notif.\n");
/* wait for boot monitor to exit */
async_synchronize_cookie(mhi_bl_info->cookie + 1);
}
static void mhi_bl_boot_monitor(void *data, async_cookie_t cookie)
{
struct mhi_bl_info *mhi_bl_info = data;
struct mhi_device *mhi_device = mhi_bl_info->mhi_device;
struct mhi_controller *mhi_cntrl = mhi_device->mhi_cntrl;
/* 15 sec timeout for booting device */
const u32 timeout = msecs_to_jiffies(15000);
/* wait for device to enter boot stage */
wait_event_timeout(mhi_cntrl->state_event, mhi_cntrl->ee == MHI_EE_AMSS
|| mhi_cntrl->ee == MHI_EE_DISABLE_TRANSITION,
timeout);
if (mhi_cntrl->ee == MHI_EE_AMSS) {
ipc_log_string(mhi_bl_info->ipc_log, HLOG
"Device successfully booted to mission mode\n");
mhi_unprepare_from_transfer(mhi_device);
} else {
ipc_log_string(mhi_bl_info->ipc_log, HLOG
"Device failed to boot to mission mode, ee = %s\n",
TO_MHI_EXEC_STR(mhi_cntrl->ee));
}
}
static int mhi_bl_probe(struct mhi_device *mhi_dev,
const struct mhi_device_id *id)
{
char node_name[32];
struct mhi_bl_info *mhi_bl_info;
mhi_bl_info = devm_kzalloc(&mhi_dev->dev, sizeof(*mhi_bl_info),
GFP_KERNEL);
if (!mhi_bl_info)
return -ENOMEM;
snprintf(node_name, sizeof(node_name), "mhi_bl_%04x_%02u.%02u.%02u",
mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus, mhi_dev->slot);
mhi_bl_info->ipc_log = ipc_log_context_create(MHI_IPC_LOG_PAGES,
node_name, 0);
if (!mhi_bl_info->ipc_log)
return -EINVAL;
mhi_bl_info->mhi_device = mhi_dev;
mhi_device_set_devdata(mhi_dev, mhi_bl_info);
ipc_log_string(mhi_bl_info->ipc_log, HLOG
"Entered SBL, Session ID:0x%x\n",
mhi_dev->mhi_cntrl->session_id);
/* start a thread to monitor entering mission mode */
mhi_bl_info->cookie = async_schedule(mhi_bl_boot_monitor, mhi_bl_info);
return 0;
}
static const struct mhi_device_id mhi_bl_match_table[] = {
{ .chan = "BL" },
{},
};
static struct mhi_driver mhi_bl_driver = {
.id_table = mhi_bl_match_table,
.remove = mhi_bl_remove,
.probe = mhi_bl_probe,
.ul_xfer_cb = mhi_bl_dummy_cb,
.dl_xfer_cb = mhi_bl_dl_cb,
.driver = {
.name = "MHI_BL",
.owner = THIS_MODULE,
},
};
int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct arch_info *arch_info = mhi_dev->arch_info;
char node[32];
if (!arch_info) {
arch_info = devm_kzalloc(&mhi_dev->pci_dev->dev,
sizeof(*arch_info), GFP_KERNEL);
if (!arch_info)
return -ENOMEM;
mhi_dev->arch_info = arch_info;
snprintf(node, sizeof(node), "mhi_%04x_%02u.%02u.%02u",
mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus,
mhi_cntrl->slot);
mhi_cntrl->log_buf = ipc_log_context_create(MHI_IPC_LOG_PAGES,
node, 0);
mhi_cntrl->log_lvl = mhi_ipc_log_lvl;
/* save reference state for pcie config space */
arch_info->ref_pcie_state = pci_store_saved_state(
mhi_dev->pci_dev);
mhi_driver_register(&mhi_bl_driver);
}
return mhi_arch_set_bus_request(mhi_cntrl, 1);
}
void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl)
{
mhi_arch_set_bus_request(mhi_cntrl, 0);
}
int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, bool graceful)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct arch_info *arch_info = mhi_dev->arch_info;
struct pci_dev *pci_dev = mhi_dev->pci_dev;
int ret;
MHI_LOG("Entered\n");
if (graceful) {
pci_clear_master(pci_dev);
ret = pci_save_state(mhi_dev->pci_dev);
if (ret) {
MHI_ERR("Failed with pci_save_state, ret:%d\n", ret);
return ret;
}
arch_info->pcie_state = pci_store_saved_state(pci_dev);
pci_disable_device(pci_dev);
}
/*
* We will always attempt to put the link into D3hot; however,
* a link down may have happened due to a fatal error, so
* ignore the return code
*/
pci_set_power_state(pci_dev, PCI_D3hot);
/* release the resources */
mhi_arch_set_bus_request(mhi_cntrl, 0);
MHI_LOG("Exited\n");
return 0;
}
int mhi_arch_link_on(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct arch_info *arch_info = mhi_dev->arch_info;
struct pci_dev *pci_dev = mhi_dev->pci_dev;
int ret;
MHI_LOG("Entered\n");
/* request resources and establish link training */
ret = mhi_arch_set_bus_request(mhi_cntrl, 1);
if (ret)
MHI_LOG("Could not set bus frequency, ret:%d\n", ret);
ret = pci_set_power_state(pci_dev, PCI_D0);
if (ret) {
MHI_ERR("Failed to set PCI_D0 state, ret:%d\n", ret);
return ret;
}
ret = pci_enable_device(pci_dev);
if (ret) {
MHI_ERR("Failed to enable device, ret:%d\n", ret);
return ret;
}
ret = pci_load_and_free_saved_state(pci_dev, &arch_info->pcie_state);
if (ret)
MHI_LOG("Failed to load saved cfg state\n");
pci_restore_state(pci_dev);
pci_set_master(pci_dev);
MHI_LOG("Exited\n");
return 0;
}

View File

@ -0,0 +1,715 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/version.h>
#include "../core/mhi.h"
#include "mhi_qcom.h"
#if 1
#ifndef PCI_IRQ_MSI
#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,53 ))
int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
{
int nvec = maxvec;
int rc;
if (maxvec < minvec)
return -ERANGE;
do {
rc = pci_enable_msi_block(dev, nvec);
if (rc < 0) {
return rc;
} else if (rc > 0) {
if (rc < minvec)
return -ENOSPC;
nvec = rc;
}
} while (rc);
return nvec;
}
#endif
static int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags)
{
return pci_enable_msi_range(dev, min_vecs, max_vecs);
}
static void pci_free_irq_vectors(struct pci_dev *dev)
{
pci_disable_msi(dev);
}
static int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
return dev->irq + nr;
}
#endif
#endif
static struct pci_device_id mhi_pcie_device_id[] = {
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0303)}, //SDX20
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0304)}, //SDX24
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0305)},
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0306)}, //SDX55
{PCI_DEVICE(0x2C7C, 0x0512)},
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, MHI_PCIE_DEBUG_ID)},
{0},
};
MODULE_DEVICE_TABLE(pci, mhi_pcie_device_id);
static struct pci_driver mhi_pcie_driver;
void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct pci_dev *pci_dev = mhi_dev->pci_dev;
pci_free_irq_vectors(pci_dev);
iounmap(mhi_cntrl->regs);
mhi_cntrl->regs = NULL;
pci_clear_master(pci_dev);
pci_release_region(pci_dev, mhi_dev->resn);
pci_disable_device(pci_dev);
}
static int mhi_init_pci_dev(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct pci_dev *pci_dev = mhi_dev->pci_dev;
int ret;
resource_size_t start, len;
int i;
mhi_dev->resn = MHI_PCI_BAR_NUM;
ret = pci_assign_resource(pci_dev, mhi_dev->resn);
if (ret) {
MHI_ERR("Error assign pci resources, ret:%d\n", ret);
return ret;
}
ret = pci_enable_device(pci_dev);
if (ret) {
MHI_ERR("Error enabling device, ret:%d\n", ret);
goto error_enable_device;
}
ret = pci_request_region(pci_dev, mhi_dev->resn, "mhi");
if (ret) {
MHI_ERR("Error pci_request_region, ret:%d\n", ret);
goto error_request_region;
}
pci_set_master(pci_dev);
start = pci_resource_start(pci_dev, mhi_dev->resn);
len = pci_resource_len(pci_dev, mhi_dev->resn);
mhi_cntrl->regs = ioremap_nocache(start, len);
MHI_LOG("mhi_cntrl->regs = %p\n", mhi_cntrl->regs);
if (!mhi_cntrl->regs) {
MHI_ERR("Error ioremap region\n");
goto error_ioremap;
}
ret = pci_alloc_irq_vectors(pci_dev, 1, mhi_cntrl->msi_required, PCI_IRQ_MSI);
if (IS_ERR_VALUE((ulong)ret) || ret < mhi_cntrl->msi_required) {
if (ret == -ENOSPC) {
/* imx_3.14.52_1.1.0_ga
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index f06e8f0..6a9614f 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -376,6 +376,13 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
if (msgvec > 5)
msgvec = 0;
+#if 1 //Add by Quectel 20190419
+ if (msgvec > 0 && pdev->vendor == 0x17cb) {
+ dev_info(&pdev->dev, "%s quectel fixup pos=%d, msg_ctr=%04x, msgvec=%d\n", __func__, desc->msi_attrib.pos, msg_ctr, msgvec);
+ msgvec = 0;
+ }
+#endif
+
irq = assign_irq((1 << msgvec), desc, &pos);
if (irq < 0)
return irq;
*/
}
//imx_4.1.15_2.0.0_ga & DELL_OPTIPLEX_7010 only alloc one msi interrupt for one pcie device
if (ret != 1) {
MHI_ERR("Failed to enable MSI, ret=%d, msi_required=%d\n", ret, mhi_cntrl->msi_required);
goto error_req_msi;
}
}
mhi_cntrl->msi_allocated = ret;
MHI_LOG("msi_required = %d, msi_allocated = %d, msi_irq = %u\n", mhi_cntrl->msi_required, mhi_cntrl->msi_allocated, pci_dev->irq);
for (i = 0; i < mhi_cntrl->msi_allocated; i++) {
mhi_cntrl->irq[i] = pci_irq_vector(pci_dev, i);
if (mhi_cntrl->irq[i] < 0) {
ret = mhi_cntrl->irq[i];
goto error_get_irq_vec;
}
}
#if 0
/* configure runtime pm */
pm_runtime_set_autosuspend_delay(&pci_dev->dev, MHI_RPM_SUSPEND_TMR_MS);
pm_runtime_dont_use_autosuspend(&pci_dev->dev);
pm_suspend_ignore_children(&pci_dev->dev, true);
/*
* pci framework will increment usage count (twice) before
* calling local device driver probe function.
* 1st pci.c pci_pm_init() calls pm_runtime_forbid
* 2nd pci-driver.c local_pci_probe calls pm_runtime_get_sync
* The framework expects the pci device driver to call
* pm_runtime_put_noidle to decrement the usage count after
* a successful probe and to call pm_runtime_allow to enable
* runtime suspend.
*/
pm_runtime_mark_last_busy(&pci_dev->dev);
pm_runtime_put_noidle(&pci_dev->dev);
#endif
return 0;
error_get_irq_vec:
pci_free_irq_vectors(pci_dev);
error_req_msi:
iounmap(mhi_cntrl->regs);
error_ioremap:
pci_clear_master(pci_dev);
error_request_region:
pci_disable_device(pci_dev);
error_enable_device:
pci_release_region(pci_dev, mhi_dev->resn);
return ret;
}
#ifdef CONFIG_PM
static int mhi_runtime_idle(struct device *dev)
{
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
MHI_LOG("Entered returning -EBUSY\n");
/*
* RPM framework during runtime resume always calls
* rpm_idle to see if the device is ready to suspend.
* If the dev.power usage_count is 0, the RPM framework will call the
* rpm_idle callback to see if the device is ready to suspend;
* if the callback returns 0, or is not defined, the framework will
* assume the device driver is ready to suspend and
* will therefore schedule a runtime suspend.
* In MHI power management, MHI host shall go to
* runtime suspend only after entering MHI State M2, even if
* usage count is 0. Return -EBUSY to disable automatic suspend.
*/
return -EBUSY;
}
static int mhi_runtime_suspend(struct device *dev)
{
int ret = 0;
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
MHI_LOG("Enter\n");
mutex_lock(&mhi_cntrl->pm_mutex);
ret = mhi_pm_suspend(mhi_cntrl);
if (ret) {
MHI_LOG("Abort due to ret:%d\n", ret);
goto exit_runtime_suspend;
}
ret = mhi_arch_link_off(mhi_cntrl, true);
if (ret)
MHI_ERR("Failed to Turn off link ret:%d\n", ret);
exit_runtime_suspend:
mutex_unlock(&mhi_cntrl->pm_mutex);
MHI_LOG("Exited with ret:%d\n", ret);
return ret;
}
static int mhi_runtime_resume(struct device *dev)
{
int ret = 0;
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
MHI_LOG("Enter\n");
mutex_lock(&mhi_cntrl->pm_mutex);
if (!mhi_dev->powered_on) {
MHI_LOG("Not fully powered, return success\n");
mutex_unlock(&mhi_cntrl->pm_mutex);
return 0;
}
/* turn on link */
ret = mhi_arch_link_on(mhi_cntrl);
if (ret)
goto rpm_resume_exit;
/* enter M0 state */
ret = mhi_pm_resume(mhi_cntrl);
rpm_resume_exit:
mutex_unlock(&mhi_cntrl->pm_mutex);
MHI_LOG("Exited with :%d\n", ret);
return ret;
}
static int mhi_system_resume(struct device *dev)
{
int ret = 0;
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
ret = mhi_runtime_resume(dev);
if (ret) {
MHI_ERR("Failed to resume link\n");
} else {
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
}
return ret;
}
int mhi_system_suspend(struct device *dev)
{
struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
MHI_LOG("Entered\n");
/* if rpm status still active then force suspend */
if (!pm_runtime_status_suspended(dev))
return mhi_runtime_suspend(dev);
pm_runtime_set_suspended(dev);
pm_runtime_disable(dev);
MHI_LOG("Exit\n");
return 0;
}
#endif
/* checks if link is down */
static int mhi_link_status(struct mhi_controller *mhi_cntrl, void *priv)
{
struct mhi_dev *mhi_dev = priv;
u16 dev_id;
int ret;
/* try reading device id, if dev id don't match, link is down */
ret = pci_read_config_word(mhi_dev->pci_dev, PCI_DEVICE_ID, &dev_id);
return (ret || dev_id != mhi_cntrl->dev_id) ? -EIO : 0;
}
static int mhi_runtime_get(struct mhi_controller *mhi_cntrl, void *priv)
{
struct mhi_dev *mhi_dev = priv;
struct device *dev = &mhi_dev->pci_dev->dev;
return pm_runtime_get(dev);
}
static void mhi_runtime_put(struct mhi_controller *mhi_cntrl, void *priv)
{
struct mhi_dev *mhi_dev = priv;
struct device *dev = &mhi_dev->pci_dev->dev;
pm_runtime_put_noidle(dev);
}
static void mhi_status_cb(struct mhi_controller *mhi_cntrl,
void *priv,
enum MHI_CB reason)
{
struct mhi_dev *mhi_dev = priv;
struct device *dev = &mhi_dev->pci_dev->dev;
if (reason == MHI_CB_IDLE) {
MHI_LOG("Schedule runtime suspend 1\n");
pm_runtime_mark_last_busy(dev);
pm_request_autosuspend(dev);
}
}
int mhi_debugfs_trigger_m0(void *data, u64 val)
{
struct mhi_controller *mhi_cntrl = data;
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
MHI_LOG("Trigger M3 Exit\n");
pm_runtime_get(&mhi_dev->pci_dev->dev);
pm_runtime_put(&mhi_dev->pci_dev->dev);
return 0;
}
int mhi_debugfs_trigger_m3(void *data, u64 val)
{
struct mhi_controller *mhi_cntrl = data;
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
MHI_LOG("Trigger M3 Entry\n");
pm_runtime_mark_last_busy(&mhi_dev->pci_dev->dev);
pm_request_autosuspend(&mhi_dev->pci_dev->dev);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m0_fops, NULL,
mhi_debugfs_trigger_m0, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m3_fops, NULL,
mhi_debugfs_trigger_m3, "%llu\n");
static int mhi_init_debugfs_trigger_go(void *data, u64 val)
{
struct mhi_controller *mhi_cntrl = data;
MHI_LOG("Trigger power up sequence\n");
mhi_async_power_up(mhi_cntrl);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(mhi_init_debugfs_trigger_go_fops, NULL,
mhi_init_debugfs_trigger_go, "%llu\n");
int mhi_init_debugfs_debug_show(struct seq_file *m, void *d)
{
seq_puts(m, "Enable debug mode to debug external soc\n");
seq_puts(m,
"Usage: echo 'devid,timeout,domain,smmu_cfg' > debug_mode\n");
seq_puts(m, "No spaces between parameters\n");
seq_puts(m, "\t1. devid : 0 or pci device id to register\n");
seq_puts(m, "\t2. timeout: mhi cmd/state transition timeout\n");
seq_puts(m, "\t3. domain: Rootcomplex\n");
seq_puts(m, "\t4. smmu_cfg: smmu configuration mask:\n");
seq_puts(m, "\t\t- BIT0: ATTACH\n");
seq_puts(m, "\t\t- BIT1: S1 BYPASS\n");
seq_puts(m, "\t\t-BIT2: FAST_MAP\n");
seq_puts(m, "\t\t-BIT3: ATOMIC\n");
seq_puts(m, "\t\t-BIT4: FORCE_COHERENT\n");
seq_puts(m, "\t\t-BIT5: GEOMETRY\n");
seq_puts(m, "\tAll timeout are in ms, enter 0 to keep default\n");
seq_puts(m, "Examples inputs: '0x307,10000'\n");
seq_puts(m, "\techo '0,10000,1'\n");
seq_puts(m, "\techo '0x307,10000,0,0x3d'\n");
seq_puts(m, "firmware image name will be changed to debug.mbn\n");
return 0;
}
static int mhi_init_debugfs_debug_open(struct inode *node, struct file *file)
{
return single_open(file, mhi_init_debugfs_debug_show, NULL);
}
static ssize_t mhi_init_debugfs_debug_write(struct file *fp,
const char __user *ubuf,
size_t count,
loff_t *pos)
{
char *buf = kmalloc(count + 1, GFP_KERNEL);
/* #,devid,timeout,domain,smmu-cfg */
int args[5] = {0};
static char const *dbg_fw = "debug.mbn";
int ret;
struct mhi_controller *mhi_cntrl = fp->f_inode->i_private;
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct pci_device_id *id;
if (!buf)
return -ENOMEM;
ret = copy_from_user(buf, ubuf, count);
if (ret)
goto error_read;
buf[count] = 0;
get_options(buf, ARRAY_SIZE(args), args);
kfree(buf);
/* override default parameters */
mhi_cntrl->fw_image = dbg_fw;
mhi_cntrl->edl_image = dbg_fw;
if (args[0] >= 2 && args[2])
mhi_cntrl->timeout_ms = args[2];
if (args[0] >= 3 && args[3])
mhi_cntrl->domain = args[3];
if (args[0] >= 4 && args[4])
mhi_dev->smmu_cfg = args[4];
/* If it's a new device id register it */
if (args[0] && args[1]) {
/* find the debug_id and overwrite it */
for (id = mhi_pcie_device_id; id->vendor; id++)
if (id->device == MHI_PCIE_DEBUG_ID) {
id->device = args[1];
pci_unregister_driver(&mhi_pcie_driver);
ret = pci_register_driver(&mhi_pcie_driver);
}
}
mhi_dev->debug_mode = true;
debugfs_create_file("go", 0444, mhi_cntrl->parent, mhi_cntrl,
&mhi_init_debugfs_trigger_go_fops);
pr_info(
"%s: ret:%d pcidev:0x%x smm_cfg:%u timeout:%u\n",
__func__, ret, args[1], mhi_dev->smmu_cfg,
mhi_cntrl->timeout_ms);
return count;
error_read:
kfree(buf);
return ret;
}
static const struct file_operations debugfs_debug_ops = {
.open = mhi_init_debugfs_debug_open,
.release = single_release,
.read = seq_read,
.write = mhi_init_debugfs_debug_write,
};
static struct mhi_controller * mhi_platform_probe(struct pci_dev *pci_dev)
{
struct mhi_controller *mhi_cntrl;
struct mhi_dev *mhi_dev;
u64 addr_win[2];
int ret;
mhi_cntrl = mhi_alloc_controller(sizeof(*mhi_dev));
if (!mhi_cntrl) {
pr_err("mhi_alloc_controller fail\n");
return NULL;
}
mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
mhi_cntrl->dev_id = pci_dev->device;
mhi_cntrl->domain = pci_domain_nr(pci_dev->bus);
mhi_cntrl->bus = pci_dev->bus->number;
mhi_cntrl->slot = PCI_SLOT(pci_dev->devfn);
mhi_dev->smmu_cfg = 0;
#if 0 //def CONFIG_HAVE_MEMBLOCK
addr_win[0] = memblock_start_of_DRAM();
addr_win[1] = memblock_end_of_DRAM();
#else
#define MHI_MEM_BASE_DEFAULT 0x000000000
#define MHI_MEM_SIZE_DEFAULT 0x2000000000
addr_win[0] = MHI_MEM_BASE_DEFAULT;
addr_win[1] = MHI_MEM_SIZE_DEFAULT;
if (sizeof(dma_addr_t) == 4) {
addr_win[1] = 0xFFFFFFFF;
}
#endif
mhi_cntrl->iova_start = addr_win[0];
mhi_cntrl->iova_stop = addr_win[1];
mhi_dev->pci_dev = pci_dev;
mhi_cntrl->pci_dev = pci_dev;
/* setup power management apis */
mhi_cntrl->status_cb = mhi_status_cb;
mhi_cntrl->runtime_get = mhi_runtime_get;
mhi_cntrl->runtime_put = mhi_runtime_put;
mhi_cntrl->link_status = mhi_link_status;
ret = mhi_arch_platform_init(mhi_dev);
if (ret)
goto error_probe;
ret = mhi_register_mhi_controller(mhi_cntrl);
if (ret)
goto error_register;
if (mhi_cntrl->parent)
debugfs_create_file("debug_mode", 0444, mhi_cntrl->parent,
mhi_cntrl, &debugfs_debug_ops);
return mhi_cntrl;
error_register:
mhi_arch_platform_deinit(mhi_dev);
error_probe:
mhi_free_controller(mhi_cntrl);
return NULL;
}
int mhi_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *device_id)
{
struct mhi_controller *mhi_cntrl = NULL;
u32 domain = pci_domain_nr(pci_dev->bus);
u32 bus = pci_dev->bus->number;
u32 slot = PCI_SLOT(pci_dev->devfn);
struct mhi_dev *mhi_dev;
int ret;
pr_info("%s pci_dev->name = %s, domain=%d, bus=%d, slot=%d, vendor=%04X, device=%04X\n",
__func__, dev_name(&pci_dev->dev), domain, bus, slot, pci_dev->vendor, pci_dev->device);
mhi_cntrl = mhi_platform_probe(pci_dev);
if (!mhi_cntrl) {
pr_err("mhi_platform_probe fail\n");
return -EPROBE_DEFER;
}
mhi_cntrl->dev_id = pci_dev->device;
mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
mhi_dev->pci_dev = pci_dev;
mhi_dev->powered_on = true;
ret = mhi_arch_pcie_init(mhi_cntrl);
if (ret) {
MHI_ERR("Error mhi_arch_pcie_init, ret:%d\n", ret);
return ret;
}
ret = mhi_arch_iommu_init(mhi_cntrl);
if (ret) {
MHI_ERR("Error mhi_arch_iommu_init, ret:%d\n", ret);
goto error_iommu_init;
}
ret = mhi_init_pci_dev(mhi_cntrl);
if (ret) {
MHI_ERR("Error mhi_init_pci_dev, ret:%d\n", ret);
goto error_init_pci;
}
/* start power up sequence if not in debug mode */
if (!mhi_dev->debug_mode) {
ret = mhi_async_power_up(mhi_cntrl);
if (ret) {
MHI_ERR("Error mhi_async_power_up, ret:%d\n", ret);
goto error_power_up;
}
}
#if 0
pm_runtime_mark_last_busy(&pci_dev->dev);
pm_runtime_allow(&pci_dev->dev);
pm_runtime_disable(&pci_dev->dev);
#endif
if (mhi_cntrl->dentry) {
debugfs_create_file("m0", 0444, mhi_cntrl->dentry, mhi_cntrl,
&debugfs_trigger_m0_fops);
debugfs_create_file("m3", 0444, mhi_cntrl->dentry, mhi_cntrl,
&debugfs_trigger_m3_fops);
}
dev_set_drvdata(&pci_dev->dev, mhi_cntrl);
MHI_LOG("Return successful\n");
return 0;
error_power_up:
mhi_deinit_pci_dev(mhi_cntrl);
error_init_pci:
mhi_arch_iommu_deinit(mhi_cntrl);
error_iommu_init:
mhi_arch_pcie_deinit(mhi_cntrl);
return ret;
}
static void mhi_pci_remove(struct pci_dev *pci_dev)
{
struct mhi_controller *mhi_cntrl = (struct mhi_controller *)dev_get_drvdata(&pci_dev->dev);
if (mhi_cntrl && mhi_cntrl->pci_dev == pci_dev) {
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
MHI_LOG("%s\n", dev_name(&pci_dev->dev));
if (!mhi_dev->debug_mode) {
mhi_power_down(mhi_cntrl, 1);
}
mhi_deinit_pci_dev(mhi_cntrl);
mhi_arch_iommu_deinit(mhi_cntrl);
mhi_arch_pcie_deinit(mhi_cntrl);
mhi_unregister_mhi_controller(mhi_cntrl);
}
}
static const struct dev_pm_ops pm_ops = {
SET_RUNTIME_PM_OPS(mhi_runtime_suspend,
mhi_runtime_resume,
mhi_runtime_idle)
SET_SYSTEM_SLEEP_PM_OPS(mhi_system_suspend, mhi_system_resume)
};
static struct pci_driver mhi_pcie_driver = {
.name = "mhi",
.id_table = mhi_pcie_device_id,
.probe = mhi_pci_probe,
.remove = mhi_pci_remove,
.driver = {
.pm = &pm_ops
}
};
int __init mhi_controller_qcom_init(void)
{
return pci_register_driver(&mhi_pcie_driver);
};
void mhi_controller_qcom_exit(void)
{
pr_info("%s enter\n", __func__);
pci_unregister_driver(&mhi_pcie_driver);
pr_info("%s exit\n", __func__);
}

View File

@ -0,0 +1,92 @@
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _MHI_QCOM_
#define _MHI_QCOM_
/* iova cfg bitmask */
#define MHI_SMMU_ATTACH BIT(0)
#define MHI_SMMU_S1_BYPASS BIT(1)
#define MHI_SMMU_FAST BIT(2)
#define MHI_SMMU_ATOMIC BIT(3)
#define MHI_SMMU_FORCE_COHERENT BIT(4)
#define MHI_PCIE_VENDOR_ID (0x17cb)
#define MHI_PCIE_DEBUG_ID (0xffff)
#define MHI_RPM_SUSPEND_TMR_MS (3000)
#define MHI_PCI_BAR_NUM (0)
struct mhi_dev {
struct pci_dev *pci_dev;
u32 smmu_cfg;
int resn;
void *arch_info;
bool powered_on;
bool debug_mode;
};
void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl);
int mhi_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *device_id);
#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,65 ))
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
int rc = dma_set_mask(dev, mask);
if (rc == 0)
dma_set_coherent_mask(dev, mask);
return rc;
}
#endif
static inline int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
mhi_cntrl->dev = &mhi_dev->pci_dev->dev;
return dma_set_mask_and_coherent(mhi_cntrl->dev, DMA_BIT_MASK(64));
}
static inline void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl)
{
}
static inline int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
{
return 0;
}
static inline void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl)
{
}
static inline int mhi_arch_platform_init(struct mhi_dev *mhi_dev)
{
return 0;
}
static inline void mhi_arch_platform_deinit(struct mhi_dev *mhi_dev)
{
}
static inline int mhi_arch_link_off(struct mhi_controller *mhi_cntrl,
bool graceful)
{
return 0;
}
static inline int mhi_arch_link_on(struct mhi_controller *mhi_cntrl)
{
return 0;
}
#endif /* _MHI_QCOM_ */

File diff suppressed because it is too large

View File

@ -0,0 +1,44 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
#ifndef _MHI_QTI_
#define _MHI_QTI_
/* iova cfg bitmask */
#define MHI_SMMU_ATTACH BIT(0)
#define MHI_SMMU_S1_BYPASS BIT(1)
#define MHI_SMMU_FAST BIT(2)
#define MHI_SMMU_ATOMIC BIT(3)
#define MHI_SMMU_FORCE_COHERENT BIT(4)
#define MHI_PCIE_VENDOR_ID (0x17cb)
#define MHI_PCIE_DEBUG_ID (0xffff)
/* runtime suspend timer */
#define MHI_RPM_SUSPEND_TMR_MS (2000)
#define MHI_PCI_BAR_NUM (0)
struct mhi_dev {
struct pci_dev *pci_dev;
u32 smmu_cfg;
int resn;
void *arch_info;
bool powered_on;
dma_addr_t iova_start;
dma_addr_t iova_stop;
bool lpm_disabled;
};
void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl);
int mhi_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *device_id);
void mhi_pci_device_removed(struct pci_dev *pci_dev);
int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl);
void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl);
int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl);
void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl);
int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, bool graceful);
int mhi_arch_link_on(struct mhi_controller *mhi_cntrl);
#endif /* _MHI_QTI_ */

View File

@ -0,0 +1 @@
obj-$(CONFIG_MHI_BUS) +=mhi_init.o mhi_main.o mhi_pm.o mhi_boot.o mhi_dtr.o

View File

@ -0,0 +1,908 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
#ifndef _MHI_H_
#define _MHI_H_
#define PCIE_MHI_DRIVER_VERSION "V1.3.6"
#define ENABLE_MHI_MON
//#define ENABLE_IP_SW0
// #define ENABLE_ADPL
// #define ENABLE_QDSS
#include <linux/miscdevice.h>
typedef enum
{
MHI_CLIENT_LOOPBACK_OUT = 0,
MHI_CLIENT_LOOPBACK_IN = 1,
MHI_CLIENT_SAHARA_OUT = 2,
MHI_CLIENT_SAHARA_IN = 3,
MHI_CLIENT_DIAG_OUT = 4,
MHI_CLIENT_DIAG_IN = 5,
MHI_CLIENT_SSR_OUT = 6,
MHI_CLIENT_SSR_IN = 7,
MHI_CLIENT_QDSS_OUT = 8,
MHI_CLIENT_QDSS_IN = 9,
MHI_CLIENT_EFS_OUT = 10,
MHI_CLIENT_EFS_IN = 11,
MHI_CLIENT_MBIM_OUT = 12,
MHI_CLIENT_MBIM_IN = 13,
MHI_CLIENT_QMI_OUT = 14,
MHI_CLIENT_QMI_IN = 15,
MHI_CLIENT_QMI_2_OUT = 16,
MHI_CLIENT_QMI_2_IN = 17,
MHI_CLIENT_IP_CTRL_1_OUT = 18,
MHI_CLIENT_IP_CTRL_1_IN = 19,
MHI_CLIENT_IPCR_OUT = 20,
MHI_CLIENT_IPCR_IN = 21,
MHI_CLIENT_TEST_FW_OUT = 22,
MHI_CLIENT_TEST_FW_IN = 23,
MHI_CLIENT_RESERVED_0 = 24,
MHI_CLIENT_BOOT_LOG_IN = 25,
MHI_CLIENT_DCI_OUT = 26,
MHI_CLIENT_DCI_IN = 27,
MHI_CLIENT_QBI_OUT = 28,
MHI_CLIENT_QBI_IN = 29,
MHI_CLIENT_RESERVED_1_LOWER = 30,
MHI_CLIENT_RESERVED_1_UPPER = 31,
MHI_CLIENT_DUN_OUT = 32,
MHI_CLIENT_DUN_IN = 33,
MHI_CLIENT_EDL_OUT = 34,
MHI_CLIENT_EDL_IN = 35,
MHI_CLIENT_ADB_FB_OUT = 36,
MHI_CLIENT_ADB_FB_IN = 37,
MHI_CLIENT_RESERVED_2_LOWER = 38,
MHI_CLIENT_RESERVED_2_UPPER = 41,
MHI_CLIENT_CSVT_OUT = 42,
MHI_CLIENT_CSVT_IN = 43,
MHI_CLIENT_SMCT_OUT = 44,
MHI_CLIENT_SMCT_IN = 45,
MHI_CLIENT_IP_SW_0_OUT = 46,
MHI_CLIENT_IP_SW_0_IN = 47,
MHI_CLIENT_IP_SW_1_OUT = 48,
MHI_CLIENT_IP_SW_1_IN = 49,
MHI_CLIENT_RESERVED_3_LOWER = 50,
MHI_CLIENT_RESERVED_3_UPPER = 59,
MHI_CLIENT_TEST_0_OUT = 60,
MHI_CLIENT_TEST_0_IN = 61,
MHI_CLIENT_TEST_1_OUT = 62,
MHI_CLIENT_TEST_1_IN = 63,
MHI_CLIENT_TEST_2_OUT = 64,
MHI_CLIENT_TEST_2_IN = 65,
MHI_CLIENT_TEST_3_OUT = 66,
MHI_CLIENT_TEST_3_IN = 67,
MHI_CLIENT_RESERVED_4_LOWER = 68,
MHI_CLIENT_RESERVED_4_UPPER = 91,
MHI_CLIENT_OEM_0_OUT = 92,
MHI_CLIENT_OEM_0_IN = 93,
MHI_CLIENT_OEM_1_OUT = 94,
MHI_CLIENT_OEM_1_IN = 95,
MHI_CLIENT_OEM_2_OUT = 96,
MHI_CLIENT_OEM_2_IN = 97,
MHI_CLIENT_OEM_3_OUT = 98,
MHI_CLIENT_OEM_3_IN = 99,
MHI_CLIENT_IP_HW_0_OUT = 100,
MHI_CLIENT_IP_HW_0_IN = 101,
MHI_CLIENT_ADPL = 102,
MHI_CLIENT_IP_HW_QDSS = 103,
// MHI_CLIENT_RESERVED_5_LOWER = 103,
MHI_CLIENT_RESERVED_5_UPPER = 127,
MHI_MAX_CHANNELS = 128
}MHI_CLIENT_CHANNEL_TYPE;
/* Event Ring Index */
typedef enum
{
SW_EVT_RING = 0,
PRIMARY_EVENT_RING = SW_EVT_RING,
#ifdef ENABLE_IP_SW0
SW_0_OUT_EVT_RING,
SW_0_IN_EVT_RING,
#endif
IPA_OUT_EVENT_RING,
IPA_IN_EVENT_RING,
#ifdef ENABLE_ADPL
ADPL_EVT_RING,
#endif
#ifdef ENABLE_QDSS
QDSS_EVT_RING,
#endif
MAX_EVT_RING_IDX
}MHI_EVT_RING_IDX;
#define MHI_VERSION 0x01000000
#define MHIREGLEN_VALUE 0x100 /* **** WRONG VALUE *** */
#define MHI_MSI_INDEX 1
#define MAX_NUM_MHI_DEVICES 1
#define NUM_MHI_XFER_RINGS 128
#define NUM_MHI_EVT_RINGS MAX_EVT_RING_IDX
#define NUM_MHI_HW_EVT_RINGS 4
#define NUM_MHI_XFER_RING_ELEMENTS 16
#define NUM_MHI_EVT_RING_ELEMENTS (NUM_MHI_IPA_IN_RING_ELEMENTS*2) /* must be *2; a full event ring will trigger an x55 dump */
#define NUM_MHI_IPA_IN_RING_ELEMENTS 512
#define NUM_MHI_IPA_OUT_RING_ELEMENTS 512 /* UL aggregation is not used, so increase this */
#define NUM_MHI_DIAG_IN_RING_ELEMENTS 128
#define NUM_MHI_SW_IP_RING_ELEMENTS 512
#ifdef ENABLE_ADPL
#define NUM_MHI_ADPL_RING_ELEMENTS 256
#endif
#ifdef ENABLE_QDSS
#define NUM_MHI_QDSS_RING_ELEMENTS 256
#endif
/*
 * If the interrupt moderation time is set to 1 ms and more than
 * NUM_MHI_CHAN_RING_ELEMENTS buffers are sent to the modem within that
 * 1 ms window (e.g. during a firehose upgrade), the modem will not
 * raise an IRQ for those transfers.
 */
#define NUM_MHI_CHAN_RING_ELEMENTS 32 //8
#define MHI_EVT_CMD_QUEUE_SIZE 160
#define MHI_EVT_STATE_QUEUE_SIZE 128
#define MHI_EVT_XFER_QUEUE_SIZE 1024
#define CHAN_INBOUND(_x) ((_x)%2)
#define CHAN_SBL(_x) (((_x) == MHI_CLIENT_SAHARA_OUT) || \
((_x) == MHI_CLIENT_SAHARA_IN) || \
((_x) == MHI_CLIENT_BOOT_LOG_IN))
#define CHAN_EDL(_x) (((_x) == MHI_CLIENT_EDL_OUT) || \
((_x) == MHI_CLIENT_EDL_IN))
struct mhi_chan;
struct mhi_event;
struct mhi_ctxt;
struct mhi_cmd;
struct image_info;
struct bhi_vec_entry;
struct mhi_timesync;
struct mhi_buf_info;
/**
* enum MHI_CB - MHI callback
* @MHI_CB_IDLE: MHI entered idle state
* @MHI_CB_PENDING_DATA: New data available for client to process
* @MHI_CB_LPM_ENTER: MHI host entered low power mode
* @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
 * @MHI_CB_EE_RDDM: MHI device entered RDDM execution environment
* @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode exec env
 * @MHI_CB_SYS_ERROR: MHI device entered an error state (may recover)
* @MHI_CB_FATAL_ERROR: MHI device entered fatal error
*/
enum MHI_CB {
MHI_CB_IDLE,
MHI_CB_PENDING_DATA,
MHI_CB_LPM_ENTER,
MHI_CB_LPM_EXIT,
MHI_CB_EE_RDDM,
MHI_CB_EE_MISSION_MODE,
MHI_CB_SYS_ERROR,
MHI_CB_FATAL_ERROR,
};
/**
 * enum MHI_DEBUG_LEVEL - various debug levels
*/
enum MHI_DEBUG_LEVEL {
MHI_MSG_LVL_VERBOSE,
MHI_MSG_LVL_INFO,
MHI_MSG_LVL_ERROR,
MHI_MSG_LVL_CRITICAL,
MHI_MSG_LVL_MASK_ALL,
};
/*
GSI_XFER_FLAG_BEI: Block event interrupt
1: Event generated by this ring element must not assert an interrupt to the host
0: Event generated by this ring element must assert an interrupt to the host
GSI_XFER_FLAG_EOT: Interrupt on end of transfer
1: If an EOT condition is encountered when processing this ring element, an event is generated by the device with its completion code set to EOT.
0: If an EOT condition is encountered for this ring element, a completion event is not generated by the device unless IEOB is set to 1
GSI_XFER_FLAG_EOB: Interrupt on end of block
1: Device notifies host after processing this ring element by sending a completion event
0: Completion event is not required after processing this ring element
GSI_XFER_FLAG_CHAIN: Chain bit that identifies the ring elements in a TD
*/
/**
* enum MHI_FLAGS - Transfer flags
* @MHI_EOB: End of buffer for bulk transfer
* @MHI_EOT: End of transfer
* @MHI_CHAIN: Linked transfer
*/
enum MHI_FLAGS {
MHI_EOB,
MHI_EOT,
MHI_CHAIN,
};
/**
* enum mhi_device_type - Device types
* @MHI_XFER_TYPE: Handles data transfer
* @MHI_TIMESYNC_TYPE: Use for timesync feature
* @MHI_CONTROLLER_TYPE: Control device
*/
enum mhi_device_type {
MHI_XFER_TYPE,
MHI_TIMESYNC_TYPE,
MHI_CONTROLLER_TYPE,
};
/**
 * enum mhi_ee - device current execution environment
* @MHI_EE_PBL - device in PBL
* @MHI_EE_SBL - device in SBL
* @MHI_EE_AMSS - device in mission mode (firmware fully loaded)
* @MHI_EE_RDDM - device in ram dump collection mode
* @MHI_EE_WFW - device in WLAN firmware mode
* @MHI_EE_PTHRU - device in PBL but configured in pass thru mode
* @MHI_EE_EDL - device in emergency download mode
*/
enum mhi_ee {
MHI_EE_PBL = 0x0,
MHI_EE_SBL = 0x1,
MHI_EE_AMSS = 0x2,
MHI_EE_RDDM = 0x3,
MHI_EE_WFW = 0x4,
MHI_EE_PTHRU = 0x5,
MHI_EE_EDL = 0x6,
MHI_EE_FP = 0x7, /* FlashProg, Flash Programmer Environment */
MHI_EE_MAX_SUPPORTED = MHI_EE_FP,
MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */
MHI_EE_MAX,
};
/**
* enum mhi_dev_state - device current MHI state
*/
enum mhi_dev_state {
MHI_STATE_RESET = 0x0,
MHI_STATE_READY = 0x1,
MHI_STATE_M0 = 0x2,
MHI_STATE_M1 = 0x3,
MHI_STATE_M2 = 0x4,
MHI_STATE_M3 = 0x5,
MHI_STATE_BHI = 0x7,
MHI_STATE_SYS_ERR = 0xFF,
MHI_STATE_MAX,
};
extern const char * const mhi_ee_str[MHI_EE_MAX];
#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
"INVALID_EE" : mhi_ee_str[ee])
/**
 * struct image_info - firmware and rddm table
* @mhi_buf - Contain device firmware and rddm table
* @entries - # of entries in table
*/
struct image_info {
struct mhi_buf *mhi_buf;
struct bhi_vec_entry *bhi_vec;
u32 entries;
};
/**
* struct mhi_controller - Master controller structure for external modem
* @dev: Device associated with this controller
* @of_node: DT that has MHI configuration information
* @regs: Points to base of MHI MMIO register space
* @bhi: Points to base of MHI BHI register space
* @bhie: Points to base of MHI BHIe register space
* @wake_db: MHI WAKE doorbell register address
* @dev_id: PCIe device id of the external device
* @domain: PCIe domain the device connected to
* @bus: PCIe bus the device assigned to
* @slot: PCIe slot for the modem
* @iova_start: IOMMU starting address for data
* @iova_stop: IOMMU stop address for data
* @fw_image: Firmware image name for normal booting
* @edl_image: Firmware image name for emergency download mode
* @fbc_download: MHI host needs to do complete image transfer
* @rddm_size: RAM dump size that host should allocate for debugging purpose
* @sbl_size: SBL image size
* @seg_len: BHIe vector size
* @fbc_image: Points to firmware image buffer
* @rddm_image: Points to RAM dump buffer
 * @max_chan: Maximum number of channels the controller supports
* @mhi_chan: Points to channel configuration table
* @lpm_chans: List of channels that require LPM notifications
* @total_ev_rings: Total # of event rings allocated
* @hw_ev_rings: Number of hardware event rings
* @sw_ev_rings: Number of software event rings
* @msi_required: Number of msi required to operate
* @msi_allocated: Number of msi allocated by bus master
* @irq: base irq # to request
* @mhi_event: MHI event ring configurations table
* @mhi_cmd: MHI command ring configurations table
* @mhi_ctxt: MHI device context, shared memory between host and device
* @timeout_ms: Timeout in ms for state transitions
* @pm_state: Power management state
* @ee: MHI device execution environment
* @dev_state: MHI STATE
 * @status_cb: CB function to notify various power states to bus master
* @link_status: Query link status in case of abnormal value read from device
* @runtime_get: Async runtime resume function
 * @runtime_put: Release votes
* @time_get: Return host time in us
* @lpm_disable: Request controller to disable link level low power modes
* @lpm_enable: Controller may enable link level low power modes again
* @priv_data: Points to bus master's private data
*/
struct mhi_controller {
struct list_head node;
struct mhi_device *mhi_dev;
/* device node for iommu ops */
struct device *dev;
struct device_node *of_node;
/* mmio base */
phys_addr_t base_addr;
void __iomem *regs;
void __iomem *bhi;
void __iomem *bhie;
void __iomem *wake_db;
/* device topology */
u32 vendor;
u32 dev_id;
u32 domain;
u32 bus;
u32 slot;
u32 cntrl_idx;
struct device *cntrl_dev;
/* addressing window */
dma_addr_t iova_start;
dma_addr_t iova_stop;
/* fw images */
const char *fw_image;
const char *edl_image;
/* mhi host manages downloading entire fbc images */
bool fbc_download;
size_t rddm_size;
size_t sbl_size;
size_t seg_len;
u32 session_id;
u32 sequence_id;
struct image_info *fbc_image;
struct image_info *rddm_image;
/* physical channel config data */
u32 max_chan;
struct mhi_chan *mhi_chan;
struct list_head lpm_chans; /* these chan require lpm notification */
/* physical event config data */
u32 total_ev_rings;
u32 hw_ev_rings;
u32 sw_ev_rings;
u32 msi_required;
u32 msi_allocated;
u32 msi_irq_base;
int *irq; /* interrupt table */
struct mhi_event *mhi_event;
/* cmd rings */
struct mhi_cmd *mhi_cmd;
/* mhi context (shared with device) */
struct mhi_ctxt *mhi_ctxt;
u32 timeout_ms;
/* caller should grab pm_mutex for suspend/resume operations */
struct mutex pm_mutex;
bool pre_init;
rwlock_t pm_lock;
u32 pm_state;
enum mhi_ee ee;
enum mhi_dev_state dev_state;
bool wake_set;
atomic_t dev_wake;
atomic_t alloc_size;
atomic_t pending_pkts;
struct list_head transition_list;
spinlock_t transition_lock;
spinlock_t wlock;
/* debug counters */
u32 M0, M2, M3;
/* worker for different state transitions */
struct work_struct st_worker;
struct work_struct fw_worker;
struct work_struct syserr_worker;
struct delayed_work ready_worker;
wait_queue_head_t state_event;
/* shadow functions */
void (*status_cb)(struct mhi_controller *mhi_cntrl, void *priv,
enum MHI_CB reason);
int (*link_status)(struct mhi_controller *mhi_cntrl, void *priv);
void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override);
void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override);
int (*runtime_get)(struct mhi_controller *mhi_cntrl, void *priv);
void (*runtime_put)(struct mhi_controller *mhi_cntrl, void *priv);
void (*runtime_mark_last_busy)(struct mhi_controller *mhi_cntrl, void *priv);
u64 (*time_get)(struct mhi_controller *mhi_cntrl, void *priv);
int (*lpm_disable)(struct mhi_controller *mhi_cntrl, void *priv);
int (*lpm_enable)(struct mhi_controller *mhi_cntrl, void *priv);
int (*map_single)(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf);
void (*unmap_single)(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf);
/* channel to control DTR messaging */
struct mhi_device *dtr_dev;
/* bounce buffer settings */
bool bounce_buf;
size_t buffer_len;
/* supports time sync feature */
struct mhi_timesync *mhi_tsync;
struct mhi_device *tsync_dev;
/* kernel log level */
enum MHI_DEBUG_LEVEL klog_lvl;
int klog_slient;
/* private log level controller driver to set */
enum MHI_DEBUG_LEVEL log_lvl;
/* controller specific data */
void *priv_data;
void *log_buf;
struct dentry *dentry;
struct dentry *parent;
struct miscdevice miscdev;
#ifdef ENABLE_MHI_MON
spinlock_t lock;
/* Ref */
int nreaders; /* Under mon_lock AND mbus->lock */
struct list_head r_list; /* Chain of readers (usually one) */
struct kref ref; /* Under mon_lock */
/* Stats */
unsigned int cnt_events;
unsigned int cnt_text_lost;
#endif
};
#ifdef ENABLE_MHI_MON
struct mhi_tre;
struct mon_reader {
struct list_head r_link;
struct mhi_controller *m_bus;
void *r_data; /* Use container_of instead? */
void (*rnf_submit)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len);
void (*rnf_receive)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len);
void (*rnf_complete)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre);
};
#endif
/**
* struct mhi_device - mhi device structure associated bind to channel
* @dev: Device associated with the channels
 * @mtu: Maximum # of bytes the controller supports
* @ul_chan_id: MHI channel id for UL transfer
* @dl_chan_id: MHI channel id for DL transfer
* @tiocm: Device current terminal settings
* @priv: Driver private data
*/
struct mhi_device {
struct device dev;
u32 vendor;
u32 dev_id;
u32 domain;
u32 bus;
u32 slot;
size_t mtu;
int ul_chan_id;
int dl_chan_id;
int ul_event_id;
int dl_event_id;
u32 tiocm;
const struct mhi_device_id *id;
const char *chan_name;
struct mhi_controller *mhi_cntrl;
struct mhi_chan *ul_chan;
struct mhi_chan *dl_chan;
atomic_t dev_wake;
enum mhi_device_type dev_type;
void *priv_data;
int (*ul_xfer)(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
void *buf, size_t len, enum MHI_FLAGS flags);
int (*dl_xfer)(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
void *buf, size_t size, enum MHI_FLAGS flags);
void (*status_cb)(struct mhi_device *mhi_dev, enum MHI_CB reason);
};
/**
* struct mhi_result - Completed buffer information
* @buf_addr: Address of data buffer
* @dir: Channel direction
* @bytes_xfer: # of bytes transferred
 * @transaction_status: Status of the last transfer
*/
struct mhi_result {
void *buf_addr;
enum dma_data_direction dir;
size_t bytes_xferd;
int transaction_status;
};
/**
* struct mhi_buf - Describes the buffer
* @page: buffer as a page
* @buf: cpu address for the buffer
* @phys_addr: physical address of the buffer
* @dma_addr: iommu address for the buffer
* @skb: skb of ip packet
* @len: # of bytes
* @name: Buffer label, for offload channel configurations name must be:
* ECA - Event context array data
* CCA - Channel context array data
*/
struct mhi_buf {
struct list_head node;
struct page *page;
void *buf;
phys_addr_t phys_addr;
dma_addr_t dma_addr;
struct sk_buff *skb;
size_t len;
const char *name; /* ECA, CCA */
};
/**
* struct mhi_driver - mhi driver information
* @id_table: NULL terminated channel ID names
* @ul_xfer_cb: UL data transfer callback
* @dl_xfer_cb: DL data transfer callback
* @status_cb: Asynchronous status callback
*/
struct mhi_driver {
const struct mhi_device_id *id_table;
int (*probe)(struct mhi_device *mhi_dev,
const struct mhi_device_id *id);
void (*remove)(struct mhi_device *mhi_dev);
void (*ul_xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *res);
void (*dl_xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *res);
void (*status_cb)(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb);
struct device_driver driver;
};
#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver)
#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
static inline void mhi_device_set_devdata(struct mhi_device *mhi_dev,
void *priv)
{
mhi_dev->priv_data = priv;
}
static inline void *mhi_device_get_devdata(struct mhi_device *mhi_dev)
{
return mhi_dev->priv_data;
}
/**
* mhi_queue_transfer - Queue a buffer to hardware
 * All transfers are asynchronous
* @mhi_dev: Device associated with the channels
* @dir: Data direction
* @buf: Data buffer (skb for hardware channels)
* @len: Size in bytes
* @mflags: Interrupt flags for the device
*/
static inline int mhi_queue_transfer(struct mhi_device *mhi_dev,
enum dma_data_direction dir,
void *buf,
size_t len,
enum MHI_FLAGS mflags)
{
if (dir == DMA_TO_DEVICE)
return mhi_dev->ul_xfer(mhi_dev, mhi_dev->ul_chan, buf, len,
mflags);
else
return mhi_dev->dl_xfer(mhi_dev, mhi_dev->dl_chan, buf, len,
mflags);
}
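/*
 * Illustrative usage sketch (not part of the original driver): how a client
 * might hand an inbound buffer to the hardware with mhi_queue_transfer().
 * The kmalloc/kfree handling here is an assumption for illustration; the
 * DTR driver later in this series follows the same pattern.
 *
 *	void *buf = kmalloc(mhi_dev->mtu, GFP_KERNEL);
 *
 *	if (buf && mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf,
 *				      mhi_dev->mtu, MHI_EOT))
 *		kfree(buf);	// queueing failed, release the buffer
 */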
static inline void *mhi_controller_get_devdata(struct mhi_controller *mhi_cntrl)
{
return mhi_cntrl->priv_data;
}
static inline void mhi_free_controller(struct mhi_controller *mhi_cntrl)
{
kfree(mhi_cntrl);
}
/**
* mhi_driver_register - Register driver with MHI framework
* @mhi_drv: mhi_driver structure
*/
int mhi_driver_register(struct mhi_driver *mhi_drv);
/**
* mhi_driver_unregister - Unregister a driver for mhi_devices
* @mhi_drv: mhi_driver structure
*/
void mhi_driver_unregister(struct mhi_driver *mhi_drv);
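/*
 * Illustrative sketch (not part of the original driver): registering a
 * minimal MHI client. The channel name "EXAMPLE" and the example_* symbols
 * are hypothetical; the DTR driver registered later in this series is a
 * real instance of the same pattern.
 *
 *	static const struct mhi_device_id example_id_table[] = {
 *		{ .chan = "EXAMPLE", .driver_data = 0 },
 *		{},
 *	};
 *
 *	static struct mhi_driver example_driver = {
 *		.id_table = example_id_table,
 *		.probe = example_probe,
 *		.remove = example_remove,
 *		.ul_xfer_cb = example_ul_cb,
 *		.dl_xfer_cb = example_dl_cb,
 *		.driver = { .name = "example", .owner = THIS_MODULE },
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return mhi_driver_register(&example_driver);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		mhi_driver_unregister(&example_driver);
 *	}
 */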
/**
* mhi_device_configure - configure ECA or CCA context
 * For offload channels that the client manages, call this
* function to configure channel context or event context
* array associated with the channel
 * @mhi_dev: Device associated with the channels
* @dir: Direction of the channel
* @mhi_buf: Configuration data
* @elements: # of configuration elements
*/
int mhi_device_configure(struct mhi_device *mhi_dev,
enum dma_data_direction dir,
struct mhi_buf *mhi_buf,
int elements);
/**
* mhi_device_get - disable all low power modes
 * Only disables lpm; it does not immediately exit low power mode
 * if the controller is already in a low power mode
* @mhi_dev: Device associated with the channels
*/
void mhi_device_get(struct mhi_device *mhi_dev);
/**
* mhi_device_get_sync - disable all low power modes
 * Synchronously disable all low power modes and exit low power mode if
 * the controller is already in a low power state
* @mhi_dev: Device associated with the channels
*/
int mhi_device_get_sync(struct mhi_device *mhi_dev);
/**
* mhi_device_put - re-enable low power modes
* @mhi_dev: Device associated with the channels
*/
void mhi_device_put(struct mhi_device *mhi_dev);
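/*
 * Illustrative sketch (not part of the original driver): the usual voting
 * pattern around a burst of I/O. The surrounding function is hypothetical
 * and error handling is reduced to the minimum.
 *
 *	ret = mhi_device_get_sync(mhi_dev);	// exit low power modes
 *	if (!ret) {
 *		// ... queue transfers, wait for completions ...
 *		mhi_device_put(mhi_dev);	// allow low power modes again
 *	}
 */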
/**
* mhi_prepare_for_transfer - setup channel for data transfer
* Moves both UL and DL channel from RESET to START state
* @mhi_dev: Device associated with the channels
*/
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
/**
 * mhi_unprepare_from_transfer - unprepare the channels
* Moves both UL and DL channels to RESET state
* @mhi_dev: Device associated with the channels
*/
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);
/**
* mhi_get_no_free_descriptors - Get transfer ring length
* Get # of TD available to queue buffers
* @mhi_dev: Device associated with the channels
* @dir: Direction of the channel
*/
int mhi_get_no_free_descriptors(struct mhi_device *mhi_dev,
enum dma_data_direction dir);
/**
* mhi_poll - poll for any available data to consume
* This is only applicable for DL direction
* @mhi_dev: Device associated with the channels
 * @budget: Number of descriptors to service before returning
*/
int mhi_poll(struct mhi_device *mhi_dev, u32 budget);
/**
* mhi_ioctl - user space IOCTL support for MHI channels
* Native support for setting TIOCM
* @mhi_dev: Device associated with the channels
* @cmd: IOCTL cmd
 * @arg: Optional parameter, ioctl cmd specific
*/
long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg);
/**
* mhi_alloc_controller - Allocate mhi_controller structure
* Allocate controller structure and additional data for controller
* private data. You may get the private data pointer by calling
* mhi_controller_get_devdata
* @size: # of additional bytes to allocate
*/
struct mhi_controller *mhi_alloc_controller(size_t size);
/**
* of_register_mhi_controller - Register MHI controller
* Registers MHI controller with MHI bus framework. DT must be supported
* @mhi_cntrl: MHI controller to register
*/
int of_register_mhi_controller(struct mhi_controller *mhi_cntrl);
void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl);
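/*
 * Illustrative sketch (not part of the original driver): the controller glue
 * allocates the structure, fills in its callbacks and topology fields, then
 * registers it. Exactly which fields are mandatory is defined by the bus
 * layer and the glue code (mhi_qti.c in this series); the subset below and
 * the my_* helpers are assumptions for illustration only.
 *
 *	struct mhi_controller *mhi_cntrl = mhi_alloc_controller(sizeof(struct my_priv));
 *
 *	mhi_cntrl->dev = &pci_dev->dev;
 *	mhi_cntrl->status_cb = my_status_cb;
 *	mhi_cntrl->runtime_get = my_runtime_get;
 *	mhi_cntrl->runtime_put = my_runtime_put;
 *	ret = of_register_mhi_controller(mhi_cntrl);
 */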
/**
* mhi_bdf_to_controller - Look up a registered controller
* Search for controller based on device identification
* @domain: RC domain of the device
* @bus: Bus device connected to
* @slot: Slot device assigned to
* @dev_id: Device Identification
*/
struct mhi_controller *mhi_bdf_to_controller(u32 domain, u32 bus, u32 slot,
u32 dev_id);
/**
* mhi_prepare_for_power_up - Do pre-initialization before power up
 * This is optional; call it before power up if the controller does not
 * want the bus framework to automatically free any allocated memory during
 * the shutdown process.
* @mhi_cntrl: MHI controller
*/
int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl);
/**
* mhi_async_power_up - Starts MHI power up sequence
* @mhi_cntrl: MHI controller
*/
int mhi_async_power_up(struct mhi_controller *mhi_cntrl);
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl);
/**
* mhi_power_down - Start MHI power down sequence
* @mhi_cntrl: MHI controller
 * @graceful: if the link is still accessible, do a graceful shutdown;
 * otherwise shut down the host without putting the device into RESET state
*/
void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);
/**
 * mhi_unprepare_after_power_down - free any memory allocated for power up
* @mhi_cntrl: MHI controller
*/
void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl);
/**
* mhi_pm_suspend - Move MHI into a suspended state
 * Transition to MHI M3 state from M0/M1/M2 state
* @mhi_cntrl: MHI controller
*/
int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
/**
* mhi_pm_resume - Resume MHI from suspended state
 * Transition to MHI M0 state from M3 state
* @mhi_cntrl: MHI controller
*/
int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
/**
* mhi_download_rddm_img - Download ramdump image from device for
* debugging purpose.
* @mhi_cntrl: MHI controller
 * @in_panic: true if trying to capture the image during a kernel panic
*/
int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic);
/**
* mhi_force_rddm_mode - Force external device into rddm mode
 * to collect device ramdump. This is useful if the host driver asserts
* and we need to see device state as well.
* @mhi_cntrl: MHI controller
*/
int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl);
/**
* mhi_get_remote_time_sync - Get external soc time relative to local soc time
* using MMIO method.
* @mhi_dev: Device associated with the channels
* @t_host: Pointer to output local soc time
* @t_dev: Pointer to output remote soc time
*/
int mhi_get_remote_time_sync(struct mhi_device *mhi_dev,
u64 *t_host,
u64 *t_dev);
/**
* mhi_get_mhi_state - Return MHI state of device
* @mhi_cntrl: MHI controller
*/
enum mhi_dev_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl);
/**
* mhi_set_mhi_state - Set device state
* @mhi_cntrl: MHI controller
* @state: state to set
*/
void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
enum mhi_dev_state state);
/**
 * mhi_is_active - helper function to determine if MHI is in active state
* @mhi_dev: client device
*/
static inline bool mhi_is_active(struct mhi_device *mhi_dev)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
mhi_cntrl->dev_state <= MHI_STATE_M3);
}
/**
* mhi_debug_reg_dump - dump MHI registers for debug purpose
* @mhi_cntrl: MHI controller
*/
void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl);
#ifdef CONFIG_MHI_DEBUG
#define MHI_VERB(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_VERBOSE) \
pr_debug("[D][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\
} while (0)
#else
#define MHI_VERB(fmt, ...)
#endif
#define MHI_LOG(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_INFO) \
pr_info("[I][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\
else if (!mhi_cntrl->klog_slient) \
printk(KERN_DEBUG "[I][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\
} while (0)
#define MHI_ERR(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_ERROR) \
pr_err("[E][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__); \
} while (0)
#define MHI_CRITICAL(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_CRITICAL) \
pr_alert("[C][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__); \
} while (0)
int mhi_register_mhi_controller(struct mhi_controller *mhi_cntrl);
void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl);
#ifndef MHI_NAME_SIZE
#define MHI_NAME_SIZE 32
/**
 * struct mhi_device_id - MHI device identification
 * @chan: MHI channel name
 * @driver_data: driver data
 */
struct mhi_device_id {
const char chan[MHI_NAME_SIZE];
unsigned long driver_data;
};
#endif
#endif /* _MHI_H_ */

View File

@ -0,0 +1,860 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include "mhi.h"
#include "mhi_internal.h"
/* Software defines */
/* BHI Version */
#define BHI_MAJOR_VERSION 0x1
#define BHI_MINOR_VERSION 0x1
#define MSMHWID_NUMDWORDS 6 /* Number of dwords that make the MSMHWID */
#define OEMPKHASH_NUMDWORDS 48 /* Number of dwords that make the OEM PK HASH */
#define IsPBLExecEnv(ExecEnv) ((ExecEnv == MHI_EE_PBL) || (ExecEnv == MHI_EE_EDL) )
typedef u32 ULONG;
typedef struct _bhi_info_type
{
ULONG bhi_ver_minor;
ULONG bhi_ver_major;
ULONG bhi_image_address_low;
ULONG bhi_image_address_high;
ULONG bhi_image_size;
ULONG bhi_rsvd1;
ULONG bhi_imgtxdb;
ULONG bhi_rsvd2;
ULONG bhi_msivec;
ULONG bhi_rsvd3;
ULONG bhi_ee;
ULONG bhi_status;
ULONG bhi_errorcode;
ULONG bhi_errdbg1;
ULONG bhi_errdbg2;
ULONG bhi_errdbg3;
ULONG bhi_sernum;
ULONG bhi_sblantirollbackver;
ULONG bhi_numsegs;
ULONG bhi_msmhwid[6];
ULONG bhi_oempkhash[48];
ULONG bhi_rsvd5;
}BHI_INFO_TYPE, *PBHI_INFO_TYPE;
static void PrintBhiInfo(struct mhi_controller *mhi_cntrl, BHI_INFO_TYPE *bhi_info)
{
ULONG index;
char str[128];
MHI_LOG("BHI Device Info...\n");
MHI_LOG("BHI Version = { Major = 0x%X Minor = 0x%X}\n", bhi_info->bhi_ver_major, bhi_info->bhi_ver_minor);
MHI_LOG("BHI Execution Environment = 0x%X\n", bhi_info->bhi_ee);
MHI_LOG("BHI Status = 0x%X\n", bhi_info->bhi_status);
MHI_LOG("BHI Error code = 0x%X { Dbg1 = 0x%X Dbg2 = 0x%X Dbg3 = 0x%X }\n", bhi_info->bhi_errorcode, bhi_info->bhi_errdbg1, bhi_info->bhi_errdbg2, bhi_info->bhi_errdbg3);
MHI_LOG("BHI Serial Number = 0x%X\n", bhi_info->bhi_sernum);
MHI_LOG("BHI SBL Anti-Rollback Ver = 0x%X\n", bhi_info->bhi_sblantirollbackver);
MHI_LOG("BHI Number of Segments = 0x%X\n", bhi_info->bhi_numsegs);
for (index = 0; index < 6; index++)
{
snprintf(str+3*index, sizeof(str)-3*index, "%02x ", bhi_info->bhi_msmhwid[index]);
}
MHI_LOG("BHI MSM HW-Id = %s\n", str);
for (index = 0; index < 24; index++)
{
snprintf(str+3*index, sizeof(str)-3*index, "%02x ", bhi_info->bhi_oempkhash[index]);
}
MHI_LOG("BHI OEM PK Hash = %s\n", str);
}
static u32 bhi_read_reg(struct mhi_controller *mhi_cntrl, u32 offset)
{
u32 out = 0;
int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, offset, &out);
return (ret) ? 0 : out;
}
static int BhiRead(struct mhi_controller *mhi_cntrl, BHI_INFO_TYPE *bhi_info)
{
ULONG index;
memset(bhi_info, 0x00, sizeof(BHI_INFO_TYPE));
/* bhi_ver */
bhi_info->bhi_ver_minor = bhi_read_reg(mhi_cntrl, BHI_BHIVERSION_MINOR);
bhi_info->bhi_ver_major = bhi_read_reg(mhi_cntrl, BHI_BHIVERSION_MAJOR);
bhi_info->bhi_image_address_low = bhi_read_reg(mhi_cntrl, BHI_IMGADDR_LOW);
bhi_info->bhi_image_address_high = bhi_read_reg(mhi_cntrl, BHI_IMGADDR_HIGH);
bhi_info->bhi_image_size = bhi_read_reg(mhi_cntrl, BHI_IMGSIZE);
bhi_info->bhi_rsvd1 = bhi_read_reg(mhi_cntrl, BHI_RSVD1);
bhi_info->bhi_imgtxdb = bhi_read_reg(mhi_cntrl, BHI_IMGTXDB);
bhi_info->bhi_rsvd2 = bhi_read_reg(mhi_cntrl, BHI_RSVD2);
bhi_info->bhi_msivec = bhi_read_reg(mhi_cntrl, BHI_INTVEC);
bhi_info->bhi_rsvd3 = bhi_read_reg(mhi_cntrl, BHI_RSVD3);
bhi_info->bhi_ee = bhi_read_reg(mhi_cntrl, BHI_EXECENV);
bhi_info->bhi_status = bhi_read_reg(mhi_cntrl, BHI_STATUS);
bhi_info->bhi_errorcode = bhi_read_reg(mhi_cntrl, BHI_ERRCODE);
bhi_info->bhi_errdbg1 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG1);
bhi_info->bhi_errdbg2 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG2);
bhi_info->bhi_errdbg3 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG3);
bhi_info->bhi_sernum = bhi_read_reg(mhi_cntrl, BHI_SERIALNU);
bhi_info->bhi_sblantirollbackver = bhi_read_reg(mhi_cntrl, BHI_SBLANTIROLLVER);
bhi_info->bhi_numsegs = bhi_read_reg(mhi_cntrl, BHI_NUMSEG);
for (index = 0; index < MSMHWID_NUMDWORDS; index++)
{
bhi_info->bhi_msmhwid[index] = bhi_read_reg(mhi_cntrl, BHI_MSMHWID(index));
}
for (index = 0; index < OEMPKHASH_NUMDWORDS; index++)
{
bhi_info->bhi_oempkhash[index] = bhi_read_reg(mhi_cntrl, BHI_OEMPKHASH(index));
}
bhi_info->bhi_rsvd5 = bhi_read_reg(mhi_cntrl, BHI_RSVD5);
PrintBhiInfo(mhi_cntrl, bhi_info);
/* Check the Execution Environment */
if (!IsPBLExecEnv(bhi_info->bhi_ee))
{
MHI_LOG("E - EE: 0x%X Expected PBL/EDL\n", bhi_info->bhi_ee);
}
/* success */
return 0;
}
/* setup rddm vector table for rddm transfer */
static void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct image_info *img_info)
{
struct mhi_buf *mhi_buf = img_info->mhi_buf;
struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
int i = 0;
for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
MHI_VERB("Setting vector:%pad size:%zu\n",
&mhi_buf->dma_addr, mhi_buf->len);
bhi_vec->dma_addr = mhi_buf->dma_addr;
bhi_vec->size = mhi_buf->len;
}
}
/* collect rddm during kernel panic */
static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
{
int ret;
struct mhi_buf *mhi_buf;
u32 sequence_id;
u32 rx_status;
enum mhi_ee ee;
struct image_info *rddm_image = mhi_cntrl->rddm_image;
const u32 delayus = 2000;
u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
const u32 rddm_timeout_us = 200000;
int rddm_retry = rddm_timeout_us / delayus; /* time to enter rddm */
void __iomem *base = mhi_cntrl->bhie;
MHI_LOG("Entered with pm_state:%s dev_state:%s ee:%s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
TO_MHI_STATE_STR(mhi_cntrl->dev_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee));
/*
* This should only be executing during a kernel panic, we expect all
* other cores to shutdown while we're collecting rddm buffer. After
* returning from this function, we expect device to reset.
*
 * Normally, we would read/write pm_state only after grabbing
 * pm_lock; since we're in a panic, we skip it. There is also no
 * guarantee this state change will take effect, since we're setting
 * it without grabbing pm_lock; it's best effort.
*/
mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
/* update should take the effect immediately */
smp_wmb();
/* setup the RX vector table */
mhi_rddm_prepare(mhi_cntrl, rddm_image);
mhi_buf = &rddm_image->mhi_buf[rddm_image->entries - 1];
MHI_LOG("Starting BHIe programming for RDDM\n");
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
upper_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
sequence_id = get_random_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
#else
sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
#endif
if (unlikely(!sequence_id))
sequence_id = 1;
mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
sequence_id);
MHI_LOG("Trigger device into RDDM mode\n");
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
MHI_LOG("Waiting for device to enter RDDM\n");
while (rddm_retry--) {
ee = mhi_get_exec_env(mhi_cntrl);
if (ee == MHI_EE_RDDM)
break;
udelay(delayus);
}
if (rddm_retry <= 0) {
		/* Issue a hardware reset; this will force the device to enter RDDM */
		MHI_LOG(
			"Did not enter RDDM, triggering host-requested reset to force RDDM\n");
mhi_write_reg(mhi_cntrl, mhi_cntrl->regs,
MHI_SOC_RESET_REQ_OFFSET, MHI_SOC_RESET_REQ);
udelay(delayus);
}
ee = mhi_get_exec_env(mhi_cntrl);
MHI_LOG("Waiting for image download completion, current EE:%s\n",
TO_MHI_EXEC_STR(ee));
while (retry--) {
ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
BHIE_RXVECSTATUS_STATUS_BMSK,
BHIE_RXVECSTATUS_STATUS_SHFT,
&rx_status);
if (ret)
return -EIO;
if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) {
MHI_LOG("RDDM successfully collected\n");
return 0;
}
udelay(delayus);
}
ee = mhi_get_exec_env(mhi_cntrl);
ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
MHI_ERR("Did not complete RDDM transfer\n");
MHI_ERR("Current EE:%s\n", TO_MHI_EXEC_STR(ee));
MHI_ERR("RXVEC_STATUS:0x%x, ret:%d\n", rx_status, ret);
return -EIO;
}
/* download ramdump image from device */
int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
{
void __iomem *base = mhi_cntrl->bhie;
rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
struct image_info *rddm_image = mhi_cntrl->rddm_image;
struct mhi_buf *mhi_buf;
int ret;
u32 rx_status;
u32 sequence_id;
if (!rddm_image)
return -ENOMEM;
if (in_panic)
return __mhi_download_rddm_in_panic(mhi_cntrl);
MHI_LOG("Waiting for device to enter RDDM state from EE:%s\n",
TO_MHI_EXEC_STR(mhi_cntrl->ee));
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_cntrl->ee == MHI_EE_RDDM ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI is not in valid state, pm_state:%s ee:%s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee));
return -EIO;
}
mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
/* vector table is the last entry */
mhi_buf = &rddm_image->mhi_buf[rddm_image->entries - 1];
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
read_unlock_bh(pm_lock);
return -EIO;
}
MHI_LOG("Starting BHIe Programming for RDDM\n");
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
upper_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
sequence_id = get_random_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
#else
sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
#endif
mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
sequence_id);
read_unlock_bh(pm_lock);
MHI_LOG("Upper:0x%x Lower:0x%x len:0x%zx sequence:%u\n",
upper_32_bits(mhi_buf->dma_addr),
lower_32_bits(mhi_buf->dma_addr),
mhi_buf->len, sequence_id);
MHI_LOG("Waiting for image download completion\n");
/* waiting for image download completion */
wait_event_timeout(mhi_cntrl->state_event,
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
mhi_read_reg_field(mhi_cntrl, base,
BHIE_RXVECSTATUS_OFFS,
BHIE_RXVECSTATUS_STATUS_BMSK,
BHIE_RXVECSTATUS_STATUS_SHFT,
&rx_status) || rx_status,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
return -EIO;
return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
}
EXPORT_SYMBOL(mhi_download_rddm_img);
static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
const struct mhi_buf *mhi_buf)
{
void __iomem *base = mhi_cntrl->bhie;
rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
u32 tx_status;
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
read_unlock_bh(pm_lock);
return -EIO;
}
MHI_LOG("Starting BHIe Programming\n");
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
upper_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
lower_32_bits(mhi_buf->dma_addr));
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
mhi_cntrl->sequence_id = get_random_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
#else
mhi_cntrl->sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
#endif
mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
mhi_cntrl->sequence_id);
read_unlock_bh(pm_lock);
MHI_LOG("Upper:0x%x Lower:0x%x len:0x%zx sequence:%u\n",
upper_32_bits(mhi_buf->dma_addr),
lower_32_bits(mhi_buf->dma_addr),
mhi_buf->len, mhi_cntrl->sequence_id);
MHI_LOG("Waiting for image transfer completion\n");
/* waiting for image download completion */
wait_event_timeout(mhi_cntrl->state_event,
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
mhi_read_reg_field(mhi_cntrl, base,
BHIE_TXVECSTATUS_OFFS,
BHIE_TXVECSTATUS_STATUS_BMSK,
BHIE_TXVECSTATUS_STATUS_SHFT,
&tx_status) || tx_status,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
return -EIO;
return (tx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
}
static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
dma_addr_t dma_addr,
size_t size)
{
u32 tx_status, val;
u32 ImgTxDb = 0x1;
int i, ret;
void __iomem *base = mhi_cntrl->bhi;
rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
struct {
char *name;
u32 offset;
} error_reg[] = {
{ "ERROR_CODE", BHI_ERRCODE },
{ "ERROR_DBG1", BHI_ERRDBG1 },
{ "ERROR_DBG2", BHI_ERRDBG2 },
{ "ERROR_DBG3", BHI_ERRDBG3 },
{ NULL },
};
MHI_LOG("Starting BHI programming\n");
/* program start sbl download via bhi protocol */
read_lock_bh(pm_lock);
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
read_unlock_bh(pm_lock);
goto invalid_pm_state;
}
mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
upper_32_bits(dma_addr));
mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
lower_32_bits(dma_addr));
mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, NUM_MHI_EVT_RINGS);
mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, NUM_MHI_HW_EVT_RINGS);
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, mhi_cntrl->msi_irq_base);
mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, ImgTxDb);
read_unlock_bh(pm_lock);
MHI_LOG("Waiting for image transfer completion\n");
/* waiting for image download completion */
ret = wait_event_timeout(mhi_cntrl->state_event,
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) ||
mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS,
BHI_STATUS_MASK, BHI_STATUS_SHIFT,
&tx_status) || tx_status,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
goto invalid_pm_state;
if (tx_status == BHI_STATUS_ERROR) {
MHI_ERR("Image transfer failed\n");
read_lock_bh(pm_lock);
if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
for (i = 0; error_reg[i].name; i++) {
ret = mhi_read_reg(mhi_cntrl, base,
error_reg[i].offset, &val);
if (ret)
break;
MHI_ERR("reg:%s value:0x%x\n",
error_reg[i].name, val);
}
}
read_unlock_bh(pm_lock);
goto invalid_pm_state;
}
return (tx_status == BHI_STATUS_SUCCESS) ? 0 : -ETIMEDOUT;
invalid_pm_state:
return -EIO;
}
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info *image_info)
{
int i;
struct mhi_buf *mhi_buf = image_info->mhi_buf;
for (i = 0; i < image_info->entries; i++, mhi_buf++)
mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
mhi_buf->dma_addr);
kfree(image_info->mhi_buf);
kfree(image_info);
}
int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info **image_info,
size_t alloc_size)
{
size_t seg_size = mhi_cntrl->seg_len;
/* require an additional entry for the vector table */
int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
int i;
struct image_info *img_info;
struct mhi_buf *mhi_buf;
MHI_LOG("Allocating bytes:%zu seg_size:%zu total_seg:%u\n",
alloc_size, seg_size, segments);
img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
if (!img_info)
return -ENOMEM;
/* allocate memory for entries */
img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
GFP_KERNEL);
if (!img_info->mhi_buf)
goto error_alloc_mhi_buf;
/* allocate and populate vector table */
mhi_buf = img_info->mhi_buf;
for (i = 0; i < segments; i++, mhi_buf++) {
size_t vec_size = seg_size;
/* last entry is for vector table */
if (i == segments - 1)
vec_size = sizeof(struct bhi_vec_entry) * i;
mhi_buf->len = vec_size;
mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size,
&mhi_buf->dma_addr, GFP_KERNEL);
if (!mhi_buf->buf)
goto error_alloc_segment;
MHI_LOG("Entry:%d Address:0x%llx size:%zu\n", i,
(unsigned long long)mhi_buf->dma_addr,
mhi_buf->len);
}
img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
img_info->entries = segments;
*image_info = img_info;
MHI_LOG("Successfully allocated bhi vec table\n");
return 0;
error_alloc_segment:
for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf,
mhi_buf->dma_addr);
error_alloc_mhi_buf:
kfree(img_info);
return -ENOMEM;
}
static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
const struct firmware *firmware,
struct image_info *img_info)
{
size_t remainder = firmware->size;
size_t to_cpy;
const u8 *buf = firmware->data;
int i = 0;
struct mhi_buf *mhi_buf = img_info->mhi_buf;
struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
while (remainder) {
MHI_ASSERT(i >= img_info->entries, "malformed vector table");
to_cpy = min(remainder, mhi_buf->len);
memcpy(mhi_buf->buf, buf, to_cpy);
bhi_vec->dma_addr = mhi_buf->dma_addr;
bhi_vec->size = to_cpy;
MHI_VERB("Setting Vector:0x%llx size: %llu\n",
bhi_vec->dma_addr, bhi_vec->size);
buf += to_cpy;
remainder -= to_cpy;
i++;
bhi_vec++;
mhi_buf++;
}
}
void mhi_fw_load_worker(struct work_struct *work)
{
int ret;
struct mhi_controller *mhi_cntrl;
const char *fw_name;
const struct firmware *firmware;
struct image_info *image_info;
void *buf;
dma_addr_t dma_addr;
size_t size;
mhi_cntrl = container_of(work, struct mhi_controller, fw_worker);
MHI_LOG("Waiting for device to enter PBL from EE:%s\n",
TO_MHI_EXEC_STR(mhi_cntrl->ee));
ret = wait_event_timeout(mhi_cntrl->state_event,
MHI_IN_PBL(mhi_cntrl->ee) ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI is not in valid state\n");
return;
}
MHI_LOG("Device current EE:%s\n", TO_MHI_EXEC_STR(mhi_cntrl->ee));
/* if device in pthru, we do not have to load firmware */
if (mhi_cntrl->ee == MHI_EE_PTHRU)
return;
fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
mhi_cntrl->edl_image : mhi_cntrl->fw_image;
if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size ||
!mhi_cntrl->seg_len))) {
MHI_ERR("No firmware image defined or !sbl_size || !seg_len\n");
return;
}
ret = request_firmware(&firmware, fw_name, mhi_cntrl->dev);
if (ret) {
MHI_ERR("Error loading firmware, ret:%d\n", ret);
return;
}
size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size;
/* the sbl size provided is maximum size, not necessarily image size */
if (size > firmware->size)
size = firmware->size;
buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
if (!buf) {
MHI_ERR("Could not allocate memory for image\n");
release_firmware(firmware);
return;
}
/* load sbl image */
memcpy(buf, firmware->data, size);
ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size);
mhi_free_coherent(mhi_cntrl, size, buf, dma_addr);
/* error or in edl, we're done */
if (ret || mhi_cntrl->ee == MHI_EE_EDL) {
release_firmware(firmware);
return;
}
write_lock_irq(&mhi_cntrl->pm_lock);
mhi_cntrl->dev_state = MHI_STATE_RESET;
write_unlock_irq(&mhi_cntrl->pm_lock);
/*
* if we're doing fbc, populate vector tables while
* device transitioning into MHI READY state
*/
if (mhi_cntrl->fbc_download) {
ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image,
firmware->size);
if (ret) {
MHI_ERR("Error alloc size of %zu\n", firmware->size);
goto error_alloc_fw_table;
}
MHI_LOG("Copying firmware image into vector table\n");
/* load the firmware into BHIE vec table */
mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image);
}
/* transitioning into MHI RESET->READY state */
ret = mhi_ready_state_transition(mhi_cntrl);
MHI_LOG("To Reset->Ready PM_STATE:%s MHI_STATE:%s EE:%s, ret:%d\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
TO_MHI_STATE_STR(mhi_cntrl->dev_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee), ret);
if (!mhi_cntrl->fbc_download) {
release_firmware(firmware);
return;
}
if (ret) {
MHI_ERR("Did not transition to READY state\n");
goto error_read;
}
/* wait for SBL event */
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_cntrl->ee == MHI_EE_SBL ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI did not enter BHIE\n");
goto error_read;
}
/* start full firmware image download */
image_info = mhi_cntrl->fbc_image;
ret = mhi_fw_load_amss(mhi_cntrl,
/* last entry is vec table */
&image_info->mhi_buf[image_info->entries - 1]);
MHI_LOG("amss fw_load, ret:%d\n", ret);
release_firmware(firmware);
return;
error_read:
mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
mhi_cntrl->fbc_image = NULL;
error_alloc_fw_table:
release_firmware(firmware);
}
int BhiWrite(struct mhi_controller *mhi_cntrl, void __user *ubuf, size_t size)
{
int ret;
dma_addr_t dma_addr;
void *dma_buf;
MHI_LOG("Device current EE:%s, M:%s, PM:%s\n",
TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)),
TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)),
to_mhi_pm_state_str(mhi_cntrl->pm_state));
#if 0
if (mhi_get_exec_env(mhi_cntrl) == MHI_EE_EDL && mhi_cntrl->ee != MHI_EE_EDL) {
mhi_cntrl->ee = MHI_EE_EDL;
wait_event_timeout(mhi_cntrl->state_event,
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms + 500));
}
#endif
#if 0
if (!MHI_IN_PBL(mhi_cntrl->ee) || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI is not in valid BHI state\n");
return -EINVAL;
}
#endif
if (mhi_cntrl->ee != MHI_EE_EDL) {
MHI_ERR("MHI is not in EDL state\n");
return -EINVAL;
}
dma_buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
if (!dma_buf) {
MHI_ERR("Could not allocate memory for image\n");
return -ENOMEM;
}
ret = copy_from_user(dma_buf, ubuf, size);
if (ret) {
MHI_ERR("IOCTL_BHI_WRITEIMAGE copy buf error, ret = %d\n", ret);
mhi_free_coherent(mhi_cntrl, size, dma_buf, dma_addr);
return ret;
}
ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size);
mhi_free_coherent(mhi_cntrl, size, dma_buf, dma_addr);
if (ret) {
MHI_ERR("ret = %d, ee=%d\n", ret, mhi_cntrl->ee);
goto error_state;
}
write_lock_irq(&mhi_cntrl->pm_lock);
mhi_cntrl->dev_state = MHI_STATE_RESET;
write_unlock_irq(&mhi_cntrl->pm_lock);
/* transitioning into MHI RESET->READY state */
ret = mhi_ready_state_transition(mhi_cntrl);
if (ret) {
MHI_ERR("Did not transition to READY state\n");
goto error_state;
}
MHI_LOG("To Reset->Ready PM_STATE:%s MHI_STATE:%s EE:%s, ret:%d\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
TO_MHI_STATE_STR(mhi_cntrl->dev_state),
TO_MHI_EXEC_STR(mhi_cntrl->ee), ret);
/* wait for BHIE event */
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_cntrl->ee == MHI_EE_FP ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
MHI_ERR("MHI did not enter Flash Programmer Environment\n");
goto error_state;
}
MHI_LOG("MHI enter Flash Programmer Environment\n");
return 0;
error_state:
MHI_LOG("Device current EE:%s, M:%s\n",
TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)),
TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)));
return ret;
}
long bhi_get_dev_info(struct mhi_controller *mhi_cntrl, void __user *ubuf)
{
long ret = -EINVAL;
BHI_INFO_TYPE bhi_info;
ret = BhiRead(mhi_cntrl, &bhi_info);
if (ret) {
MHI_ERR("IOCTL_BHI_GETDEVINFO BhiRead error, ret = %ld\n", ret);
return ret;
}
ret = copy_to_user(ubuf, &bhi_info, sizeof(bhi_info));
if (ret) {
MHI_ERR("IOCTL_BHI_GETDEVINFO copy error, ret = %ld\n", ret);
}
return ret;
}
long bhi_write_image(struct mhi_controller *mhi_cntrl, void __user *ubuf)
{
long ret = -EINVAL;
size_t size;
ret = copy_from_user(&size, ubuf, sizeof(size));
if (ret) {
MHI_ERR("IOCTL_BHI_WRITEIMAGE copy size error, ret = %ld\n", ret);
return ret;
}
	if (size == 0) {
		MHI_ERR("IOCTL_BHI_WRITEIMAGE invalid size\n");
return -EINVAL;
}
ret = BhiWrite(mhi_cntrl, ubuf+sizeof(size), size);
if (ret) {
MHI_ERR("IOCTL_BHI_WRITEIMAGE BhiWrite error, ret = %ld\n", ret);
}
return ret;
}
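/*
 * Illustrative sketch (not part of the original driver): the user buffer
 * layout implied by bhi_write_image() above - a size field followed
 * immediately by the image bytes. The fd and the ioctl plumbing live in the
 * (suppressed) uci code; only the IOCTL_BHI_WRITEIMAGE name is taken from
 * the log strings above.
 *
 *	size_t img_len = ...;				// length of the firmware image
 *	char *ubuf = malloc(sizeof(size_t) + img_len);
 *
 *	memcpy(ubuf, &img_len, sizeof(size_t));		// size prefix
 *	memcpy(ubuf + sizeof(size_t), img, img_len);	// image payload
 *	ioctl(fd, IOCTL_BHI_WRITEIMAGE, ubuf);
 */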

View File

@ -0,0 +1,274 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/termios.h>
#include <linux/wait.h>
#include "mhi.h"
#include "mhi_internal.h"
struct __packed dtr_ctrl_msg {
u32 preamble;
u32 msg_id;
u32 dest_id;
u32 size;
u32 msg;
};
#define CTRL_MAGIC (0x4C525443)
#define CTRL_MSG_DTR BIT(0)
#define CTRL_MSG_RTS BIT(1)
#define CTRL_MSG_DCD BIT(0)
#define CTRL_MSG_DSR BIT(1)
#define CTRL_MSG_RI BIT(3)
#define CTRL_HOST_STATE (0x10)
#define CTRL_DEVICE_STATE (0x11)
#define CTRL_GET_CHID(dtr) (dtr->dest_id & 0xFF)
static int mhi_dtr_tiocmset(struct mhi_controller *mhi_cntrl,
struct mhi_device *mhi_dev,
u32 tiocm)
{
struct dtr_ctrl_msg *dtr_msg = NULL;
struct mhi_chan *dtr_chan = mhi_cntrl->dtr_dev->ul_chan;
spinlock_t *res_lock = &mhi_dev->dev.devres_lock;
u32 cur_tiocm;
int ret = 0;
cur_tiocm = mhi_dev->tiocm & ~(TIOCM_CD | TIOCM_DSR | TIOCM_RI);
tiocm &= (TIOCM_DTR | TIOCM_RTS);
/* state did not change */
if (cur_tiocm == tiocm)
return 0;
mutex_lock(&dtr_chan->mutex);
dtr_msg = kzalloc(sizeof(*dtr_msg), GFP_KERNEL);
if (!dtr_msg) {
ret = -ENOMEM;
goto tiocm_exit;
}
dtr_msg->preamble = CTRL_MAGIC;
dtr_msg->msg_id = CTRL_HOST_STATE;
dtr_msg->dest_id = mhi_dev->ul_chan_id;
dtr_msg->size = sizeof(u32);
if (tiocm & TIOCM_DTR)
dtr_msg->msg |= CTRL_MSG_DTR;
if (tiocm & TIOCM_RTS)
dtr_msg->msg |= CTRL_MSG_RTS;
/*
 * 'minicom -D /dev/mhi_DUN' sends RTS:1 on open and RTS:0 on exit.
 * RTS:0 prevents the modem from outputting AT responses.
 * 'busybox microcom', however, does not send any RTS to the modem.
* [75094.969783] mhi_uci_q 0306_00.03.00_DUN: mhi_dtr_tiocmset DTR:0 RTS:1
* [75100.210994] mhi_uci_q 0306_00.03.00_DUN: mhi_dtr_tiocmset DTR:0 RTS:0
*/
dev_dbg(&mhi_dev->dev, "%s DTR:%d RTS:%d\n", __func__,
!!(tiocm & TIOCM_DTR), !!(tiocm & TIOCM_RTS));
reinit_completion(&dtr_chan->completion);
ret = mhi_queue_transfer(mhi_cntrl->dtr_dev, DMA_TO_DEVICE, dtr_msg,
sizeof(*dtr_msg), MHI_EOT);
if (ret)
goto tiocm_exit;
ret = wait_for_completion_timeout(&dtr_chan->completion,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret) {
MHI_ERR("Failed to receive transfer callback\n");
ret = -EIO;
goto tiocm_exit;
}
ret = 0;
spin_lock_irq(res_lock);
mhi_dev->tiocm &= ~(TIOCM_DTR | TIOCM_RTS);
mhi_dev->tiocm |= tiocm;
spin_unlock_irq(res_lock);
tiocm_exit:
kfree(dtr_msg);
mutex_unlock(&dtr_chan->mutex);
return ret;
}
long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
int ret;
/* ioctl not supported by this controller */
if (!mhi_cntrl->dtr_dev)
return -EIO;
switch (cmd) {
case TIOCMGET:
return mhi_dev->tiocm;
case TIOCMSET:
{
u32 tiocm;
ret = get_user(tiocm, (u32 *)arg);
if (ret)
return ret;
return mhi_dtr_tiocmset(mhi_cntrl, mhi_dev, tiocm);
}
default:
break;
}
return -EINVAL;
}
EXPORT_SYMBOL(mhi_ioctl);
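/*
 * Illustrative sketch (not part of the original driver): how user space can
 * reach this handler through the uci character device, assuming the uci
 * layer forwards the standard termios ioctl semantics. The /dev/mhi_DUN node
 * name matches the comment in mhi_dtr_tiocmset() above; error handling is
 * omitted.
 *
 *	int fd = open("/dev/mhi_DUN", O_RDWR | O_NOCTTY);
 *	int bits = TIOCM_DTR | TIOCM_RTS;
 *
 *	ioctl(fd, TIOCMSET, &bits);	// assert DTR/RTS -> CTRL_HOST_STATE message
 *	ioctl(fd, TIOCMGET, &bits);	// read back CD/DSR/RI reported by the modem
 */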
static int mhi_dtr_queue_inbound(struct mhi_controller *mhi_cntrl)
{
struct mhi_device *mhi_dev = mhi_cntrl->dtr_dev;
int nr_trbs = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
size_t mtu = mhi_dev->mtu;
void *buf;
int ret = -EIO, i;
for (i = 0; i < nr_trbs; i++) {
buf = kmalloc(mtu, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf, mtu,
MHI_EOT);
if (ret) {
kfree(buf);
return ret;
}
}
return ret;
}
static void mhi_dtr_dl_xfer_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct dtr_ctrl_msg *dtr_msg = mhi_result->buf_addr;
u32 chan;
spinlock_t *res_lock;
if (mhi_result->transaction_status == -ENOTCONN) {
kfree(mhi_result->buf_addr);
return;
}
if (mhi_result->bytes_xferd != sizeof(*dtr_msg)) {
MHI_ERR("Unexpected length %zu received\n",
mhi_result->bytes_xferd);
return;
}
MHI_LOG("preamble:0x%x msg_id:%u dest_id:%u msg:0x%x\n",
dtr_msg->preamble, dtr_msg->msg_id, dtr_msg->dest_id,
dtr_msg->msg);
chan = CTRL_GET_CHID(dtr_msg);
if (chan >= mhi_cntrl->max_chan)
goto auto_queue;
mhi_dev = mhi_cntrl->mhi_chan[chan].mhi_dev;
if (!mhi_dev)
goto auto_queue;
res_lock = &mhi_dev->dev.devres_lock;
spin_lock_irq(res_lock);
mhi_dev->tiocm &= ~(TIOCM_CD | TIOCM_DSR | TIOCM_RI);
if (dtr_msg->msg & CTRL_MSG_DCD)
mhi_dev->tiocm |= TIOCM_CD;
if (dtr_msg->msg & CTRL_MSG_DSR)
mhi_dev->tiocm |= TIOCM_DSR;
if (dtr_msg->msg & CTRL_MSG_RI)
mhi_dev->tiocm |= TIOCM_RI;
spin_unlock_irq(res_lock);
auto_queue:
mhi_queue_transfer(mhi_cntrl->dtr_dev, DMA_FROM_DEVICE, mhi_result->buf_addr,
mhi_cntrl->dtr_dev->mtu, MHI_EOT);
}
static void mhi_dtr_ul_xfer_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *dtr_chan = mhi_cntrl->dtr_dev->ul_chan;
MHI_VERB("Received with status:%d\n", mhi_result->transaction_status);
if (!mhi_result->transaction_status)
complete(&dtr_chan->completion);
}
static void mhi_dtr_remove(struct mhi_device *mhi_dev)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
mhi_cntrl->dtr_dev = NULL;
}
static int mhi_dtr_probe(struct mhi_device *mhi_dev,
const struct mhi_device_id *id)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
int ret;
MHI_LOG("Enter for DTR control channel\n");
mhi_dev->mtu = min_t(size_t, id->driver_data, mhi_dev->mtu);
ret = mhi_prepare_for_transfer(mhi_dev);
if (!ret)
mhi_cntrl->dtr_dev = mhi_dev;
if (!ret)
ret = mhi_dtr_queue_inbound(mhi_cntrl);
MHI_LOG("Exit with ret:%d\n", ret);
return ret;
}
static const struct mhi_device_id mhi_dtr_table[] = {
{ .chan = "IP_CTRL", .driver_data = sizeof(struct dtr_ctrl_msg) },
{},
};
static struct mhi_driver mhi_dtr_driver = {
.id_table = mhi_dtr_table,
.remove = mhi_dtr_remove,
.probe = mhi_dtr_probe,
.ul_xfer_cb = mhi_dtr_ul_xfer_cb,
.dl_xfer_cb = mhi_dtr_dl_xfer_cb,
.driver = {
.name = "MHI_DTR",
.owner = THIS_MODULE,
}
};
int __init mhi_dtr_init(void)
{
return mhi_driver_register(&mhi_dtr_driver);
}
void mhi_dtr_exit(void)
{
	mhi_driver_unregister(&mhi_dtr_driver);
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,362 @@
#ifndef __SDX20_MHI_H
#define __SDX20_MHI_H
#include <linux/types.h>
/* MHI control data structures allocated by the host, including
 * channel context array, event context array, command context and rings */
/* Channel context state */
enum mhi_dev_ch_ctx_state {
MHI_DEV_CH_STATE_DISABLED,
MHI_DEV_CH_STATE_ENABLED,
MHI_DEV_CH_STATE_RUNNING,
MHI_DEV_CH_STATE_SUSPENDED,
MHI_DEV_CH_STATE_STOP,
MHI_DEV_CH_STATE_ERROR,
MHI_DEV_CH_STATE_RESERVED,
MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF
};
/* Channel type */
enum mhi_dev_ch_ctx_type {
MHI_DEV_CH_TYPE_NONE,
MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL,
MHI_DEV_CH_TYPE_INBOUND_CHANNEL,
MHI_DEV_CH_RESERVED
};
/* Channel context type */
struct mhi_dev_ch_ctx {
enum mhi_dev_ch_ctx_state ch_state;
enum mhi_dev_ch_ctx_type ch_type;
uint32_t err_indx;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
enum mhi_dev_ring_element_type_id {
MHI_DEV_RING_EL_INVALID = 0,
MHI_DEV_RING_EL_NOOP = 1,
MHI_DEV_RING_EL_TRANSFER = 2,
MHI_DEV_RING_EL_RESET = 16,
MHI_DEV_RING_EL_STOP = 17,
MHI_DEV_RING_EL_START = 18,
MHI_DEV_RING_EL_MHI_STATE_CHG = 32,
MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33,
MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34,
MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64,
MHI_DEV_RING_EL_UNDEF
};
enum mhi_dev_ring_state {
RING_STATE_UINT = 0,
RING_STATE_IDLE,
RING_STATE_PENDING,
};
enum mhi_dev_ring_type {
RING_TYPE_CMD = 0,
RING_TYPE_ER,
RING_TYPE_CH,
RING_TYPE_INVAL
};
/* Event context interrupt moderation */
enum mhi_dev_evt_ctx_int_mod_timer {
MHI_DEV_EVT_INT_MODERATION_DISABLED
};
/* Event ring type */
enum mhi_dev_evt_ctx_event_ring_type {
MHI_DEV_EVT_TYPE_DEFAULT,
MHI_DEV_EVT_TYPE_VALID,
MHI_DEV_EVT_RESERVED
};
/* Event ring context type */
struct mhi_dev_ev_ctx {
uint32_t res1:16;
enum mhi_dev_evt_ctx_int_mod_timer intmodt:16;
enum mhi_dev_evt_ctx_event_ring_type ertype;
uint32_t msivec;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* Command context */
struct mhi_dev_cmd_ctx {
uint32_t res1;
uint32_t res2;
uint32_t res3;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* generic context */
struct mhi_dev_gen_ctx {
uint32_t res1;
uint32_t res2;
uint32_t res3;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* Transfer ring element */
struct mhi_dev_transfer_ring_element {
uint64_t data_buf_ptr;
uint32_t len:16;
uint32_t res1:16;
uint32_t chain:1;
uint32_t res2:7;
uint32_t ieob:1;
uint32_t ieot:1;
uint32_t bei:1;
uint32_t res3:5;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res4:8;
} __packed;
/* Command ring element */
/* Command ring No op command */
struct mhi_dev_cmd_ring_op {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring reset channel command */
struct mhi_dev_cmd_ring_reset_channel_cmd {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring stop channel command */
struct mhi_dev_cmd_ring_stop_channel_cmd {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring start channel command */
struct mhi_dev_cmd_ring_start_channel_cmd {
uint64_t res1;
uint32_t seqnum;
uint32_t reliable:1;
uint32_t res2:15;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
enum mhi_dev_cmd_completion_code {
MHI_CMD_COMPL_CODE_INVALID = 0,
MHI_CMD_COMPL_CODE_SUCCESS = 1,
MHI_CMD_COMPL_CODE_EOT = 2,
MHI_CMD_COMPL_CODE_OVERFLOW = 3,
MHI_CMD_COMPL_CODE_EOB = 4,
MHI_CMD_COMPL_CODE_UNDEFINED = 16,
MHI_CMD_COMPL_CODE_RING_EL = 17,
MHI_CMD_COMPL_CODE_RES
};
/* Event ring elements */
/* Transfer completion event */
struct mhi_dev_event_ring_transfer_completion {
uint64_t ptr;
uint32_t len:16;
uint32_t res1:8;
enum mhi_dev_cmd_completion_code code:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command completion event */
struct mhi_dev_event_ring_cmd_completion {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_cmd_completion_code code:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
enum mhi_dev_state {
MHI_DEV_RESET_STATE = 0,
MHI_DEV_READY_STATE,
MHI_DEV_M0_STATE,
MHI_DEV_M1_STATE,
MHI_DEV_M2_STATE,
MHI_DEV_M3_STATE,
MHI_DEV_MAX_STATE,
MHI_DEV_SYSERR_STATE = 0xff
};
/* MHI state change event */
struct mhi_dev_event_ring_state_change {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_state mhistate:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
enum mhi_dev_execenv {
MHI_DEV_SBL_EE = 1,
MHI_DEV_AMSS_EE = 2,
MHI_DEV_UNRESERVED
};
/* EE state change event */
struct mhi_dev_event_ring_ee_state_change {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_execenv execenv:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
/* Generic cmd to parse common details like type and channel id */
struct mhi_dev_ring_generic {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_state mhistate:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
struct mhi_config {
uint32_t mhi_reg_len;
uint32_t version;
uint32_t event_rings;
uint32_t channels;
uint32_t chdb_offset;
uint32_t erdb_offset;
};
#define NUM_CHANNELS 128
#define HW_CHANNEL_BASE 100
#define HW_CHANNEL_END 107
#define MHI_ENV_VALUE 2
#define MHI_MASK_ROWS_CH_EV_DB 4
#define TRB_MAX_DATA_SIZE 8192
#define MHI_CTRL_STATE 25
#define IPA_DMA_SYNC 1
#define IPA_DMA_ASYNC 0
/* maximum transfer completion events buffer */
#define MAX_TR_EVENTS 50
/*maximum event requests */
#define MHI_MAX_EVT_REQ 50
/* Possible ring element types */
union mhi_dev_ring_element_type {
struct mhi_dev_cmd_ring_op cmd_no_op;
struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset;
struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop;
struct mhi_dev_cmd_ring_start_channel_cmd cmd_start;
struct mhi_dev_transfer_ring_element cmd_transfer;
struct mhi_dev_event_ring_transfer_completion evt_tr_comp;
struct mhi_dev_event_ring_cmd_completion evt_cmd_comp;
struct mhi_dev_event_ring_state_change evt_state_change;
struct mhi_dev_event_ring_ee_state_change evt_ee_state;
struct mhi_dev_ring_generic generic;
};
/* Transfer ring element type */
union mhi_dev_ring_ctx {
struct mhi_dev_cmd_ctx cmd;
struct mhi_dev_ev_ctx ev;
struct mhi_dev_ch_ctx ch;
struct mhi_dev_gen_ctx generic;
};
/* MHI host Control and data address region */
struct mhi_host_addr {
uint32_t ctrl_base_lsb;
uint32_t ctrl_base_msb;
uint32_t ctrl_limit_lsb;
uint32_t ctrl_limit_msb;
uint32_t data_base_lsb;
uint32_t data_base_msb;
uint32_t data_limit_lsb;
uint32_t data_limit_msb;
};
/* MHI physical and virtual address region */
struct mhi_meminfo {
struct device *dev;
uintptr_t pa_aligned;
uintptr_t pa_unaligned;
uintptr_t va_aligned;
uintptr_t va_unaligned;
uintptr_t size;
};
struct mhi_addr {
uint64_t host_pa;
uintptr_t device_pa;
uintptr_t device_va;
size_t size;
dma_addr_t phy_addr;
void *virt_addr;
bool use_ipa_dma;
};
struct mhi_interrupt_state {
uint32_t mask;
uint32_t status;
};
enum mhi_dev_channel_state {
MHI_DEV_CH_UNINT,
MHI_DEV_CH_STARTED,
MHI_DEV_CH_PENDING_START,
MHI_DEV_CH_PENDING_STOP,
MHI_DEV_CH_STOPPED,
MHI_DEV_CH_CLOSED,
};
enum mhi_dev_ch_operation {
MHI_DEV_OPEN_CH,
MHI_DEV_CLOSE_CH,
MHI_DEV_READ_CH,
MHI_DEV_READ_WR,
MHI_DEV_POLL,
};
enum mhi_ctrl_info {
MHI_STATE_CONFIGURED = 0,
MHI_STATE_CONNECTED = 1,
MHI_STATE_DISCONNECTED = 2,
MHI_STATE_INVAL,
};
enum mhi_dev_tr_compl_evt_type {
SEND_EVENT_BUFFER,
SEND_EVENT_RD_OFFSET,
};
enum mhi_dev_transfer_type {
MHI_DEV_DMA_SYNC,
MHI_DEV_DMA_ASYNC,
};
#endif /* _SDX20_MHI_H_ */
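
The packed bitfield structs above are meant to line up exactly with the 16-byte MHI ring element layout shared between host and device. Below is a quick, self-contained sanity check, not part of this patch; the local struct name `tre_sketch` is purely illustrative and simply mirrors `struct mhi_dev_transfer_ring_element`:

```c
/* Standalone sanity check (not part of the patch): the __packed
 * bitfield layout above should collapse each ring element to exactly
 * 16 bytes. Compile with any C11 compiler (-std=c11).
 */
#include <assert.h>
#include <stdint.h>

struct tre_sketch {
	uint64_t data_buf_ptr;
	uint32_t len:16;
	uint32_t res1:16;
	uint32_t chain:1;
	uint32_t res2:7;
	uint32_t ieob:1;
	uint32_t ieot:1;
	uint32_t bei:1;
	uint32_t res3:5;
	uint32_t type:8;	/* enum-typed bitfield in the real header */
	uint32_t res4:8;
} __attribute__((packed));

static_assert(sizeof(struct tre_sketch) == 16,
	      "transfer ring element must be 16 bytes");

int main(void)
{
	return 0;
}
```

The command and event ring elements above follow the same 8 + 4 + 4 byte split, so the same 16-byte check applies to them.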

View File

@ -0,0 +1,426 @@
#ifndef __SDX20_MHI_H
#define __SDX20_MHI_H
#include <linux/types.h>
/* MHI control data structures allotted by the host, including
* channel context array, event context array, command context and rings */
/* Channel context state */
enum mhi_dev_ch_ctx_state {
MHI_DEV_CH_STATE_DISABLED,
MHI_DEV_CH_STATE_ENABLED,
MHI_DEV_CH_STATE_RUNNING,
MHI_DEV_CH_STATE_SUSPENDED,
MHI_DEV_CH_STATE_STOP,
MHI_DEV_CH_STATE_ERROR,
MHI_DEV_CH_STATE_RESERVED,
MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF
};
/* Channel type */
enum mhi_dev_ch_ctx_type {
MHI_DEV_CH_TYPE_NONE,
MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL,
MHI_DEV_CH_TYPE_INBOUND_CHANNEL,
MHI_DEV_CH_RESERVED
};
/* Channel context type */
struct mhi_dev_ch_ctx {
enum mhi_dev_ch_ctx_state ch_state;
enum mhi_dev_ch_ctx_type ch_type;
uint32_t err_indx;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
enum mhi_dev_ring_element_type_id {
MHI_DEV_RING_EL_INVALID = 0,
MHI_DEV_RING_EL_NOOP = 1,
MHI_DEV_RING_EL_TRANSFER = 2,
MHI_DEV_RING_EL_RESET = 16,
MHI_DEV_RING_EL_STOP = 17,
MHI_DEV_RING_EL_START = 18,
MHI_DEV_RING_EL_MHI_STATE_CHG = 32,
MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33,
MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34,
MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64,
MHI_DEV_RING_EL_UNDEF
};
enum mhi_dev_ring_state {
RING_STATE_UINT = 0,
RING_STATE_IDLE,
RING_STATE_PENDING,
};
enum mhi_dev_ring_type {
RING_TYPE_CMD = 0,
RING_TYPE_ER,
RING_TYPE_CH,
RING_TYPE_INVAL
};
/* Event context interrupt moderation */
enum mhi_dev_evt_ctx_int_mod_timer {
MHI_DEV_EVT_INT_MODERATION_DISABLED
};
/* Event ring type */
enum mhi_dev_evt_ctx_event_ring_type {
MHI_DEV_EVT_TYPE_DEFAULT,
MHI_DEV_EVT_TYPE_VALID,
MHI_DEV_EVT_RESERVED
};
/* Event ring context type */
struct mhi_dev_ev_ctx {
uint32_t res1:16;
enum mhi_dev_evt_ctx_int_mod_timer intmodt:16;
enum mhi_dev_evt_ctx_event_ring_type ertype;
uint32_t msivec;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* Command context */
struct mhi_dev_cmd_ctx {
uint32_t res1;
uint32_t res2;
uint32_t res3;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* generic context */
struct mhi_dev_gen_ctx {
uint32_t res1;
uint32_t res2;
uint32_t res3;
uint64_t rbase;
uint64_t rlen;
uint64_t rp;
uint64_t wp;
} __packed;
/* Transfer ring element */
struct mhi_dev_transfer_ring_element {
uint64_t data_buf_ptr;
uint32_t len:16;
uint32_t res1:16;
uint32_t chain:1;
uint32_t res2:7;
uint32_t ieob:1;
uint32_t ieot:1;
uint32_t bei:1;
uint32_t res3:5;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res4:8;
} __packed;
/* Command ring element */
/* Command ring No op command */
struct mhi_dev_cmd_ring_op {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring reset channel command */
struct mhi_dev_cmd_ring_reset_channel_cmd {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring stop channel command */
struct mhi_dev_cmd_ring_stop_channel_cmd {
uint64_t res1;
uint32_t res2;
uint32_t res3:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command ring start channel command */
struct mhi_dev_cmd_ring_start_channel_cmd {
uint64_t res1;
uint32_t seqnum;
uint32_t reliable:1;
uint32_t res2:15;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
enum mhi_dev_cmd_completion_code {
MHI_CMD_COMPL_CODE_INVALID = 0,
MHI_CMD_COMPL_CODE_SUCCESS = 1,
MHI_CMD_COMPL_CODE_EOT = 2,
MHI_CMD_COMPL_CODE_OVERFLOW = 3,
MHI_CMD_COMPL_CODE_EOB = 4,
MHI_CMD_COMPL_CODE_UNDEFINED = 16,
MHI_CMD_COMPL_CODE_RING_EL = 17,
MHI_CMD_COMPL_CODE_RES
};
/* Event ring elements */
/* Transfer completion event */
struct mhi_dev_event_ring_transfer_completion {
uint64_t ptr;
uint32_t len:16;
uint32_t res1:8;
enum mhi_dev_cmd_completion_code code:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
/* Command completion event */
struct mhi_dev_event_ring_cmd_completion {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_cmd_completion_code code:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
enum mhi_dev_state {
MHI_DEV_RESET_STATE = 0,
MHI_DEV_READY_STATE,
MHI_DEV_M0_STATE,
MHI_DEV_M1_STATE,
MHI_DEV_M2_STATE,
MHI_DEV_M3_STATE,
MHI_DEV_MAX_STATE,
MHI_DEV_SYSERR_STATE = 0xff
};
/* MHI state change event */
struct mhi_dev_event_ring_state_change {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_state mhistate:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
enum mhi_dev_execenv {
MHI_DEV_SBL_EE = 1,
MHI_DEV_AMSS_EE = 2,
MHI_DEV_UNRESERVED
};
/* EE state change event */
struct mhi_dev_event_ring_ee_state_change {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_execenv execenv:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t res3:8;
} __packed;
/* Generic cmd to parse common details like type and channel id */
struct mhi_dev_ring_generic {
uint64_t ptr;
uint32_t res1:24;
enum mhi_dev_state mhistate:8;
uint32_t res2:16;
enum mhi_dev_ring_element_type_id type:8;
uint32_t chid:8;
} __packed;
struct mhi_config {
uint32_t mhi_reg_len;
uint32_t version;
uint32_t event_rings;
uint32_t channels;
uint32_t chdb_offset;
uint32_t erdb_offset;
};
#define NUM_CHANNELS 128
#define HW_CHANNEL_BASE 100
#define HW_CHANNEL_END 107
#define MHI_ENV_VALUE 2
#define MHI_MASK_ROWS_CH_EV_DB 4
#define TRB_MAX_DATA_SIZE 8192
#define MHI_CTRL_STATE 25
#define IPA_DMA_SYNC 1
#define IPA_DMA_ASYNC 0
/* maximum transfer completion events buffer */
#define MAX_TR_EVENTS 50
/*maximum event requests */
#define MHI_MAX_EVT_REQ 50
/* Possible ring element types */
union mhi_dev_ring_element_type {
struct mhi_dev_cmd_ring_op cmd_no_op;
struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset;
struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop;
struct mhi_dev_cmd_ring_start_channel_cmd cmd_start;
struct mhi_dev_transfer_ring_element tre;
struct mhi_dev_event_ring_transfer_completion evt_tr_comp;
struct mhi_dev_event_ring_cmd_completion evt_cmd_comp;
struct mhi_dev_event_ring_state_change evt_state_change;
struct mhi_dev_event_ring_ee_state_change evt_ee_state;
struct mhi_dev_ring_generic generic;
};
/* Transfer ring element type */
union mhi_dev_ring_ctx {
struct mhi_dev_cmd_ctx cmd;
struct mhi_dev_ev_ctx ev;
struct mhi_dev_ch_ctx ch;
struct mhi_dev_gen_ctx generic;
};
/* MHI host Control and data address region */
struct mhi_host_addr {
uint32_t ctrl_base_lsb;
uint32_t ctrl_base_msb;
uint32_t ctrl_limit_lsb;
uint32_t ctrl_limit_msb;
uint32_t data_base_lsb;
uint32_t data_base_msb;
uint32_t data_limit_lsb;
uint32_t data_limit_msb;
};
/* MHI physical and virtual address region */
struct mhi_meminfo {
struct device *dev;
uintptr_t pa_aligned;
uintptr_t pa_unaligned;
uintptr_t va_aligned;
uintptr_t va_unaligned;
uintptr_t size;
};
struct mhi_addr {
uint64_t host_pa;
uintptr_t device_pa;
uintptr_t device_va;
size_t size;
dma_addr_t phy_addr;
void *virt_addr;
bool use_ipa_dma;
};
struct mhi_interrupt_state {
uint32_t mask;
uint32_t status;
};
enum mhi_dev_channel_state {
MHI_DEV_CH_UNINT,
MHI_DEV_CH_STARTED,
MHI_DEV_CH_PENDING_START,
MHI_DEV_CH_PENDING_STOP,
MHI_DEV_CH_STOPPED,
MHI_DEV_CH_CLOSED,
};
enum mhi_dev_ch_operation {
MHI_DEV_OPEN_CH,
MHI_DEV_CLOSE_CH,
MHI_DEV_READ_CH,
MHI_DEV_READ_WR,
MHI_DEV_POLL,
};
enum mhi_ctrl_info {
MHI_STATE_CONFIGURED = 0,
MHI_STATE_CONNECTED = 1,
MHI_STATE_DISCONNECTED = 2,
MHI_STATE_INVAL,
};
enum mhi_dev_tr_compl_evt_type {
SEND_EVENT_BUFFER,
SEND_EVENT_RD_OFFSET,
};
enum mhi_dev_transfer_type {
MHI_DEV_DMA_SYNC,
MHI_DEV_DMA_ASYNC,
};
#if 0
/* SW channel client list */
enum mhi_client_channel {
MHI_CLIENT_LOOPBACK_OUT = 0,
MHI_CLIENT_LOOPBACK_IN = 1,
MHI_CLIENT_SAHARA_OUT = 2,
MHI_CLIENT_SAHARA_IN = 3,
MHI_CLIENT_DIAG_OUT = 4,
MHI_CLIENT_DIAG_IN = 5,
MHI_CLIENT_SSR_OUT = 6,
MHI_CLIENT_SSR_IN = 7,
MHI_CLIENT_QDSS_OUT = 8,
MHI_CLIENT_QDSS_IN = 9,
MHI_CLIENT_EFS_OUT = 10,
MHI_CLIENT_EFS_IN = 11,
MHI_CLIENT_MBIM_OUT = 12,
MHI_CLIENT_MBIM_IN = 13,
MHI_CLIENT_QMI_OUT = 14,
MHI_CLIENT_QMI_IN = 15,
MHI_CLIENT_IP_CTRL_0_OUT = 16,
MHI_CLIENT_IP_CTRL_0_IN = 17,
MHI_CLIENT_IP_CTRL_1_OUT = 18,
MHI_CLIENT_IP_CTRL_1_IN = 19,
MHI_CLIENT_DCI_OUT = 20,
MHI_CLIENT_DCI_IN = 21,
MHI_CLIENT_IP_CTRL_3_OUT = 22,
MHI_CLIENT_IP_CTRL_3_IN = 23,
MHI_CLIENT_IP_CTRL_4_OUT = 24,
MHI_CLIENT_IP_CTRL_4_IN = 25,
MHI_CLIENT_IP_CTRL_5_OUT = 26,
MHI_CLIENT_IP_CTRL_5_IN = 27,
MHI_CLIENT_IP_CTRL_6_OUT = 28,
MHI_CLIENT_IP_CTRL_6_IN = 29,
MHI_CLIENT_IP_CTRL_7_OUT = 30,
MHI_CLIENT_IP_CTRL_7_IN = 31,
MHI_CLIENT_DUN_OUT = 32,
MHI_CLIENT_DUN_IN = 33,
MHI_CLIENT_IP_SW_0_OUT = 34,
MHI_CLIENT_IP_SW_0_IN = 35,
MHI_CLIENT_IP_SW_1_OUT = 36,
MHI_CLIENT_IP_SW_1_IN = 37,
MHI_CLIENT_IP_SW_2_OUT = 38,
MHI_CLIENT_IP_SW_2_IN = 39,
MHI_CLIENT_IP_SW_3_OUT = 40,
MHI_CLIENT_IP_SW_3_IN = 41,
MHI_CLIENT_CSVT_OUT = 42,
MHI_CLIENT_CSVT_IN = 43,
MHI_CLIENT_SMCT_OUT = 44,
MHI_CLIENT_SMCT_IN = 45,
MHI_CLIENT_IP_SW_4_OUT = 46,
MHI_CLIENT_IP_SW_4_IN = 47,
MHI_MAX_SOFTWARE_CHANNELS = 48,
MHI_CLIENT_TEST_OUT = 60,
MHI_CLIENT_TEST_IN = 61,
MHI_CLIENT_RESERVED_1_LOWER = 62,
MHI_CLIENT_RESERVED_1_UPPER = 99,
MHI_CLIENT_IP_HW_0_OUT = 100,
MHI_CLIENT_IP_HW_0_IN = 101,
MHI_CLIENT_RESERVED_2_LOWER = 102,
MHI_CLIENT_RESERVED_2_UPPER = 127,
MHI_MAX_CHANNELS = 102,
};
#endif
#endif /* _SDX20_MHI_H_ */

View File

@ -0,0 +1,33 @@
menu "MHI device support"
config MHI_NETDEV
tristate "MHI NETDEV"
depends on MHI_BUS
help
MHI based net device driver for transferring IP traffic
between the host and modem. By enabling this driver, clients
can transfer data using a standard network interface. Over-the-air
traffic goes through the mhi netdev interface.
config MHI_UCI
tristate "MHI UCI"
depends on MHI_BUS
help
MHI based UCI driver for transferring data between the host and
modem using standard file operations from user space. Open, read,
write, ioctl, and close operations are supported by this driver.
Please check mhi_uci_match_table for all supported channels that
are exposed to userspace.
config MHI_SATELLITE
tristate "MHI SATELLITE"
depends on MHI_BUS
help
MHI proxy satellite device driver that enables NON-HLOS MHI satellite
drivers to communicate with the device over the PCIe link without host
involvement. The host facilitates propagation of events from the device
to NON-HLOS MHI satellite drivers, channel states, and power
management over IPC communication. This helps with HLOS power
savings.
endmenu

View File

@ -0,0 +1,3 @@
obj-$(CONFIG_MHI_NETDEV) +=mhi_netdev.o
obj-$(CONFIG_MHI_UCI) +=mhi_uci.o
obj-$(CONFIG_MHI_SATELLITE) +=mhi_satellite.o

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,981 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/version.h>
#if 1
static inline void *ipc_log_context_create(int max_num_pages,
const char *modname, uint16_t user_version)
{ return NULL; }
static inline int ipc_log_string(void *ilctxt, const char *fmt, ...)
{ return -EINVAL; }
#endif
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/uaccess.h>
#include <linux/tty.h>
#include "../core/mhi.h"
#define DEVICE_NAME "mhi"
#define MHI_UCI_DRIVER_NAME "mhi_uci_q"
struct uci_chan {
wait_queue_head_t wq;
spinlock_t lock;
struct list_head pending; /* user space waiting to read */
struct uci_buf *cur_buf; /* current buffer user space reading */
size_t rx_size;
};
struct uci_buf {
struct page *page;
void *data;
size_t len;
unsigned nr_trb;
struct list_head node;
};
struct uci_dev {
struct list_head node;
dev_t devt;
struct device *dev;
struct mhi_device *mhi_dev;
const char *chan;
struct mutex mutex; /* sync open and close */
struct mutex r_mutex;
struct mutex w_mutex;
struct uci_chan ul_chan;
struct uci_chan dl_chan;
size_t mtu;
int ref_count;
bool enabled;
unsigned rx_error;
unsigned nr_trb;
unsigned nr_trbs;
struct uci_buf *uci_buf;
struct ktermios termios;
size_t bytes_xferd;
};
struct mhi_uci_drv {
struct list_head head;
struct mutex lock;
struct class *class;
int major;
dev_t dev_t;
};
static int uci_msg_lvl = MHI_MSG_LVL_ERROR;
module_param(uci_msg_lvl, uint, S_IRUGO | S_IWUSR);
#define MSG_VERB(fmt, ...) do { \
if (uci_msg_lvl <= MHI_MSG_LVL_VERBOSE) \
pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#define MSG_LOG(fmt, ...) do { \
if (uci_msg_lvl <= MHI_MSG_LVL_INFO) \
pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#define MSG_ERR(fmt, ...) do { \
if (uci_msg_lvl <= MHI_MSG_LVL_ERROR) \
pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#define MAX_UCI_DEVICES (64)
#define QUEC_MHI_UCI_ALWAYS_OPEN /* for now, sdx20 cannot handle a "start-reset-start" sequence, so the simple solution is to keep the channel in the started state */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
#ifdef TCGETS2
__weak int user_termios_to_kernel_termios(struct ktermios *k,
struct termios2 __user *u)
{
return copy_from_user(k, u, sizeof(struct termios2));
}
__weak int kernel_termios_to_user_termios(struct termios2 __user *u,
struct ktermios *k)
{
return copy_to_user(u, k, sizeof(struct termios2));
}
__weak int user_termios_to_kernel_termios_1(struct ktermios *k,
struct termios __user *u)
{
return copy_from_user(k, u, sizeof(struct termios));
}
__weak int kernel_termios_to_user_termios_1(struct termios __user *u,
struct ktermios *k)
{
return copy_to_user(u, k, sizeof(struct termios));
}
#else
__weak int user_termios_to_kernel_termios(struct ktermios *k,
struct termios __user *u)
{
return copy_from_user(k, u, sizeof(struct termios));
}
__weak int kernel_termios_to_user_termios(struct termios __user *u,
struct ktermios *k)
{
return copy_to_user(u, k, sizeof(struct termios));
}
#endif /* TCGETS2 */
#endif
static DECLARE_BITMAP(uci_minors, MAX_UCI_DEVICES);
static struct mhi_uci_drv mhi_uci_drv;
static int mhi_queue_inbound(struct uci_dev *uci_dev)
{
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
int nr_trbs = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
size_t mtu = uci_dev->mtu;
void *buf;
struct uci_buf *uci_buf;
int ret = -EIO, i;
if (uci_dev->uci_buf == NULL) {
uci_dev->nr_trb = 0;
uci_dev->nr_trbs = (nr_trbs + 1);
uci_dev->uci_buf = kmalloc_array(uci_dev->nr_trbs, sizeof(*uci_buf), GFP_KERNEL);
if (!uci_dev->uci_buf)
return -ENOMEM;
uci_buf = uci_dev->uci_buf;
for (i = 0; i < uci_dev->nr_trbs; i++, uci_buf++) {
uci_buf->page = alloc_pages(GFP_KERNEL, get_order(mtu));
if (!uci_buf->page)
return -ENOMEM;
uci_buf->data = page_address(uci_buf->page);
uci_buf->len = 0;
uci_buf->nr_trb = i;
if (mhi_dev->dl_chan_id == MHI_CLIENT_DUN_IN) {
//MSG_ERR("[%d] = %p\n", i, uci_buf->data);
}
}
}
for (i = 0; i < nr_trbs; i++) {
#if 0
buf = kmalloc(mtu + sizeof(*uci_buf), GFP_KERNEL);
if (!buf)
return -ENOMEM;
uci_buf = buf + mtu;
uci_buf->data = buf;
#else
uci_buf = &uci_dev->uci_buf[i];
buf = uci_buf->data;
#endif
MSG_VERB("Allocated buf %d of %d size %zu\n", i, nr_trbs, mtu);
ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf, mtu,
MHI_EOT);
if (ret) {
#if 0
kfree(buf);
#endif
MSG_ERR("Failed to queue buffer %d\n", i);
return ret;
}
}
return ret;
}
static long mhi_uci_ioctl(struct file *file,
unsigned int cmd,
unsigned long arg)
{
struct uci_dev *uci_dev = file->private_data;
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
long ret = -ERESTARTSYS;
mutex_lock(&uci_dev->mutex);
if (uci_dev->enabled)
ret = mhi_ioctl(mhi_dev, cmd, arg);
if (uci_dev->enabled) {
switch (cmd) {
case TCGETS:
#ifndef TCGETS2
ret = kernel_termios_to_user_termios((struct termios __user *)arg, &uci_dev->termios);
#else
ret = kernel_termios_to_user_termios_1((struct termios __user *)arg, &uci_dev->termios);
#endif
break;
case TCSETSF:
case TCSETS:
#ifndef TCGETS2
ret = user_termios_to_kernel_termios(&uci_dev->termios, (struct termios __user *)arg);
#else
ret = user_termios_to_kernel_termios_1(&uci_dev->termios, (struct termios __user *)arg);
#endif
break;
case TCFLSH:
ret = 0;
break;
default:
break;
}
}
mutex_unlock(&uci_dev->mutex);
return ret;
}
static int mhi_uci_release(struct inode *inode, struct file *file)
{
struct uci_dev *uci_dev = file->private_data;
mutex_lock(&uci_dev->mutex);
uci_dev->ref_count--;
if (!uci_dev->ref_count) {
struct uci_chan *uci_chan;
MSG_LOG("Last client left, closing node\n");
if (uci_dev->enabled)
mhi_unprepare_from_transfer(uci_dev->mhi_dev);
/* clean inbound channel */
uci_chan = &uci_dev->dl_chan;
if (uci_dev->uci_buf) {
unsigned nr_trb = 0;
for (nr_trb = 0; nr_trb < uci_dev->nr_trbs; nr_trb++) {
if (uci_dev->uci_buf[nr_trb].page)
__free_pages(uci_dev->uci_buf[nr_trb].page, get_order(uci_dev->mtu));
}
kfree(uci_dev->uci_buf);
}
uci_chan->cur_buf = NULL;
if (!uci_dev->enabled) {
MSG_LOG("Node is deleted, freeing dev node\n");
mutex_unlock(&uci_dev->mutex);
mutex_destroy(&uci_dev->mutex);
clear_bit(MINOR(uci_dev->devt), uci_minors);
kfree(uci_dev);
return 0;
}
}
MSG_LOG("exit: ref_count:%d\n", uci_dev->ref_count);
mutex_unlock(&uci_dev->mutex);
return 0;
}
static unsigned int mhi_uci_poll(struct file *file, poll_table *wait)
{
struct uci_dev *uci_dev = file->private_data;
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
struct uci_chan *uci_chan;
unsigned int mask = 0;
poll_wait(file, &uci_dev->dl_chan.wq, wait);
// ADPL and QDSS do not need poll write. xingduo.du 2023-02-16
// poll_wait(file, &uci_dev->ul_chan.wq, wait);
uci_chan = &uci_dev->dl_chan;
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
mask = POLLERR;
} else if (!list_empty(&uci_chan->pending) || uci_chan->cur_buf) {
MSG_VERB("Client can read from node\n");
mask |= POLLIN | POLLRDNORM;
}
spin_unlock_bh(&uci_chan->lock);
// ADPL and QDSS are single channel, so ul_chan is not initialized. xingduo.du 2023-02-27
if (mhi_dev->ul_chan) {
poll_wait(file, &uci_dev->ul_chan.wq, wait);
uci_chan = &uci_dev->ul_chan;
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
mask |= POLLERR;
} else if (mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE) > 0) {
MSG_VERB("Client can write to node\n");
mask |= POLLOUT | POLLWRNORM;
}
if (!uci_dev->enabled)
mask |= POLLHUP;
if (uci_dev->rx_error)
mask |= POLLERR;
spin_unlock_bh(&uci_chan->lock);
}
MSG_LOG("Client attempted to poll, returning mask 0x%x\n", mask);
return mask;
}
static ssize_t mhi_uci_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *offp)
{
struct uci_dev *uci_dev = file->private_data;
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
struct uci_chan *uci_chan = &uci_dev->ul_chan;
size_t bytes_xfered = 0;
int ret, nr_avail;
if (!buf || !count || uci_dev->rx_error)
return -EINVAL;
/* confirm channel is active */
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
spin_unlock_bh(&uci_chan->lock);
return -ERESTARTSYS;
}
MSG_VERB("Enter: to xfer:%zu bytes\n", count);
while (count) {
size_t xfer_size;
void *kbuf;
enum MHI_FLAGS flags;
spin_unlock_bh(&uci_chan->lock);
nr_avail = mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE);
if ((nr_avail == 0) && (file->f_flags & O_NONBLOCK))
return -EAGAIN;
/* wait for free descriptors */
ret = wait_event_interruptible(uci_chan->wq,
(!uci_dev->enabled) ||
(nr_avail = mhi_get_no_free_descriptors(mhi_dev,
DMA_TO_DEVICE)) > 0);
if (ret == -ERESTARTSYS || !uci_dev->enabled) {
MSG_LOG("Exit signal caught for node or not enabled\n");
return -ERESTARTSYS;
}
xfer_size = min_t(size_t, count, uci_dev->mtu);
kbuf = kmalloc(xfer_size, GFP_KERNEL);
if (!kbuf) {
MSG_ERR("Failed to allocate memory %zu\n", xfer_size);
return -ENOMEM;
}
ret = copy_from_user(kbuf, buf, xfer_size);
if (unlikely(ret)) {
kfree(kbuf);
return ret;
}
spin_lock_bh(&uci_chan->lock);
/* if ring is full after this force EOT */
if (nr_avail > 1 && (count - xfer_size))
flags = MHI_CHAIN;
else
flags = MHI_EOT;
if (uci_dev->enabled)
ret = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, kbuf,
xfer_size, flags);
else
ret = -ERESTARTSYS;
if (ret) {
kfree(kbuf);
goto sys_interrupt;
}
bytes_xfered += xfer_size;
count -= xfer_size;
buf += xfer_size;
}
spin_unlock_bh(&uci_chan->lock);
MSG_VERB("Exit: Number of bytes xferred:%zu\n", bytes_xfered);
return bytes_xfered;
sys_interrupt:
spin_unlock_bh(&uci_chan->lock);
return ret;
}
static ssize_t mhi_uci_read(struct file *file,
char __user *buf,
size_t count,
loff_t *ppos)
{
struct uci_dev *uci_dev = file->private_data;
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
struct uci_chan *uci_chan = &uci_dev->dl_chan;
struct uci_buf *uci_buf;
char *ptr;
size_t to_copy;
int ret = 0;
if (!buf || uci_dev->rx_error)
return -EINVAL;
MSG_VERB("Client provided buf len:%zu\n", count);
/* confirm channel is active */
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
spin_unlock_bh(&uci_chan->lock);
return -ERESTARTSYS;
}
/* No data available to read, wait */
if (!uci_chan->cur_buf && list_empty(&uci_chan->pending)) {
MSG_VERB("No data available to read waiting\n");
spin_unlock_bh(&uci_chan->lock);
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
ret = wait_event_interruptible(uci_chan->wq,
(!uci_dev->enabled ||
!list_empty(&uci_chan->pending)));
if (ret == -ERESTARTSYS) {
MSG_LOG("Exit signal caught for node\n");
return -ERESTARTSYS;
}
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
MSG_LOG("node is disabled\n");
ret = -ERESTARTSYS;
goto read_error;
}
}
/* new read, get the next descriptor from the list */
if (!uci_chan->cur_buf) {
uci_buf = list_first_entry_or_null(&uci_chan->pending,
struct uci_buf, node);
if (unlikely(!uci_buf)) {
ret = -EIO;
goto read_error;
}
if (uci_buf->node.next == LIST_POISON1 || uci_buf->node.prev == LIST_POISON2) {
dump_stack();
ret = -EIO;
MSG_ERR("chan[%d] data=%p, len=%zd, nr_trb=%d\n",
mhi_dev->dl_chan_id, uci_buf->data, uci_buf->len, uci_buf->nr_trb);
goto read_error;
}
list_del(&uci_buf->node);
uci_chan->cur_buf = uci_buf;
uci_chan->rx_size = uci_buf->len;
MSG_VERB("Got pkt of size:%zu\n", uci_chan->rx_size);
}
uci_buf = uci_chan->cur_buf;
spin_unlock_bh(&uci_chan->lock);
/* Copy the buffer to user space */
to_copy = min_t(size_t, count, uci_chan->rx_size);
ptr = uci_buf->data + (uci_buf->len - uci_chan->rx_size);
ret = copy_to_user(buf, ptr, to_copy);
if (ret)
return ret;
MSG_VERB("Copied %zu of %zu bytes\n", to_copy, uci_chan->rx_size);
uci_chan->rx_size -= to_copy;
/* we finished with this buffer, queue it back to hardware */
if (!uci_chan->rx_size) {
spin_lock_bh(&uci_chan->lock);
uci_chan->cur_buf = NULL;
if (uci_dev->enabled)
#if 1 /* this keeps the buffer address in the ring unchanged */
{
if (uci_buf->page) {
unsigned nr_trb = uci_buf->nr_trb ? (uci_buf->nr_trb - 1) : (uci_dev->nr_trbs - 1);
uci_buf = &uci_dev->uci_buf[nr_trb];
ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE,
uci_buf->data, uci_dev->mtu,
MHI_EOT);
} else {
kfree(uci_buf);
ret = 0;
}
}
#endif
else
ret = -ERESTARTSYS;
if (ret) {
MSG_ERR("Failed to recycle element for chan:%d , ret=%d\n", mhi_dev->ul_chan_id, ret);
#if 0
kfree(uci_buf->data);
#endif
goto read_error;
}
spin_unlock_bh(&uci_chan->lock);
}
MSG_VERB("Returning %zu bytes\n", to_copy);
return to_copy;
read_error:
spin_unlock_bh(&uci_chan->lock);
return ret;
}
static ssize_t mhi_uci_write_mutex(struct file *file,
const char __user *buf,
size_t count,
loff_t *offp)
{
struct uci_dev *uci_dev = file->private_data;
int ret;
ret = mutex_lock_interruptible(&uci_dev->w_mutex); /*concurrent writes */
if (ret < 0)
return -ERESTARTSYS;
ret = mhi_uci_write(file, buf, count, offp);
mutex_unlock(&uci_dev->w_mutex);
return ret;
}
static ssize_t mhi_uci_read_mutex(struct file *file,
char __user *buf,
size_t count,
loff_t *ppos)
{
struct uci_dev *uci_dev = file->private_data;
int ret;
ret = mutex_lock_interruptible(&uci_dev->r_mutex); /*concurrent reads */
if (ret < 0)
return -ERESTARTSYS;
ret = mhi_uci_read(file, buf, count, ppos);
mutex_unlock(&uci_dev->r_mutex);
return ret;
}
static int mhi_uci_open(struct inode *inode, struct file *filp)
{
struct uci_dev *uci_dev = NULL, *tmp_dev;
int ret = -EIO;
struct uci_chan *dl_chan;
mutex_lock(&mhi_uci_drv.lock);
list_for_each_entry(tmp_dev, &mhi_uci_drv.head, node) {
if (tmp_dev->devt == inode->i_rdev) {
uci_dev = tmp_dev;
break;
}
}
/* could not find a minor node */
if (!uci_dev)
goto error_exit;
mutex_lock(&uci_dev->mutex);
if (!uci_dev->enabled) {
MSG_ERR("Node exist, but not in active state!\n");
goto error_open_chan;
}
uci_dev->ref_count++;
MSG_LOG("Node open, ref counts %u\n", uci_dev->ref_count);
if (uci_dev->ref_count == 1) {
MSG_LOG("Starting channel\n");
ret = mhi_prepare_for_transfer(uci_dev->mhi_dev);
if (ret) {
MSG_ERR("Error starting transfer channels\n");
uci_dev->ref_count--;
goto error_open_chan;
}
ret = mhi_queue_inbound(uci_dev);
if (ret)
goto error_rx_queue;
#ifdef QUEC_MHI_UCI_ALWAYS_OPEN
uci_dev->ref_count++;
#endif
}
filp->private_data = uci_dev;
mutex_unlock(&uci_dev->mutex);
mutex_unlock(&mhi_uci_drv.lock);
return 0;
error_rx_queue:
dl_chan = &uci_dev->dl_chan;
mhi_unprepare_from_transfer(uci_dev->mhi_dev);
if (uci_dev->uci_buf) {
unsigned nr_trb = 0;
for (nr_trb = 0; nr_trb < uci_dev->nr_trbs; nr_trb++) {
if (uci_dev->uci_buf[nr_trb].page)
__free_pages(uci_dev->uci_buf[nr_trb].page, get_order(uci_dev->mtu));
}
kfree(uci_dev->uci_buf);
}
error_open_chan:
mutex_unlock(&uci_dev->mutex);
error_exit:
mutex_unlock(&mhi_uci_drv.lock);
return ret;
}
static const struct file_operations mhidev_fops = {
.open = mhi_uci_open,
.release = mhi_uci_release,
.read = mhi_uci_read_mutex,
.write = mhi_uci_write_mutex,
.poll = mhi_uci_poll,
.unlocked_ioctl = mhi_uci_ioctl,
};
static void mhi_uci_remove(struct mhi_device *mhi_dev)
{
struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
MSG_LOG("Enter\n");
mutex_lock(&mhi_uci_drv.lock);
mutex_lock(&uci_dev->mutex);
/* disable the node */
spin_lock_irq(&uci_dev->dl_chan.lock);
spin_lock_irq(&uci_dev->ul_chan.lock);
uci_dev->enabled = false;
spin_unlock_irq(&uci_dev->ul_chan.lock);
spin_unlock_irq(&uci_dev->dl_chan.lock);
wake_up(&uci_dev->dl_chan.wq);
wake_up(&uci_dev->ul_chan.wq);
/* delete the node to prevent new opens */
device_destroy(mhi_uci_drv.class, uci_dev->devt);
uci_dev->dev = NULL;
list_del(&uci_dev->node);
#ifdef QUEC_MHI_UCI_ALWAYS_OPEN
if (uci_dev->ref_count > 0)
uci_dev->ref_count--;
#endif
/* safe to free memory only if all file nodes are closed */
if (!uci_dev->ref_count) {
mutex_unlock(&uci_dev->mutex);
mutex_destroy(&uci_dev->mutex);
clear_bit(MINOR(uci_dev->devt), uci_minors);
kfree(uci_dev);
mutex_unlock(&mhi_uci_drv.lock);
return;
}
MSG_LOG("Exit\n");
mutex_unlock(&uci_dev->mutex);
mutex_unlock(&mhi_uci_drv.lock);
}
static int mhi_uci_probe(struct mhi_device *mhi_dev,
const struct mhi_device_id *id)
{
struct uci_dev *uci_dev;
int minor;
char node_name[32];
int dir;
uci_dev = kzalloc(sizeof(*uci_dev), GFP_KERNEL);
if (!uci_dev)
return -ENOMEM;
mutex_init(&uci_dev->mutex);
mutex_init(&uci_dev->r_mutex);
mutex_init(&uci_dev->w_mutex);
uci_dev->mhi_dev = mhi_dev;
minor = find_first_zero_bit(uci_minors, MAX_UCI_DEVICES);
if (minor >= MAX_UCI_DEVICES) {
kfree(uci_dev);
return -ENOSPC;
}
mutex_lock(&uci_dev->mutex);
mutex_lock(&mhi_uci_drv.lock);
uci_dev->devt = MKDEV(mhi_uci_drv.major, minor);
#if 1
if (mhi_dev->mhi_cntrl->cntrl_idx)
uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev,
uci_dev->devt, uci_dev,
DEVICE_NAME "_%s%d",
mhi_dev->chan_name, mhi_dev->mhi_cntrl->cntrl_idx);
else
uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev,
uci_dev->devt, uci_dev,
DEVICE_NAME "_%s",
mhi_dev->chan_name);
#else
uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev,
uci_dev->devt, uci_dev,
DEVICE_NAME "_%04x_%02u.%02u.%02u%s%d",
mhi_dev->dev_id, mhi_dev->domain,
mhi_dev->bus, mhi_dev->slot, "_pipe_",
mhi_dev->ul_chan_id);
#endif
set_bit(minor, uci_minors);
/* create debugging buffer */
snprintf(node_name, sizeof(node_name), "mhi_uci_%04x_%02u.%02u.%02u_%d",
mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus, mhi_dev->slot,
mhi_dev->ul_chan_id);
for (dir = 0; dir < 2; dir++) {
struct uci_chan *uci_chan = (dir) ?
&uci_dev->ul_chan : &uci_dev->dl_chan;
spin_lock_init(&uci_chan->lock);
init_waitqueue_head(&uci_chan->wq);
INIT_LIST_HEAD(&uci_chan->pending);
}
uci_dev->termios = tty_std_termios;
uci_dev->mtu = min_t(size_t, id->driver_data, mhi_dev->mtu);
mhi_device_set_devdata(mhi_dev, uci_dev);
uci_dev->enabled = true;
list_add(&uci_dev->node, &mhi_uci_drv.head);
mutex_unlock(&mhi_uci_drv.lock);
mutex_unlock(&uci_dev->mutex);
MSG_LOG("channel:%s successfully probed\n", mhi_dev->chan_name);
return 0;
}
static void mhi_ul_xfer_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
struct uci_chan *uci_chan = &uci_dev->ul_chan;
MSG_VERB("status:%d xfer_len:%zu\n", mhi_result->transaction_status,
mhi_result->bytes_xferd);
kfree(mhi_result->buf_addr);
if (!mhi_result->transaction_status)
wake_up(&uci_chan->wq);
}
static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
struct uci_chan *uci_chan = &uci_dev->dl_chan;
unsigned long flags;
struct uci_buf *buf;
unsigned nr_trb = uci_dev->nr_trb;
buf = &uci_dev->uci_buf[nr_trb];
if (buf == NULL) {
MSG_ERR("buf = NULL");
return;
}
if (buf->nr_trb != nr_trb || buf->data != mhi_result->buf_addr)
{
uci_dev->rx_error++;
MSG_ERR("chan[%d]: uci_buf[%u] = %p , mhi_result[%u] = %p\n",
mhi_dev->dl_chan_id, buf->nr_trb, buf->data, nr_trb, mhi_result->buf_addr);
return;
}
uci_dev->nr_trb++;
if (uci_dev->nr_trb == uci_dev->nr_trbs)
uci_dev->nr_trb = 0;
if (mhi_result->transaction_status == -ENOTCONN) {
return;
}
if (mhi_result->bytes_xferd > uci_dev->mtu || mhi_result->bytes_xferd <= 0)
{
MSG_ERR("chan[%d]: bytes_xferd = %zd , mtu = %zd\n",
mhi_dev->dl_chan_id, mhi_result->bytes_xferd, uci_dev->mtu);
return;
}
if (mhi_result->bytes_xferd > uci_dev->bytes_xferd)
{
uci_dev->bytes_xferd = mhi_result->bytes_xferd;
//MSG_ERR("chan[%d]: bytes_xferd = %zd , mtu = %zd\n",
// mhi_dev->dl_chan_id, mhi_result->bytes_xferd, uci_dev->mtu);
}
MSG_VERB("status:%d receive_len:%zu\n", mhi_result->transaction_status,
mhi_result->bytes_xferd);
spin_lock_irqsave(&uci_chan->lock, flags);
#if 0
buf = mhi_result->buf_addr + uci_dev->mtu;
buf->data = mhi_result->buf_addr;
#endif
buf->len = mhi_result->bytes_xferd;
if (mhi_dev->dl_chan_id == MHI_CLIENT_DUN_IN
|| mhi_dev->dl_chan_id == MHI_CLIENT_QMI_IN
|| mhi_dev->dl_chan_id == MHI_CLIENT_MBIM_IN)
{
struct uci_buf *tmp_buf = NULL;
int skip_buf = 0;
#ifdef QUEC_MHI_UCI_ALWAYS_OPEN
if (uci_dev->ref_count == 1)
skip_buf++;
#endif
if (!skip_buf)
tmp_buf = (struct uci_buf *)kmalloc(buf->len + sizeof(struct uci_buf), GFP_ATOMIC);
if (tmp_buf) {
tmp_buf->page = NULL;
tmp_buf->data = ((void *)tmp_buf) + sizeof(struct uci_buf);
tmp_buf->len = buf->len;
memcpy(tmp_buf->data, buf->data, buf->len);
}
if (buf) {
struct uci_buf *uci_buf = buf;
unsigned nr_trb = uci_buf->nr_trb ? (uci_buf->nr_trb - 1) : (uci_dev->nr_trbs - 1);
uci_buf = &uci_dev->uci_buf[nr_trb];
mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, uci_buf->data, uci_dev->mtu, MHI_EOT);
}
buf = tmp_buf;
}
if (buf)
list_add_tail(&buf->node, &uci_chan->pending);
spin_unlock_irqrestore(&uci_chan->lock, flags);
#ifdef CONFIG_PM_SLEEP
if (mhi_dev->dev.power.wakeup)
__pm_wakeup_event(mhi_dev->dev.power.wakeup, 0);
#endif
wake_up(&uci_chan->wq);
}
// fix: the sdx6x module could not read the qdb file. xingduo.du 2023-01-18
#define DIAG_MAX_PCIE_PKT_SZ 8192 // defined by the module
/* .driver_data stores max mtu */
static const struct mhi_device_id mhi_uci_match_table[] = {
{ .chan = "LOOPBACK", .driver_data = 0x1000 },
{ .chan = "SAHARA", .driver_data = 0x4000 },
{ .chan = "EDL", .driver_data = 0x4000 },
{ .chan = "DIAG", .driver_data = DIAG_MAX_PCIE_PKT_SZ },
{ .chan = "MBIM", .driver_data = 0x1000 },
{ .chan = "QMI0", .driver_data = 0x1000 },
{ .chan = "QMI1", .driver_data = 0x1000 },
{ .chan = "DUN", .driver_data = 0x1000 },
#ifdef ENABLE_ADPL
{ .chan = "ADPL", .driver_data = 0x1000 },
#endif
#ifdef ENABLE_QDSS
{ .chan = "QDSS", .driver_data = 0x1000 },
#endif
{},
};
static struct mhi_driver mhi_uci_driver = {
.id_table = mhi_uci_match_table,
.remove = mhi_uci_remove,
.probe = mhi_uci_probe,
.ul_xfer_cb = mhi_ul_xfer_cb,
.dl_xfer_cb = mhi_dl_xfer_cb,
.driver = {
.name = MHI_UCI_DRIVER_NAME,
.owner = THIS_MODULE,
},
};
int mhi_device_uci_init(void)
{
int ret;
ret = register_chrdev(0, MHI_UCI_DRIVER_NAME, &mhidev_fops);
if (ret < 0)
return ret;
mhi_uci_drv.major = ret;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0))
mhi_uci_drv.class = class_create(MHI_UCI_DRIVER_NAME);
#else
mhi_uci_drv.class = class_create(THIS_MODULE, MHI_UCI_DRIVER_NAME);
#endif
if (IS_ERR(mhi_uci_drv.class)) {
unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME);
return -ENODEV;
}
mutex_init(&mhi_uci_drv.lock);
INIT_LIST_HEAD(&mhi_uci_drv.head);
ret = mhi_driver_register(&mhi_uci_driver);
if (ret) {
class_destroy(mhi_uci_drv.class);
unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME);
}
return ret;
}
void mhi_device_uci_exit(void)
{
mhi_driver_unregister(&mhi_uci_driver);
class_destroy(mhi_uci_drv.class);
unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME);
}
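
For context, here is a minimal user-space sketch, not part of the patch, of how a character node created by mhi_uci_probe() is typically driven: non-blocking open, poll() for POLLIN, then read(), matching the file_operations table above. The path /dev/mhi_DUN is an assumption based on the DEVICE_NAME "_%s" naming (a controller index suffix may be appended).

```c
/* Minimal user-space sketch (not part of the patch) exercising a node
 * created by mhi_uci_probe(). The path /dev/mhi_DUN is an assumption.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct pollfd pfd;
	int fd = open("/dev/mhi_DUN", O_RDWR | O_NONBLOCK);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	pfd.fd = fd;
	pfd.events = POLLIN;

	/* Wait up to 5 s for downlink data, mirroring mhi_uci_poll(). */
	if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLIN)) {
		ssize_t n = read(fd, buf, sizeof(buf));
		if (n > 0)
			printf("read %zd bytes from the DUN channel\n", n);
	}

	close(fd);
	return 0;
}
```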

View File

@ -0,0 +1,13 @@
#
# RMNET MAP driver
#
menuconfig RMNET
tristate "RmNet MAP driver"
default n
select GRO_CELLS
---help---
If you select this, you will enable the RMNET module which is used
for handling data in the multiplexing and aggregation protocol (MAP)
format in the embedded data path. RMNET devices can be attached to
any IP mode physical device.

View File

@ -0,0 +1,11 @@
#
# Makefile for the RMNET module
#
rmnet-y := rmnet_config.o
rmnet-y += rmnet_vnd.o
rmnet-y += rmnet_handlers.o
rmnet-y += rmnet_map_data.o
rmnet-y += rmnet_map_command.o
rmnet-y += rmnet_descriptor.o
obj-$(CONFIG_RMNET) += rmnet.o

View File

@ -0,0 +1,141 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET configuration engine
*
*/
#include <net/sock.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netdevice.h>
#include <linux/hashtable.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_vnd.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_descriptor.h"
/* Locking scheme -
* The shared resource which needs to be protected is realdev->rx_handler_data.
* For the writer path, this is using rtnl_lock(). The writer paths are
* rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These
paths are already called with rtnl_lock() acquired. There is also an
* ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For
* dereference here, we will need to use rtnl_dereference(). Dev list writing
* needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link().
* For the reader path, the real_dev->rx_handler_data is called in the TX / RX
* path. We only need rcu_read_lock() for these scenarios. In these cases,
* the rcu_read_lock() is held in __dev_queue_xmit() and
* netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl()
* to get the relevant information. For dev list reading, we again acquire
* rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu().
* We also use unregister_netdevice_many() to free all rmnet devices in
rmnet_force_unassociate_device() so we don't lose the rtnl_lock() and free in
* same context.
*/
/* Local Definitions and Declarations */
static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
{
return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
}
/* Needs rtnl lock */
static struct rmnet_port*
rmnet_get_port_rtnl(const struct net_device *real_dev)
{
return rtnl_dereference(real_dev->rx_handler_data);
}
static int rmnet_unregister_real_device(struct net_device *real_dev,
struct rmnet_port *port)
{
if (port->nr_rmnet_devs)
return -EINVAL;
rmnet_map_cmd_exit(port);
rmnet_map_tx_aggregate_exit(port);
rmnet_descriptor_deinit(port);
kfree(port);
netdev_rx_handler_unregister(real_dev);
/* release reference on real_dev */
dev_put(real_dev);
netdev_dbg(real_dev, "Removed from rmnet\n");
return 0;
}
static int rmnet_register_real_device(struct net_device *real_dev)
{
struct rmnet_port *port;
int rc, entry;
ASSERT_RTNL();
if (rmnet_is_real_dev_registered(real_dev))
return 0;
port = kzalloc(sizeof(*port), GFP_ATOMIC);
if (!port)
return -ENOMEM;
port->dev = real_dev;
rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port);
if (rc) {
kfree(port);
return -EBUSY;
}
/* hold on to real dev for MAP data */
dev_hold(real_dev);
for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
INIT_HLIST_HEAD(&port->muxed_ep[entry]);
rc = rmnet_descriptor_init(port);
if (rc) {
rmnet_descriptor_deinit(port);
return rc;
}
rmnet_map_tx_aggregate_init(port);
rmnet_map_cmd_init(port);
netdev_dbg(real_dev, "registered with rmnet\n");
return 0;
}
/* Needs either rcu_read_lock() or rtnl lock */
static struct rmnet_port *rmnet_get_port(struct net_device *real_dev)
{
if (rmnet_is_real_dev_registered(real_dev))
return rcu_dereference_rtnl(real_dev->rx_handler_data);
else
return NULL;
}
static struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id)
{
struct rmnet_endpoint *ep;
hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode) {
if (ep->mux_id == mux_id)
return ep;
}
return NULL;
}

View File

@ -0,0 +1,174 @@
/* Copyright (c) 2013-2017, 2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Data configuration engine
*
*/
#include <linux/skbuff.h>
#include <net/gro_cells.h>
#ifndef _RMNET_CONFIG_H_
#define _RMNET_CONFIG_H_
#define RMNET_MAX_LOGICAL_EP 255
#define RMNET_MAX_VEID 4
struct rmnet_endpoint {
u8 mux_id;
struct net_device *egress_dev;
struct hlist_node hlnode;
};
struct rmnet_port_priv_stats {
u64 dl_hdr_last_qmap_vers;
u64 dl_hdr_last_ep_id;
u64 dl_hdr_last_trans_id;
u64 dl_hdr_last_seq;
u64 dl_hdr_last_bytes;
u64 dl_hdr_last_pkts;
u64 dl_hdr_last_flows;
u64 dl_hdr_count;
u64 dl_hdr_total_bytes;
u64 dl_hdr_total_pkts;
u64 dl_trl_last_seq;
u64 dl_trl_count;
};
struct rmnet_egress_agg_params {
u16 agg_size;
u16 agg_count;
u32 agg_time;
};
/* One instance of this structure is instantiated for each real_dev associated
* with rmnet.
*/
struct rmnet_port {
struct net_device *dev;
u32 data_format;
u8 nr_rmnet_devs;
u8 rmnet_mode;
struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
struct net_device *bridge_ep;
void *rmnet_perf;
struct rmnet_egress_agg_params egress_agg_params;
/* Protect aggregation related elements */
spinlock_t agg_lock;
struct sk_buff *agg_skb;
int agg_state;
u8 agg_count;
struct timespec agg_time;
struct timespec agg_last;
struct hrtimer hrtimer;
struct work_struct agg_wq;
/* dl marker elements */
struct list_head dl_list;
struct rmnet_port_priv_stats stats;
int dl_marker_flush;
/* Descriptor pool */
spinlock_t desc_pool_lock;
struct rmnet_frag_descriptor_pool *frag_desc_pool;
struct sk_buff *chain_head;
struct sk_buff *chain_tail;
};
extern struct rtnl_link_ops rmnet_link_ops;
struct rmnet_vnd_stats {
u64 rx_pkts;
u64 rx_bytes;
u64 tx_pkts;
u64 tx_bytes;
u32 tx_drops;
};
struct rmnet_pcpu_stats {
struct rmnet_vnd_stats stats;
struct u64_stats_sync syncp;
};
struct rmnet_coal_close_stats {
u64 non_coal;
u64 ip_miss;
u64 trans_miss;
u64 hw_nl;
u64 hw_pkt;
u64 hw_byte;
u64 hw_time;
u64 hw_evict;
u64 coal;
};
struct rmnet_coal_stats {
u64 coal_rx;
u64 coal_pkts;
u64 coal_hdr_nlo_err;
u64 coal_hdr_pkt_err;
u64 coal_csum_err;
u64 coal_reconstruct;
u64 coal_ip_invalid;
u64 coal_trans_invalid;
struct rmnet_coal_close_stats close;
u64 coal_veid[RMNET_MAX_VEID];
};
struct rmnet_priv_stats {
u64 csum_ok;
u64 csum_valid_unset;
u64 csum_validation_failed;
u64 csum_err_bad_buffer;
u64 csum_err_invalid_ip_version;
u64 csum_err_invalid_transport;
u64 csum_fragmented_pkt;
u64 csum_skipped;
u64 csum_sw;
u64 csum_hw;
struct rmnet_coal_stats coal;
};
struct rmnet_priv {
u8 mux_id;
struct net_device *real_dev;
struct rmnet_pcpu_stats __percpu *pcpu_stats;
struct gro_cells gro_cells;
struct rmnet_priv_stats stats;
};
enum rmnet_dl_marker_prio {
RMNET_PERF,
RMNET_SHS,
};
enum rmnet_trace_func {
RMNET_MODULE,
NW_STACK_MODULE,
};
enum rmnet_trace_evt {
RMNET_DLVR_SKB,
RMNET_RCV_FROM_PND,
RMNET_TX_UL_PKT,
NW_STACK_DEV_Q_XMIT,
NW_STACK_NAPI_GRO_FLUSH,
NW_STACK_RX,
NW_STACK_TX,
};
static int rmnet_is_real_dev_registered(const struct net_device *real_dev);
static struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
static struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id);
#endif /* _RMNET_CONFIG_H_ */
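
rmnet_priv keeps its counters in per-CPU rmnet_pcpu_stats protected by u64_stats_sync. A minimal sketch, not taken from this patch, of how such counters are usually folded into a single rmnet_vnd_stats snapshot follows; the helper name is hypothetical and it assumes the 6.x u64_stats_fetch_begin()/_retry() interface.

```c
/* Minimal sketch (not from this patch): folding the per-CPU counters
 * declared above into one rmnet_vnd_stats snapshot. The helper name is
 * hypothetical; the fetch/retry API is the 6.x u64_stats interface.
 */
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/u64_stats_sync.h>
#include "rmnet_config.h"

static void rmnet_fold_stats_sketch(struct rmnet_pcpu_stats __percpu *pcpu,
				    struct rmnet_vnd_stats *total)
{
	int cpu;

	memset(total, 0, sizeof(*total));
	for_each_possible_cpu(cpu) {
		struct rmnet_pcpu_stats *p = per_cpu_ptr(pcpu, cpu);
		struct rmnet_vnd_stats snap;
		unsigned int start;

		/* Retry if a writer updated the counters mid-read. */
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			snap = p->stats;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		total->rx_pkts  += snap.rx_pkts;
		total->rx_bytes += snap.rx_bytes;
		total->tx_pkts  += snap.tx_pkts;
		total->tx_bytes += snap.tx_bytes;
		total->tx_drops += snap.tx_drops;
	}
}
```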

File diff suppressed because it is too large

View File

@ -0,0 +1,661 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Packet Descriptor Framework
*
*/
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include "rmnet_config.h"
#include "rmnet_descriptor.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_vnd.h"
#define RMNET_FRAG_DESCRIPTOR_POOL_SIZE 64
#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \
sizeof(struct rmnet_map_header) + \
sizeof(struct rmnet_map_control_command_header))
#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \
sizeof(struct rmnet_map_header) + \
sizeof(struct rmnet_map_control_command_header))
typedef void (*rmnet_perf_desc_hook_t)(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port);
typedef void (*rmnet_perf_chain_hook_t)(void);
static struct rmnet_frag_descriptor *
rmnet_get_frag_descriptor(struct rmnet_port *port)
{
struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
struct rmnet_frag_descriptor *frag_desc;
spin_lock(&port->desc_pool_lock);
if (!list_empty(&pool->free_list)) {
frag_desc = list_first_entry(&pool->free_list,
struct rmnet_frag_descriptor,
list);
list_del_init(&frag_desc->list);
} else {
frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
if (!frag_desc)
goto out;
INIT_LIST_HEAD(&frag_desc->list);
INIT_LIST_HEAD(&frag_desc->sub_frags);
pool->pool_size++;
}
out:
spin_unlock(&port->desc_pool_lock);
return frag_desc;
}
static void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port)
{
struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
struct page *page = skb_frag_page(&frag_desc->frag);
list_del(&frag_desc->list);
if (page)
put_page(page);
memset(frag_desc, 0, sizeof(*frag_desc));
INIT_LIST_HEAD(&frag_desc->list);
INIT_LIST_HEAD(&frag_desc->sub_frags);
spin_lock(&port->desc_pool_lock);
list_add_tail(&frag_desc->list, &pool->free_list);
spin_unlock(&port->desc_pool_lock);
}
static void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list,
struct page *p, u32 page_offset, u32 len)
{
struct rmnet_frag_descriptor *frag_desc;
frag_desc = rmnet_get_frag_descriptor(port);
if (!frag_desc)
return;
rmnet_frag_fill(frag_desc, p, page_offset, len);
list_add_tail(&frag_desc->list, list);
}
static u8 rmnet_frag_do_flow_control(struct rmnet_map_header *qmap,
struct rmnet_port *port,
int enable)
{
struct rmnet_map_control_command *cmd;
struct rmnet_endpoint *ep;
struct net_device *vnd;
u16 ip_family;
u16 fc_seq;
u32 qos_id;
u8 mux_id;
int r;
mux_id = qmap->mux_id;
cmd = (struct rmnet_map_control_command *)
((char *)qmap + sizeof(*qmap));
if (mux_id >= RMNET_MAX_LOGICAL_EP)
return RX_HANDLER_CONSUMED;
ep = rmnet_get_endpoint(port, mux_id);
if (!ep)
return RX_HANDLER_CONSUMED;
vnd = ep->egress_dev;
ip_family = cmd->flow_control.ip_family;
fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
qos_id = ntohl(cmd->flow_control.qos_id);
/* Ignore the ip family and pass the sequence number for both v4 and v6
* sequence. User space does not support creating dedicated flows for
* the 2 protocols
*/
r = rmnet_vnd_do_flow_control(vnd, enable);
if (r)
return RMNET_MAP_COMMAND_UNSUPPORTED;
else
return RMNET_MAP_COMMAND_ACK;
}
static void rmnet_frag_send_ack(struct rmnet_map_header *qmap,
unsigned char type,
struct rmnet_port *port)
{
struct rmnet_map_control_command *cmd;
struct net_device *dev = port->dev;
struct sk_buff *skb;
u16 alloc_len = ntohs(qmap->pkt_len) + sizeof(*qmap);
skb = alloc_skb(alloc_len, GFP_ATOMIC);
if (!skb)
return;
skb->protocol = htons(ETH_P_MAP);
skb->dev = dev;
cmd = rmnet_map_get_cmd_start(skb);
cmd->cmd_type = type & 0x03;
netif_tx_lock(dev);
dev->netdev_ops->ndo_start_xmit(skb, dev);
netif_tx_unlock(dev);
}
/* Process MAP command frame and send N/ACK message as appropriate. Message cmd
* name is decoded here and appropriate handler is called.
*/
static void rmnet_frag_command(struct rmnet_map_header *qmap, struct rmnet_port *port)
{
struct rmnet_map_control_command *cmd;
unsigned char command_name;
unsigned char rc = 0;
cmd = (struct rmnet_map_control_command *)
((char *)qmap + sizeof(*qmap));
command_name = cmd->command_name;
switch (command_name) {
case RMNET_MAP_COMMAND_FLOW_ENABLE:
rc = rmnet_frag_do_flow_control(qmap, port, 1);
break;
case RMNET_MAP_COMMAND_FLOW_DISABLE:
rc = rmnet_frag_do_flow_control(qmap, port, 0);
break;
default:
rc = RMNET_MAP_COMMAND_UNSUPPORTED;
break;
}
if (rc == RMNET_MAP_COMMAND_ACK)
rmnet_frag_send_ack(qmap, rc, port);
}
static void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port,
struct list_head *list)
{
struct rmnet_map_header *maph;
u8 *data = skb_frag_address(frag);
u32 offset = 0;
u32 packet_len;
while (offset < skb_frag_size(frag)) {
maph = (struct rmnet_map_header *)data;
packet_len = ntohs(maph->pkt_len);
/* Some hardware can send us empty frames. Catch them */
if (packet_len == 0)
return;
packet_len += sizeof(*maph);
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
WARN_ON(1);
} else if (port->data_format &
(RMNET_FLAGS_INGRESS_MAP_CKSUMV5 |
RMNET_FLAGS_INGRESS_COALESCE) && !maph->cd_bit) {
u32 hsize = 0;
u8 type;
type = ((struct rmnet_map_v5_coal_header *)
(data + sizeof(*maph)))->header_type;
switch (type) {
case RMNET_MAP_HEADER_TYPE_COALESCING:
hsize = sizeof(struct rmnet_map_v5_coal_header);
break;
case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
hsize = sizeof(struct rmnet_map_v5_csum_header);
break;
}
packet_len += hsize;
} else {
//qmap_hex_dump(__func__, data, 64);
WARN_ON(1);
}
if ((int)skb_frag_size(frag) - (int)packet_len < 0)
return;
rmnet_descriptor_add_frag(port, list, skb_frag_page(frag),
frag->page_offset + offset,
packet_len);
offset += packet_len;
data += packet_len;
}
}
/* Allocate and populate an skb to contain the packet represented by the
* frag descriptor.
*/
static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port)
{
struct sk_buff *head_skb, *current_skb, *skb;
struct skb_shared_info *shinfo;
struct rmnet_frag_descriptor *sub_frag, *tmp;
/* Use the exact sizes if we know them (i.e. RSB/RSC, rmnet_perf) */
if (frag_desc->hdrs_valid) {
u16 hdr_len = frag_desc->ip_len + frag_desc->trans_len;
head_skb = alloc_skb(hdr_len + RMNET_MAP_DESC_HEADROOM,
GFP_ATOMIC);
if (!head_skb)
return NULL;
skb_reserve(head_skb, RMNET_MAP_DESC_HEADROOM);
skb_put_data(head_skb, frag_desc->hdr_ptr, hdr_len);
skb_reset_network_header(head_skb);
if (frag_desc->trans_len)
skb_set_transport_header(head_skb, frag_desc->ip_len);
/* Packets that have no data portion don't need any frags */
if (hdr_len == skb_frag_size(&frag_desc->frag))
goto skip_frags;
/* If the headers we added are the start of the page,
* we don't want to add them twice
*/
if (frag_desc->hdr_ptr == rmnet_frag_data_ptr(frag_desc)) {
if (!rmnet_frag_pull(frag_desc, port, hdr_len)) {
kfree_skb(head_skb);
return NULL;
}
}
} else {
/* Allocate enough space to avoid penalties in the stack
* from __pskb_pull_tail()
*/
head_skb = alloc_skb(256 + RMNET_MAP_DESC_HEADROOM,
GFP_ATOMIC);
if (!head_skb)
return NULL;
skb_reserve(head_skb, RMNET_MAP_DESC_HEADROOM);
}
/* Add main fragment */
get_page(skb_frag_page(&frag_desc->frag));
skb_add_rx_frag(head_skb, 0, skb_frag_page(&frag_desc->frag),
frag_desc->frag.page_offset,
skb_frag_size(&frag_desc->frag),
skb_frag_size(&frag_desc->frag));
shinfo = skb_shinfo(head_skb);
current_skb = head_skb;
/* Add in any frags from rmnet_perf */
list_for_each_entry_safe(sub_frag, tmp, &frag_desc->sub_frags, list) {
skb_frag_t *frag;
u32 frag_size;
frag = &sub_frag->frag;
frag_size = skb_frag_size(frag);
add_frag:
if (shinfo->nr_frags < MAX_SKB_FRAGS) {
get_page(skb_frag_page(frag));
skb_add_rx_frag(current_skb, shinfo->nr_frags,
skb_frag_page(frag), frag->page_offset,
frag_size, frag_size);
if (current_skb != head_skb) {
head_skb->len += frag_size;
head_skb->data_len += frag_size;
}
} else {
/* Alloc a new skb and try again */
skb = alloc_skb(0, GFP_ATOMIC);
if (!skb)
break;
if (current_skb == head_skb)
shinfo->frag_list = skb;
else
current_skb->next = skb;
current_skb = skb;
shinfo = skb_shinfo(current_skb);
goto add_frag;
}
rmnet_recycle_frag_descriptor(sub_frag, port);
}
skip_frags:
head_skb->dev = frag_desc->dev;
rmnet_set_skb_proto(head_skb);
/* Handle any header metadata that needs to be updated after RSB/RSC
* segmentation
*/
if (frag_desc->ip_id_set) {
struct iphdr *iph;
iph = (struct iphdr *)rmnet_map_data_ptr(head_skb);
csum_replace2(&iph->check, iph->id, frag_desc->ip_id);
iph->id = frag_desc->ip_id;
}
if (frag_desc->tcp_seq_set) {
struct tcphdr *th;
th = (struct tcphdr *)
(rmnet_map_data_ptr(head_skb) + frag_desc->ip_len);
th->seq = frag_desc->tcp_seq;
}
/* Handle csum offloading */
if (frag_desc->csum_valid && frag_desc->hdrs_valid) {
/* Set the partial checksum information */
//rmnet_frag_partial_csum(head_skb, frag_desc);
WARN_ON(1);
} else if (frag_desc->csum_valid) {
/* Non-RSB/RSC/perf packet. The current checksum is fine */
head_skb->ip_summed = CHECKSUM_UNNECESSARY;
} else if (frag_desc->hdrs_valid &&
(frag_desc->trans_proto == IPPROTO_TCP ||
frag_desc->trans_proto == IPPROTO_UDP)) {
/* Unfortunately, we have to fake a bad checksum here, since
* the original bad value is lost by the hardware. The only
* reliable way to do it is to calculate the actual checksum
* and corrupt it.
*/
__sum16 *check;
__wsum csum;
unsigned int offset = skb_transport_offset(head_skb);
__sum16 pseudo;
WARN_ON(1);
/* Calculate pseudo header and update header fields */
if (frag_desc->ip_proto == 4) {
struct iphdr *iph = ip_hdr(head_skb);
__be16 tot_len = htons(head_skb->len);
csum_replace2(&iph->check, iph->tot_len, tot_len);
iph->tot_len = tot_len;
pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
head_skb->len -
frag_desc->ip_len,
frag_desc->trans_proto, 0);
} else {
struct ipv6hdr *ip6h = ipv6_hdr(head_skb);
ip6h->payload_len = htons(head_skb->len -
sizeof(*ip6h));
pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
head_skb->len -
frag_desc->ip_len,
frag_desc->trans_proto, 0);
}
if (frag_desc->trans_proto == IPPROTO_TCP) {
check = &tcp_hdr(head_skb)->check;
} else {
udp_hdr(head_skb)->len = htons(head_skb->len -
frag_desc->ip_len);
check = &udp_hdr(head_skb)->check;
}
*check = pseudo;
csum = skb_checksum(head_skb, offset, head_skb->len - offset,
0);
/* Add 1 to corrupt. This cannot produce a final value of 0
* since csum_fold() can't return a value of 0xFFFF
*/
*check = csum16_add(csum_fold(csum), htons(1));
head_skb->ip_summed = CHECKSUM_NONE;
}
/* Handle any rmnet_perf metadata */
if (frag_desc->hash) {
head_skb->hash = frag_desc->hash;
head_skb->sw_hash = 1;
}
if (frag_desc->flush_shs)
head_skb->cb[0] = 1;
/* Handle coalesced packets */
//if (frag_desc->gso_segs > 1)
// rmnet_frag_gso_stamp(head_skb, frag_desc);
return head_skb;
}
/* Deliver the packets contained within a frag descriptor */
static void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port)
{
struct sk_buff *skb;
skb = rmnet_alloc_skb(frag_desc, port);
if (skb)
rmnet_deliver_skb(skb, port);
rmnet_recycle_frag_descriptor(frag_desc, port);
}
/* Process a QMAPv5 packet header */
static int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port,
struct list_head *list,
u16 len)
{
int rc = 0;
switch (rmnet_frag_get_next_hdr_type(frag_desc)) {
case RMNET_MAP_HEADER_TYPE_COALESCING:
rc = -1;
WARN_ON(1);
break;
case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
if (rmnet_frag_get_csum_valid(frag_desc))
frag_desc->csum_valid = true;
if (!rmnet_frag_pull(frag_desc, port,
sizeof(struct rmnet_map_header) +
sizeof(struct rmnet_map_v5_csum_header))) {
rc = -EINVAL;
break;
}
frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc);
/* Remove padding only for csum offload packets.
* Coalesced packets should never have padding.
*/
if (!rmnet_frag_trim(frag_desc, port, len)) {
rc = -EINVAL;
break;
}
list_del_init(&frag_desc->list);
list_add_tail(&frag_desc->list, list);
break;
default:
//qmap_hex_dump(__func__, rmnet_frag_data_ptr(frag_desc), 64);
rc = -EINVAL;
break;
}
return rc;
}
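/* Parse the MAP header of a single frag descriptor, resolve its logical
 * endpoint from the mux ID and deliver the resulting packet(s).
 */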
static void
__rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port)
{
struct rmnet_map_header *qmap;
struct rmnet_endpoint *ep;
struct rmnet_frag_descriptor *frag, *tmp;
LIST_HEAD(segs);
u16 len, pad;
u8 mux_id;
qmap = (struct rmnet_map_header *)skb_frag_address(&frag_desc->frag);
mux_id = qmap->mux_id;
pad = qmap->pad_len;
len = ntohs(qmap->pkt_len) - pad;
if (qmap->cd_bit) {
if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
//rmnet_frag_flow_command(qmap, port, len);
goto recycle;
}
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
rmnet_frag_command(qmap, port);
goto recycle;
}
if (mux_id >= RMNET_MAX_LOGICAL_EP)
goto recycle;
ep = rmnet_get_endpoint(port, mux_id);
if (!ep)
goto recycle;
frag_desc->dev = ep->egress_dev;
/* Handle QMAPv5 packet */
if (qmap->next_hdr &&
(port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) {
if (rmnet_frag_process_next_hdr_packet(frag_desc, port, &segs,
len))
goto recycle;
} else {
/* We only have the main QMAP header to worry about */
if (!rmnet_frag_pull(frag_desc, port, sizeof(*qmap)))
return;
frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc);
if (!rmnet_frag_trim(frag_desc, port, len))
return;
list_add_tail(&frag_desc->list, &segs);
}
list_for_each_entry_safe(frag, tmp, &segs, list) {
list_del_init(&frag->list);
rmnet_frag_deliver(frag, port);
}
return;
recycle:
rmnet_recycle_frag_descriptor(frag_desc, port);
}
static void rmnet_frag_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port)
{
LIST_HEAD(desc_list);
int i = 0;
struct rmnet_nss_cb *nss_cb;
/* Deaggregation and freeing of HW originating
* buffers is done within here
*/
while (skb) {
struct sk_buff *skb_frag;
port->chain_head = NULL;
port->chain_tail = NULL;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
rmnet_frag_deaggregate(&skb_shinfo(skb)->frags[i], port,
&desc_list);
if (!list_empty(&desc_list)) {
struct rmnet_frag_descriptor *frag_desc, *tmp;
list_for_each_entry_safe(frag_desc, tmp,
&desc_list, list) {
list_del_init(&frag_desc->list);
__rmnet_frag_ingress_handler(frag_desc,
port);
}
}
}
nss_cb = rcu_dereference(rmnet_nss_callbacks);
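/* Hand any skb chain accumulated by rmnet_deliver_skb() to the stack
 * in a single netif_receive_skb() call.
 */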
if (nss_cb && port->chain_head) {
port->chain_head->cb[0] = 0;
netif_receive_skb(port->chain_head);
}
skb_frag = skb_shinfo(skb)->frag_list;
skb_shinfo(skb)->frag_list = NULL;
consume_skb(skb);
skb = skb_frag;
}
}
void rmnet_descriptor_deinit(struct rmnet_port *port)
{
struct rmnet_frag_descriptor_pool *pool;
struct rmnet_frag_descriptor *frag_desc, *tmp;
pool = port->frag_desc_pool;
list_for_each_entry_safe(frag_desc, tmp, &pool->free_list, list) {
kfree(frag_desc);
pool->pool_size--;
}
kfree(pool);
}
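/* Pre-allocate RMNET_FRAG_DESCRIPTOR_POOL_SIZE descriptors onto the
 * port's free list.
 */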
int rmnet_descriptor_init(struct rmnet_port *port)
{
struct rmnet_frag_descriptor_pool *pool;
int i;
spin_lock_init(&port->desc_pool_lock);
pool = kzalloc(sizeof(*pool), GFP_ATOMIC);
if (!pool)
return -ENOMEM;
INIT_LIST_HEAD(&pool->free_list);
port->frag_desc_pool = pool;
for (i = 0; i < RMNET_FRAG_DESCRIPTOR_POOL_SIZE; i++) {
struct rmnet_frag_descriptor *frag_desc;
frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
if (!frag_desc)
return -ENOMEM;
INIT_LIST_HEAD(&frag_desc->list);
INIT_LIST_HEAD(&frag_desc->sub_frags);
list_add_tail(&frag_desc->list, &pool->free_list);
pool->pool_size++;
}
return 0;
}

View File

@ -0,0 +1,146 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Packet Descriptor Framework
*
*/
#ifndef _RMNET_DESCRIPTOR_H_
#define _RMNET_DESCRIPTOR_H_
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
struct rmnet_frag_descriptor_pool {
struct list_head free_list;
u32 pool_size;
};
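/* Describes one deaggregated MAP packet: the page fragment holding its
 * data plus the header metadata parsed out of it.
 */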
struct rmnet_frag_descriptor {
struct list_head list;
struct list_head sub_frags;
skb_frag_t frag;
u8 *hdr_ptr;
struct net_device *dev;
u32 hash;
__be32 tcp_seq;
__be16 ip_id;
u16 data_offset;
u16 gso_size;
u16 gso_segs;
u16 ip_len;
u16 trans_len;
u8 ip_proto;
u8 trans_proto;
u8 pkt_id;
u8 csum_valid:1,
hdrs_valid:1,
ip_id_set:1,
tcp_seq_set:1,
flush_shs:1,
reserved:3;
};
/* Descriptor management */
static struct rmnet_frag_descriptor *
rmnet_get_frag_descriptor(struct rmnet_port *port);
static void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port);
static void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list,
struct page *p, u32 page_offset, u32 len);
/* QMAP command packets */
/* Ingress data handlers */
static void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port,
struct list_head *list);
static void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port);
static int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port,
struct list_head *list,
u16 len);
static void rmnet_frag_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port);
static int rmnet_descriptor_init(struct rmnet_port *port);
static void rmnet_descriptor_deinit(struct rmnet_port *port);
static inline void *rmnet_frag_data_ptr(struct rmnet_frag_descriptor *frag_desc)
{
return skb_frag_address(&frag_desc->frag);
}
static inline void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port,
unsigned int size)
{
if (size >= skb_frag_size(&frag_desc->frag)) {
pr_info("%s(): Pulling %u bytes from %u byte pkt. Dropping\n",
__func__, size, skb_frag_size(&frag_desc->frag));
rmnet_recycle_frag_descriptor(frag_desc, port);
return NULL;
}
frag_desc->frag.page_offset += size;
skb_frag_size_sub(&frag_desc->frag, size);
return rmnet_frag_data_ptr(frag_desc);
}
static inline void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc,
struct rmnet_port *port,
unsigned int size)
{
if (!size) {
pr_info("%s(): Trimming %u byte pkt to 0. Dropping\n",
__func__, skb_frag_size(&frag_desc->frag));
rmnet_recycle_frag_descriptor(frag_desc, port);
return NULL;
}
if (size < skb_frag_size(&frag_desc->frag))
skb_frag_size_set(&frag_desc->frag, size);
return rmnet_frag_data_ptr(frag_desc);
}
static inline void rmnet_frag_fill(struct rmnet_frag_descriptor *frag_desc,
struct page *p, u32 page_offset, u32 len)
{
get_page(p);
__skb_frag_set_page(&frag_desc->frag, p);
skb_frag_size_set(&frag_desc->frag, len);
frag_desc->frag.page_offset = page_offset;
}
static inline u8
rmnet_frag_get_next_hdr_type(struct rmnet_frag_descriptor *frag_desc)
{
unsigned char *data = rmnet_frag_data_ptr(frag_desc);
data += sizeof(struct rmnet_map_header);
return ((struct rmnet_map_v5_coal_header *)data)->header_type;
}
static inline bool
rmnet_frag_get_csum_valid(struct rmnet_frag_descriptor *frag_desc)
{
unsigned char *data = rmnet_frag_data_ptr(frag_desc);
data += sizeof(struct rmnet_map_header);
return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required;
}
#endif /* _RMNET_DESCRIPTOR_H_ */

View File

@ -0,0 +1,374 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Data ingress/egress handler
*
*/
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/sock.h>
#include <linux/tracepoint.h>
#include "rmnet_private.h"
#include "rmnet_config.h"
#include "rmnet_vnd.h"
#include "rmnet_map.h"
#include "rmnet_handlers.h"
#include "rmnet_descriptor.h"
#define RMNET_IP_VERSION_4 0x40
#define RMNET_IP_VERSION_6 0x60
/* Helper Functions */
static void rmnet_set_skb_proto(struct sk_buff *skb)
{
switch (rmnet_map_data_ptr(skb)[0] & 0xF0) {
case RMNET_IP_VERSION_4:
skb->protocol = htons(ETH_P_IP);
break;
case RMNET_IP_VERSION_6:
skb->protocol = htons(ETH_P_IPV6);
break;
default:
skb->protocol = htons(ETH_P_MAP);
break;
}
}
/* Generic handler */
static void
rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port)
{
struct rmnet_nss_cb *nss_cb;
rmnet_vnd_rx_fixup(skb->dev, skb->len);
/* Pass off the packet to NSS driver if we can */
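/* When an NSS callback is registered, skbs are accumulated on
 * port->chain_head/chain_tail (linked through the tail's frag_list)
 * instead of being delivered to the stack one at a time.
 */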
nss_cb = rcu_dereference(rmnet_nss_callbacks);
if (nss_cb) {
if (!port->chain_head)
port->chain_head = skb;
else
skb_shinfo(port->chain_tail)->frag_list = skb;
port->chain_tail = skb;
return;
}
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
skb->pkt_type = PACKET_HOST;
skb_set_mac_header(skb, 0);
//if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
//} else {
//if (!rmnet_check_skb_can_gro(skb))
// gro_cells_receive(&priv->gro_cells, skb);
//else
netif_receive_skb(skb);
//}
}
/* Deliver a list of skbs after undoing coalescing */
static void rmnet_deliver_skb_list(struct sk_buff_head *head,
struct rmnet_port *port)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(head))) {
rmnet_set_skb_proto(skb);
rmnet_deliver_skb(skb, port);
}
}
/* MAP handler */
static void
_rmnet_map_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port)
{
struct rmnet_map_header *qmap;
struct rmnet_endpoint *ep;
struct sk_buff_head list;
u16 len, pad;
u8 mux_id;
/* We don't need the spinlock since only we touch this */
__skb_queue_head_init(&list);
qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
if (qmap->cd_bit) {
if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
/* DL marker handling is not wired up here; free the
 * command frame instead of leaking it.
 */
//if (!rmnet_map_flow_command(skb, port, false))
goto free_skb;
}
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
return rmnet_map_command(skb, port);
goto free_skb;
}
mux_id = qmap->mux_id;
pad = qmap->pad_len;
len = ntohs(qmap->pkt_len) - pad;
if (mux_id >= RMNET_MAX_LOGICAL_EP)
goto free_skb;
ep = rmnet_get_endpoint(port, mux_id);
if (!ep)
goto free_skb;
skb->dev = ep->egress_dev;
/* Handle QMAPv5 packet */
if (qmap->next_hdr &&
(port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) {
if (rmnet_map_process_next_hdr_packet(skb, &list, len))
goto free_skb;
} else {
/* We only have the main QMAP header to worry about */
pskb_pull(skb, sizeof(*qmap));
rmnet_set_skb_proto(skb);
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
//if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
// skb->ip_summed = CHECKSUM_UNNECESSARY;
}
pskb_trim(skb, len);
/* Push the single packet onto the list */
__skb_queue_tail(&list, skb);
}
rmnet_deliver_skb_list(&list, port);
return;
free_skb:
kfree_skb(skb);
}
static void
rmnet_map_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port)
{
struct sk_buff *skbn;
if (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
RMNET_FLAGS_INGRESS_MAP_CKSUMV5)) {
if (skb_is_nonlinear(skb)) {
rmnet_frag_ingress_handler(skb, port);
return;
}
}
/* Deaggregation and freeing of HW originating
* buffers is done within here
*/
while (skb) {
struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list;
skb_shinfo(skb)->frag_list = NULL;
while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL) {
_rmnet_map_ingress_handler(skbn, port);
if (skbn == skb)
goto next_skb;
}
consume_skb(skb);
next_skb:
skb = skb_frag;
}
}
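/* Prepend the checksum and MAP headers, then either queue the frame for
 * TX aggregation or hand it back for immediate transmit.
 */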
static int rmnet_map_egress_handler(struct sk_buff *skb,
struct rmnet_port *port, u8 mux_id,
struct net_device *orig_dev)
{
int required_headroom, additional_header_len, csum_type;
struct rmnet_map_header *map_header;
additional_header_len = 0;
required_headroom = sizeof(struct rmnet_map_header);
csum_type = 0;
if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
} else if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) {
additional_header_len = sizeof(struct rmnet_map_v5_csum_header);
csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5;
}
required_headroom += additional_header_len;
if (skb_headroom(skb) < required_headroom) {
if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
return -ENOMEM;
}
if (csum_type)
rmnet_map_checksum_uplink_packet(skb, orig_dev, csum_type);
map_header = rmnet_map_add_map_header(skb, additional_header_len, 0,
port);
if (!map_header)
return -ENOMEM;
map_header->mux_id = mux_id;
if (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
if (rmnet_map_tx_agg_skip(skb, required_headroom))
goto done;
rmnet_map_tx_aggregate(skb, port);
return -EINPROGRESS;
}
done:
skb->protocol = htons(ETH_P_MAP);
return 0;
}
static void
rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
{
if (bridge_dev) {
skb->dev = bridge_dev;
dev_queue_xmit(skb);
}
}
/* Ingress / Egress Entry Points */
/* Processes packet as per ingress data format for receiving device. Logical
* endpoint is determined from packet inspection. Packet is then sent to the
* egress device listed in the logical endpoint configuration.
*/
static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct rmnet_port *port;
struct net_device *dev;
if (!skb)
goto done;
if (skb->pkt_type == PACKET_LOOPBACK)
return RX_HANDLER_PASS;
dev = skb->dev;
port = rmnet_get_port(dev);
port->chain_head = NULL;
port->chain_tail = NULL;
switch (port->rmnet_mode) {
case RMNET_EPMODE_VND:
rmnet_map_ingress_handler(skb, port);
break;
case RMNET_EPMODE_BRIDGE:
rmnet_bridge_handler(skb, port->bridge_ep);
break;
}
done:
return RX_HANDLER_CONSUMED;
}
static rx_handler_result_t rmnet_rx_priv_handler(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct rmnet_nss_cb *nss_cb;
if (!skb)
return RX_HANDLER_CONSUMED;
if (nss_debug)
printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n",
__func__, skb, skb->len, skb->protocol, skb->hdr_len);
if (skb->pkt_type == PACKET_LOOPBACK)
return RX_HANDLER_PASS;
/* Check this so that we don't loop around netif_receive_skb */
if (skb->cb[0] == 1) {
skb->cb[0] = 0;
skb->dev->stats.rx_packets++;
return RX_HANDLER_PASS;
}
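/* Walk the frag_list chain and hand each skb to the NSS driver */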
while (skb) {
struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list;
skb_shinfo(skb)->frag_list = NULL;
nss_cb = rcu_dereference(rmnet_nss_callbacks);
if (nss_cb)
nss_cb->nss_tx(skb);
skb = skb_frag;
}
return RX_HANDLER_CONSUMED;
}
/* Modifies packet as per logical endpoint configuration and egress data format
* for egress device configured in logical endpoint. Packet is then transmitted
* on the egress device.
*/
static void rmnet_egress_handler(struct sk_buff *skb)
{
struct net_device *orig_dev;
struct rmnet_port *port;
struct rmnet_priv *priv;
u8 mux_id;
int err;
u32 skb_len;
skb_orphan(skb);
orig_dev = skb->dev;
priv = netdev_priv(orig_dev);
skb->dev = priv->real_dev;
mux_id = priv->mux_id;
port = rmnet_get_port(skb->dev);
if (!port)
goto drop;
skb_len = skb->len;
err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev);
if (err == -ENOMEM)
goto drop;
else if (err == -EINPROGRESS) {
rmnet_vnd_tx_fixup(orig_dev, skb_len);
return;
}
rmnet_vnd_tx_fixup(orig_dev, skb_len);
dev_queue_xmit(skb);
return;
drop:
this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
kfree_skb(skb);
}

View File

@ -0,0 +1,32 @@
/* Copyright (c) 2013, 2016-2017, 2019
* The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Data ingress/egress handler
*
*/
#ifndef _RMNET_HANDLERS_H_
#define _RMNET_HANDLERS_H_
#include "rmnet_config.h"
enum rmnet_packet_context {
RMNET_NET_RX_CTX,
RMNET_WQ_CTX,
};
static void rmnet_egress_handler(struct sk_buff *skb);
static void rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port);
static void rmnet_set_skb_proto(struct sk_buff *skb);
static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb);
static rx_handler_result_t rmnet_rx_priv_handler(struct sk_buff **pskb);
#endif /* _RMNET_HANDLERS_H_ */

View File

@ -0,0 +1,272 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _RMNET_MAP_H_
#define _RMNET_MAP_H_
#include <linux/skbuff.h>
#include "rmnet_config.h"
struct rmnet_map_control_command {
u8 command_name;
u8 cmd_type:2;
u8 reserved:6;
u16 reserved2;
u32 transaction_id;
union {
struct {
u16 ip_family:2;
u16 reserved:14;
__be16 flow_control_seq_num;
__be32 qos_id;
} flow_control;
u8 data[0];
};
} __aligned(1);
enum rmnet_map_commands {
RMNET_MAP_COMMAND_NONE,
RMNET_MAP_COMMAND_FLOW_DISABLE,
RMNET_MAP_COMMAND_FLOW_ENABLE,
RMNET_MAP_COMMAND_FLOW_START = 7,
RMNET_MAP_COMMAND_FLOW_END = 8,
/* These should always be the last 2 elements */
RMNET_MAP_COMMAND_UNKNOWN,
RMNET_MAP_COMMAND_ENUM_LENGTH
};
enum rmnet_map_v5_header_type {
RMNET_MAP_HEADER_TYPE_UNKNOWN,
RMNET_MAP_HEADER_TYPE_COALESCING = 0x1,
RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2,
RMNET_MAP_HEADER_TYPE_ENUM_LENGTH
};
enum rmnet_map_v5_close_type {
RMNET_MAP_COAL_CLOSE_NON_COAL,
RMNET_MAP_COAL_CLOSE_IP_MISS,
RMNET_MAP_COAL_CLOSE_TRANS_MISS,
RMNET_MAP_COAL_CLOSE_HW,
RMNET_MAP_COAL_CLOSE_COAL,
};
enum rmnet_map_v5_close_value {
RMNET_MAP_COAL_CLOSE_HW_NL,
RMNET_MAP_COAL_CLOSE_HW_PKT,
RMNET_MAP_COAL_CLOSE_HW_BYTE,
RMNET_MAP_COAL_CLOSE_HW_TIME,
RMNET_MAP_COAL_CLOSE_HW_EVICT,
};
/* Main QMAP header */
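/* cd_bit marks command frames, next_hdr indicates a MAPv5 header follows,
 * pad_len is the number of trailing padding bytes and pkt_len is the
 * payload length (including padding) in network byte order.
 */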
struct rmnet_map_header {
u8 pad_len:6;
u8 next_hdr:1;
u8 cd_bit:1;
u8 mux_id;
__be16 pkt_len;
} __aligned(1);
/* QMAP v5 headers */
struct rmnet_map_v5_csum_header {
u8 next_hdr:1;
u8 header_type:7;
u8 hw_reserved:7;
u8 csum_valid_required:1;
__be16 reserved;
} __aligned(1);
struct rmnet_map_v5_nl_pair {
__be16 pkt_len;
u8 csum_error_bitmap;
u8 num_packets;
} __aligned(1);
/* NLO: Number-length object */
#define RMNET_MAP_V5_MAX_NLOS (6)
#define RMNET_MAP_V5_MAX_PACKETS (48)
struct rmnet_map_v5_coal_header {
u8 next_hdr:1;
u8 header_type:7;
u8 reserved1:4;
u8 num_nlos:3;
u8 csum_valid:1;
u8 close_type:4;
u8 close_value:4;
u8 reserved2:4;
u8 virtual_channel_id:4;
struct rmnet_map_v5_nl_pair nl_pairs[RMNET_MAP_V5_MAX_NLOS];
} __aligned(1);
/* QMAP v4 headers */
struct rmnet_map_dl_csum_trailer {
u8 reserved1;
u8 valid:1;
u8 reserved2:7;
u16 csum_start_offset;
u16 csum_length;
__be16 csum_value;
} __aligned(1);
struct rmnet_map_ul_csum_header {
__be16 csum_start_offset;
u16 csum_insert_offset:14;
u16 udp_ind:1;
u16 csum_enabled:1;
} __aligned(1);
struct rmnet_map_control_command_header {
u8 command_name;
u8 cmd_type:2;
u8 reserved:5;
u8 e:1;
u16 source_id:15;
u16 ext:1;
u32 transaction_id;
} __aligned(1);
struct rmnet_map_flow_info_le {
__be32 mux_id;
__be32 flow_id;
__be32 bytes;
__be32 pkts;
} __aligned(1);
struct rmnet_map_flow_info_be {
u32 mux_id;
u32 flow_id;
u32 bytes;
u32 pkts;
} __aligned(1);
struct rmnet_map_dl_ind_hdr {
union {
struct {
u32 seq;
u32 bytes;
u32 pkts;
u32 flows;
struct rmnet_map_flow_info_le flow[0];
} le __aligned(1);
struct {
__be32 seq;
__be32 bytes;
__be32 pkts;
__be32 flows;
struct rmnet_map_flow_info_be flow[0];
} be __aligned(1);
} __aligned(1);
} __aligned(1);
struct rmnet_map_dl_ind_trl {
union {
__be32 seq_be;
u32 seq_le;
} __aligned(1);
} __aligned(1);
struct rmnet_map_dl_ind {
u8 priority;
union {
void (*dl_hdr_handler)(struct rmnet_map_dl_ind_hdr *);
void (*dl_hdr_handler_v2)(struct rmnet_map_dl_ind_hdr *,
struct
rmnet_map_control_command_header *);
} __aligned(1);
union {
void (*dl_trl_handler)(struct rmnet_map_dl_ind_trl *);
void (*dl_trl_handler_v2)(struct rmnet_map_dl_ind_trl *,
struct
rmnet_map_control_command_header *);
} __aligned(1);
struct list_head list;
};
#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \
(Y)->data)->mux_id)
#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \
(Y)->data)->cd_bit)
#define RMNET_MAP_GET_PAD(Y) (((struct rmnet_map_header *) \
(Y)->data)->pad_len)
#define RMNET_MAP_GET_CMD_START(Y) ((struct rmnet_map_control_command *) \
((Y)->data + \
sizeof(struct rmnet_map_header)))
#define RMNET_MAP_GET_LENGTH(Y) (ntohs(((struct rmnet_map_header *) \
(Y)->data)->pkt_len))
#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
#define RMNET_MAP_DESC_HEADROOM 128
#define RMNET_MAP_COMMAND_REQUEST 0
#define RMNET_MAP_COMMAND_ACK 1
#define RMNET_MAP_COMMAND_UNSUPPORTED 2
#define RMNET_MAP_COMMAND_INVALID 3
#define RMNET_MAP_NO_PAD_BYTES 0
#define RMNET_MAP_ADD_PAD_BYTES 1
static inline unsigned char *rmnet_map_data_ptr(struct sk_buff *skb)
{
/* Nonlinear packets we receive are entirely within frag 0 */
if (skb_is_nonlinear(skb) && skb->len == skb->data_len)
return skb_frag_address(skb_shinfo(skb)->frags);
return skb->data;
}
static inline struct rmnet_map_control_command *
rmnet_map_get_cmd_start(struct sk_buff *skb)
{
unsigned char *data = rmnet_map_data_ptr(skb);
data += sizeof(struct rmnet_map_header);
return (struct rmnet_map_control_command *)data;
}
static inline u8 rmnet_map_get_next_hdr_type(struct sk_buff *skb)
{
unsigned char *data = rmnet_map_data_ptr(skb);
data += sizeof(struct rmnet_map_header);
return ((struct rmnet_map_v5_coal_header *)data)->header_type;
}
static inline bool rmnet_map_get_csum_valid(struct sk_buff *skb)
{
unsigned char *data = rmnet_map_data_ptr(skb);
data += sizeof(struct rmnet_map_header);
return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required;
}
static struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
struct rmnet_port *port);
static struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
int hdrlen, int pad,
struct rmnet_port *port);
static void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
static void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev,
int csum_type);
static int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
struct sk_buff_head *list,
u16 len);
static int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset);
static void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port);
static void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
static void rmnet_map_tx_aggregate_exit(struct rmnet_port *port);
static void rmnet_map_cmd_init(struct rmnet_port *port);
static void rmnet_map_cmd_exit(struct rmnet_port *port);
#endif /* _RMNET_MAP_H_ */

View File

@ -0,0 +1,143 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/netdevice.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
#include "rmnet_vnd.h"
#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \
sizeof(struct rmnet_map_header) + \
sizeof(struct rmnet_map_control_command_header))
#define RMNET_MAP_CMD_SIZE (sizeof(struct rmnet_map_header) + \
sizeof(struct rmnet_map_control_command_header))
#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \
sizeof(struct rmnet_map_header) + \
sizeof(struct rmnet_map_control_command_header))
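/* Handle a MAP flow control command: look up the endpoint for the mux ID
 * and enable/disable flow on its virtual device, returning an
 * ACK/UNSUPPORTED code.
 */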
static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
struct rmnet_port *port,
int enable)
{
struct rmnet_map_header *qmap;
struct rmnet_map_control_command *cmd;
struct rmnet_endpoint *ep;
struct net_device *vnd;
u16 ip_family;
u16 fc_seq;
u32 qos_id;
u8 mux_id;
int r;
qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
mux_id = qmap->mux_id;
cmd = rmnet_map_get_cmd_start(skb);
if (mux_id >= RMNET_MAX_LOGICAL_EP) {
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
ep = rmnet_get_endpoint(port, mux_id);
if (!ep) {
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
vnd = ep->egress_dev;
ip_family = cmd->flow_control.ip_family;
fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
qos_id = ntohl(cmd->flow_control.qos_id);
/* Ignore the ip family and pass the sequence number for both v4 and v6
* sequence. User space does not support creating dedicated flows for
* the 2 protocols
*/
r = rmnet_vnd_do_flow_control(vnd, enable);
if (r) {
kfree_skb(skb);
return RMNET_MAP_COMMAND_UNSUPPORTED;
}
return RMNET_MAP_COMMAND_ACK;
}
static void rmnet_map_send_ack(struct sk_buff *skb,
unsigned char type,
struct rmnet_port *port)
{
struct rmnet_map_control_command *cmd;
struct net_device *dev = skb->dev;
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
pskb_trim(skb,
skb->len - sizeof(struct rmnet_map_dl_csum_trailer));
skb->protocol = htons(ETH_P_MAP);
cmd = rmnet_map_get_cmd_start(skb);
cmd->cmd_type = type & 0x03;
netif_tx_lock(dev);
dev->netdev_ops->ndo_start_xmit(skb, dev);
netif_tx_unlock(dev);
}
/* Process MAP command frame and send N/ACK message as appropriate. Message cmd
* name is decoded here and appropriate handler is called.
*/
static void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
{
struct rmnet_map_control_command *cmd;
unsigned char command_name;
unsigned char rc = 0;
cmd = rmnet_map_get_cmd_start(skb);
command_name = cmd->command_name;
switch (command_name) {
case RMNET_MAP_COMMAND_FLOW_ENABLE:
rc = rmnet_map_do_flow_control(skb, port, 1);
break;
case RMNET_MAP_COMMAND_FLOW_DISABLE:
rc = rmnet_map_do_flow_control(skb, port, 0);
break;
default:
rc = RMNET_MAP_COMMAND_UNSUPPORTED;
kfree_skb(skb);
break;
}
if (rc == RMNET_MAP_COMMAND_ACK)
rmnet_map_send_ack(skb, rc, port);
}
static void rmnet_map_cmd_exit(struct rmnet_port *port)
{
struct rmnet_map_dl_ind *tmp, *idx;
list_for_each_entry_safe(tmp, idx, &port->dl_list, list)
list_del_rcu(&tmp->list);
}
static void rmnet_map_cmd_init(struct rmnet_port *port)
{
INIT_LIST_HEAD(&port->dl_list);
port->dl_marker_flush = -1;
}

View File

@ -0,0 +1,682 @@
/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* RMNET Data MAP protocol
*
*/
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
#include "rmnet_handlers.h"
#define RMNET_MAP_PKT_COPY_THRESHOLD 64
#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
struct rmnet_map_coal_metadata {
void *ip_header;
void *trans_header;
u16 ip_len;
u16 trans_len;
u16 data_offset;
u16 data_len;
u8 ip_proto;
u8 trans_proto;
u8 pkt_id;
u8 pkt_count;
};
static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
const void *txporthdr)
{
__sum16 *check = NULL;
switch (protocol) {
case IPPROTO_TCP:
check = &(((struct tcphdr *)txporthdr)->check);
break;
case IPPROTO_UDP:
check = &(((struct udphdr *)txporthdr)->check);
break;
default:
check = NULL;
break;
}
return check;
}
static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
{
struct iphdr *ip4h = (struct iphdr *)iphdr;
void *txphdr;
u16 *csum;
txphdr = iphdr + ip4h->ihl * 4;
if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
*csum = ~(*csum);
}
}
static void
rmnet_map_ipv4_ul_csum_header(void *iphdr,
struct rmnet_map_ul_csum_header *ul_header,
struct sk_buff *skb)
{
struct iphdr *ip4h = (struct iphdr *)iphdr;
__be16 *hdr = (__be16 *)ul_header, offset;
offset = htons((__force u16)(skb_transport_header(skb) -
(unsigned char *)iphdr));
ul_header->csum_start_offset = offset;
ul_header->csum_insert_offset = skb->csum_offset;
ul_header->csum_enabled = 1;
if (ip4h->protocol == IPPROTO_UDP)
ul_header->udp_ind = 1;
else
ul_header->udp_ind = 0;
/* Changing remaining fields to network order */
hdr++;
*hdr = htons((__force u16)*hdr);
skb->ip_summed = CHECKSUM_NONE;
rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
}
#if IS_ENABLED(CONFIG_IPV6)
static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
{
struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
void *txphdr;
u16 *csum;
txphdr = ip6hdr + sizeof(struct ipv6hdr);
if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
*csum = ~(*csum);
}
}
static void
rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
struct rmnet_map_ul_csum_header *ul_header,
struct sk_buff *skb)
{
struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
__be16 *hdr = (__be16 *)ul_header, offset;
offset = htons((__force u16)(skb_transport_header(skb) -
(unsigned char *)ip6hdr));
ul_header->csum_start_offset = offset;
ul_header->csum_insert_offset = skb->csum_offset;
ul_header->csum_enabled = 1;
if (ip6h->nexthdr == IPPROTO_UDP)
ul_header->udp_ind = 1;
else
ul_header->udp_ind = 0;
/* Changing remaining fields to network order */
hdr++;
*hdr = htons((__force u16)*hdr);
skb->ip_summed = CHECKSUM_NONE;
rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
}
#endif
/* Adds MAP header to front of skb->data
* Padding is calculated and set appropriately in MAP header. Mux ID is
* initialized to 0.
*/
static struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
int hdrlen, int pad,
struct rmnet_port *port)
{
struct rmnet_map_header *map_header;
u32 padding, map_datalen;
u8 *padbytes;
map_datalen = skb->len - hdrlen;
map_header = (struct rmnet_map_header *)
skb_push(skb, sizeof(struct rmnet_map_header));
memset(map_header, 0, sizeof(struct rmnet_map_header));
/* Set next_hdr bit for csum offload packets */
if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5)
map_header->next_hdr = 1;
if (pad == RMNET_MAP_NO_PAD_BYTES) {
map_header->pkt_len = htons(map_datalen);
return map_header;
}
padding = ALIGN(map_datalen, 4) - map_datalen;
if (padding == 0)
goto done;
if (skb_tailroom(skb) < padding)
return NULL;
padbytes = (u8 *)skb_put(skb, padding);
memset(padbytes, 0, padding);
done:
map_header->pkt_len = htons(map_datalen + padding);
map_header->pad_len = padding & 0x3F;
return map_header;
}
/* Deaggregates a single packet
* A whole new buffer is allocated for each portion of an aggregated frame.
* Caller should keep calling deaggregate() on the source skb until NULL is
* returned, indicating that there are no more packets to deaggregate. Caller
* is responsible for freeing the original skb.
*/
static struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
struct rmnet_port *port)
{
struct rmnet_map_header *maph;
struct sk_buff *skbn;
unsigned char *data = rmnet_map_data_ptr(skb), *next_hdr = NULL;
u32 packet_len;
if (skb->len == 0)
return NULL;
maph = (struct rmnet_map_header *)data;
packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) {
if (!maph->cd_bit) {
packet_len += sizeof(struct rmnet_map_v5_csum_header);
/* Coalescing headers require MAPv5 */
next_hdr = data + sizeof(*maph);
}
}
if (((int)skb->len - (int)packet_len) < 0)
return NULL;
/* Some hardware can send us empty frames. Catch them */
if (ntohs(maph->pkt_len) == 0)
return NULL;
if (next_hdr &&
((struct rmnet_map_v5_coal_header *)next_hdr)->header_type ==
RMNET_MAP_HEADER_TYPE_COALESCING)
return skb;
if (skb_is_nonlinear(skb)) {
skb_frag_t *frag0 = skb_shinfo(skb)->frags;
struct page *page = skb_frag_page(frag0);
skbn = alloc_skb(RMNET_MAP_DEAGGR_HEADROOM, GFP_ATOMIC);
if (!skbn)
return NULL;
skb_append_pagefrags(skbn, page, frag0->page_offset,
packet_len);
skbn->data_len += packet_len;
skbn->len += packet_len;
} else {
skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING,
GFP_ATOMIC);
if (!skbn)
return NULL;
skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
skb_put(skbn, packet_len);
memcpy(skbn->data, data, packet_len);
}
pskb_pull(skb, packet_len);
return skbn;
}
static void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev)
{
struct rmnet_priv *priv = netdev_priv(orig_dev);
struct rmnet_map_ul_csum_header *ul_header;
void *iphdr;
ul_header = (struct rmnet_map_ul_csum_header *)
skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));
if (unlikely(!(orig_dev->features &
(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
goto sw_csum;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
iphdr = (char *)ul_header +
sizeof(struct rmnet_map_ul_csum_header);
if (skb->protocol == htons(ETH_P_IP)) {
rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
priv->stats.csum_hw++;
return;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_IPV6)
rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
priv->stats.csum_hw++;
return;
#else
priv->stats.csum_err_invalid_ip_version++;
goto sw_csum;
#endif
} else {
priv->stats.csum_err_invalid_ip_version++;
}
}
sw_csum:
ul_header->csum_start_offset = 0;
ul_header->csum_insert_offset = 0;
ul_header->csum_enabled = 0;
ul_header->udp_ind = 0;
priv->stats.csum_sw++;
}
static void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev)
{
struct rmnet_priv *priv = netdev_priv(orig_dev);
struct rmnet_map_v5_csum_header *ul_header;
ul_header = (struct rmnet_map_v5_csum_header *)
skb_push(skb, sizeof(*ul_header));
memset(ul_header, 0, sizeof(*ul_header));
ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
void *iph = (char *)ul_header + sizeof(*ul_header);
void *trans;
__sum16 *check;
u8 proto;
if (skb->protocol == htons(ETH_P_IP)) {
u16 ip_len = ((struct iphdr *)iph)->ihl * 4;
proto = ((struct iphdr *)iph)->protocol;
trans = iph + ip_len;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
u16 ip_len = sizeof(struct ipv6hdr);
proto = ((struct ipv6hdr *)iph)->nexthdr;
trans = iph + ip_len;
} else {
priv->stats.csum_err_invalid_ip_version++;
goto sw_csum;
}
check = rmnet_map_get_csum_field(proto, trans);
if (check) {
*check = 0;
skb->ip_summed = CHECKSUM_NONE;
/* Ask for checksum offloading */
ul_header->csum_valid_required = 1;
priv->stats.csum_hw++;
return;
}
}
sw_csum:
priv->stats.csum_sw++;
}
/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
* packets that are supported for UL checksum offload.
*/
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev,
int csum_type)
{
switch (csum_type) {
case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
break;
case RMNET_FLAGS_EGRESS_MAP_CKSUMV5:
rmnet_map_v5_checksum_uplink_packet(skb, orig_dev);
break;
default:
break;
}
}
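/* Pull the IP (and transport, when present) headers of a non-linear skb
 * into its linear area and set the network/transport header offsets.
 */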
static void rmnet_map_move_headers(struct sk_buff *skb)
{
struct iphdr *iph;
u16 ip_len;
u16 trans_len = 0;
u8 proto;
/* This only applies to non-linear SKBs */
if (!skb_is_nonlinear(skb))
return;
iph = (struct iphdr *)rmnet_map_data_ptr(skb);
if (iph->version == 4) {
ip_len = iph->ihl * 4;
proto = iph->protocol;
if (iph->frag_off & htons(IP_OFFSET))
/* No transport header information */
goto pull;
} else if (iph->version == 6) {
struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
__be16 frag_off;
u8 nexthdr = ip6h->nexthdr;
int ip6_len;
/* ipv6_skip_exthdr() returns a negative value on error, so keep
 * the result in a signed local before assigning to ip_len.
 */
ip6_len = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr,
&frag_off);
if (ip6_len < 0)
return;
ip_len = ip6_len;
proto = nexthdr;
} else {
return;
}
if (proto == IPPROTO_TCP) {
struct tcphdr *tp = (struct tcphdr *)((u8 *)iph + ip_len);
trans_len = tp->doff * 4;
} else if (proto == IPPROTO_UDP) {
trans_len = sizeof(struct udphdr);
} else if (proto == NEXTHDR_FRAGMENT) {
/* Non-first fragments don't have the fragment length added by
* ipv6_skip_exthdr() and show up as proto NEXTHDR_FRAGMENT, so
* we account for the length here.
*/
ip_len += sizeof(struct frag_hdr);
}
pull:
__pskb_pull_tail(skb, ip_len + trans_len);
skb_reset_network_header(skb);
if (trans_len)
skb_set_transport_header(skb, ip_len);
}
/* Process a QMAPv5 packet header */
static int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
struct sk_buff_head *list,
u16 len)
{
struct rmnet_priv *priv = netdev_priv(skb->dev);
int rc = 0;
switch (rmnet_map_get_next_hdr_type(skb)) {
case RMNET_MAP_HEADER_TYPE_COALESCING:
priv->stats.coal.coal_rx++;
break;
case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
if (rmnet_map_get_csum_valid(skb)) {
priv->stats.csum_ok++;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else {
priv->stats.csum_valid_unset++;
}
/* Pull unnecessary headers and move the rest to the linear
* section of the skb.
*/
pskb_pull(skb,
(sizeof(struct rmnet_map_header) +
sizeof(struct rmnet_map_v5_csum_header)));
rmnet_map_move_headers(skb);
/* Remove padding only for csum offload packets.
* Coalesced packets should never have padding.
*/
pskb_trim(skb, len);
__skb_queue_tail(list, skb);
break;
default:
rc = -EINVAL;
break;
}
return rc;
}
long rmnet_agg_time_limit __read_mostly = 1000000L;
long rmnet_agg_bypass_time __read_mostly = 10000000L;
static int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset)
{
u8 *packet_start = skb->data + offset;
int is_icmp = 0;
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *ip4h = (struct iphdr *)(packet_start);
if (ip4h->protocol == IPPROTO_ICMP)
is_icmp = 1;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start);
if (ip6h->nexthdr == IPPROTO_ICMPV6) {
is_icmp = 1;
} else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) {
struct frag_hdr *frag;
frag = (struct frag_hdr *)(packet_start
+ sizeof(struct ipv6hdr));
if (frag->nexthdr == IPPROTO_ICMPV6)
is_icmp = 1;
}
}
return is_icmp;
}
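/* Work item that ships out a pending aggregation buffer once the flush
 * timer has fired.
 */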
static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
{
struct sk_buff *skb = NULL;
struct rmnet_port *port;
unsigned long flags;
port = container_of(work, struct rmnet_port, agg_wq);
spin_lock_irqsave(&port->agg_lock, flags);
if (likely(port->agg_state == -EINPROGRESS)) {
/* Buffer may have already been shipped out */
if (likely(port->agg_skb)) {
skb = port->agg_skb;
port->agg_skb = NULL;
port->agg_count = 0;
memset(&port->agg_time, 0, sizeof(struct timespec));
}
port->agg_state = 0;
}
spin_unlock_irqrestore(&port->agg_lock, flags);
if (skb)
dev_queue_xmit(skb);
}
static enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t)
{
struct rmnet_port *port;
port = container_of(t, struct rmnet_port, hrtimer);
schedule_work(&port->agg_wq);
return HRTIMER_NORESTART;
}
static void rmnet_map_linearize_copy(struct sk_buff *dst, struct sk_buff *src)
{
unsigned int linear = src->len - src->data_len, target = src->len;
unsigned char *src_buf;
struct sk_buff *skb;
src_buf = src->data;
skb_put_data(dst, src_buf, linear);
target -= linear;
skb = src;
while (target) {
unsigned int i = 0, non_linear = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
non_linear = skb_frag_size(&skb_shinfo(skb)->frags[i]);
src_buf = skb_frag_address(&skb_shinfo(skb)->frags[i]);
skb_put_data(dst, src_buf, non_linear);
target -= non_linear;
}
if (skb_shinfo(skb)->frag_list) {
skb = skb_shinfo(skb)->frag_list;
continue;
}
if (skb->next)
skb = skb->next;
}
}
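/* Uplink TX aggregation: copy MAP frames into a single agg_skb until the
 * size/count limits are hit or the hrtimer flushes it, then transmit the
 * aggregate in one dev_queue_xmit() call.
 */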
static void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
{
struct timespec diff, last;
int size, agg_count = 0;
struct sk_buff *agg_skb;
unsigned long flags;
new_packet:
spin_lock_irqsave(&port->agg_lock, flags);
memcpy(&last, &port->agg_last, sizeof(struct timespec));
getnstimeofday(&port->agg_last);
if (!port->agg_skb) {
/* Check to see if we should agg first. If the traffic is very
* sparse, don't aggregate. We will need to tune this later
*/
diff = timespec_sub(port->agg_last, last);
size = port->egress_agg_params.agg_size - skb->len;
if (diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_bypass_time ||
size <= 0) {
spin_unlock_irqrestore(&port->agg_lock, flags);
skb->protocol = htons(ETH_P_MAP);
dev_queue_xmit(skb);
return;
}
port->agg_skb = alloc_skb(port->egress_agg_params.agg_size,
GFP_ATOMIC);
if (!port->agg_skb) {
port->agg_skb = NULL;
port->agg_count = 0;
memset(&port->agg_time, 0, sizeof(struct timespec));
spin_unlock_irqrestore(&port->agg_lock, flags);
skb->protocol = htons(ETH_P_MAP);
dev_queue_xmit(skb);
return;
}
rmnet_map_linearize_copy(port->agg_skb, skb);
port->agg_skb->dev = skb->dev;
port->agg_skb->protocol = htons(ETH_P_MAP);
port->agg_count = 1;
getnstimeofday(&port->agg_time);
dev_kfree_skb_any(skb);
goto schedule;
}
diff = timespec_sub(port->agg_last, port->agg_time);
size = port->egress_agg_params.agg_size - port->agg_skb->len;
if (skb->len > size ||
port->agg_count >= port->egress_agg_params.agg_count ||
diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_time_limit) {
agg_skb = port->agg_skb;
agg_count = port->agg_count;
port->agg_skb = NULL;
port->agg_count = 0;
memset(&port->agg_time, 0, sizeof(struct timespec));
port->agg_state = 0;
spin_unlock_irqrestore(&port->agg_lock, flags);
hrtimer_cancel(&port->hrtimer);
dev_queue_xmit(agg_skb);
goto new_packet;
}
rmnet_map_linearize_copy(port->agg_skb, skb);
port->agg_count++;
dev_kfree_skb_any(skb);
schedule:
if (port->agg_state != -EINPROGRESS) {
port->agg_state = -EINPROGRESS;
hrtimer_start(&port->hrtimer,
ns_to_ktime(port->egress_agg_params.agg_time),
HRTIMER_MODE_REL);
}
spin_unlock_irqrestore(&port->agg_lock, flags);
}
static void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
{
hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
port->egress_agg_params.agg_size = 8192;
port->egress_agg_params.agg_count = 20;
port->egress_agg_params.agg_time = 3000000;
spin_lock_init(&port->agg_lock);
INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
}
static void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
{
unsigned long flags;
hrtimer_cancel(&port->hrtimer);
cancel_work_sync(&port->agg_wq);
spin_lock_irqsave(&port->agg_lock, flags);
if (port->agg_state == -EINPROGRESS) {
if (port->agg_skb) {
kfree_skb(port->agg_skb);
port->agg_skb = NULL;
port->agg_count = 0;
memset(&port->agg_time, 0, sizeof(struct timespec));
}
port->agg_state = 0;
}
spin_unlock_irqrestore(&port->agg_lock, flags);
}

View File

@ -0,0 +1,34 @@
/* Copyright (c) 2013-2014, 2016-2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _RMNET_PRIVATE_H_
#define _RMNET_PRIVATE_H_
#define RMNET_MAX_PACKET_SIZE 16384
#define RMNET_DFLT_PACKET_SIZE 1500
#define RMNET_NEEDED_HEADROOM 16
#define RMNET_TX_QUEUE_LEN 1000
/* Constants */
#define RMNET_EGRESS_FORMAT_AGGREGATION BIT(31)
#define RMNET_INGRESS_FORMAT_DL_MARKER_V1 BIT(30)
#define RMNET_INGRESS_FORMAT_DL_MARKER_V2 BIT(29)
#define RMNET_INGRESS_FORMAT_DL_MARKER (RMNET_INGRESS_FORMAT_DL_MARKER_V1 |\
RMNET_INGRESS_FORMAT_DL_MARKER_V2)
/* Replace skb->dev to a virtual rmnet device and pass up the stack */
#define RMNET_EPMODE_VND (1)
/* Pass the frame directly to another device with dev_queue_xmit() */
#define RMNET_EPMODE_BRIDGE (2)
#endif /* _RMNET_PRIVATE_H_ */

Some files were not shown because too many files have changed in this diff.