diff --git a/qca-nss-ecm/Makefile b/qca-nss-ecm/Makefile
index da1c487..0a55886 100644
--- a/qca-nss-ecm/Makefile
+++ b/qca-nss-ecm/Makefile
@@ -160,6 +160,10 @@ ifneq ($(CONFIG_PACKAGE_kmod-bonding),)
 ECM_MAKE_OPTS+=ECM_INTERFACE_BOND_ENABLE=y
 endif
 
+ifneq ($(CONFIG_PACKAGE_kmod-qmi_wwan_q),)
+ECM_MAKE_OPTS+=ECM_INTERFACE_RAWIP_ENABLE=y
+endif
+
 define Build/InstallDev
 	mkdir -p $(1)/usr/include/qca-nss-ecm
 	$(CP) $(PKG_BUILD_DIR)/exports/* $(1)/usr/include/qca-nss-ecm
diff --git a/wwan/app/luci-app-pcimodem/Makefile b/wwan/app/luci-app-pcimodem/Makefile
new file mode 100644
index 0000000..1bcb748
--- /dev/null
+++ b/wwan/app/luci-app-pcimodem/Makefile
@@ -0,0 +1,15 @@
+#
+# Copyright (C) 2015 OpenWrt.org
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+include $(TOPDIR)/rules.mk
+
+LUCI_TITLE:=PCI Modem Server
+LUCI_DEPENDS:=+kmod-pcie_mhi +pciutils +quectel-CM-5G
+
+include $(TOPDIR)/feeds/luci/luci.mk
+
+# call BuildPackage - OpenWrt buildroot signature
diff --git a/wwan/app/luci-app-pcimodem/luasrc/controller/pcimodem.lua b/wwan/app/luci-app-pcimodem/luasrc/controller/pcimodem.lua
new file mode 100644
index 0000000..e1d3daf
--- /dev/null
+++ b/wwan/app/luci-app-pcimodem/luasrc/controller/pcimodem.lua
@@ -0,0 +1,9 @@
+module("luci.controller.pcimodem", package.seeall)
+
+function index()
+	if not nixio.fs.access("/etc/config/pcimodem") then
+		return
+	end
+
+	entry({"admin", "network", "pcimodem"}, cbi("pcimodem"), _("PCI Modem Server"), 80).dependent = false
+end
diff --git a/wwan/app/luci-app-pcimodem/luasrc/model/cbi/pcimodem.lua b/wwan/app/luci-app-pcimodem/luasrc/model/cbi/pcimodem.lua
new file mode 100644
index 0000000..258ce66
--- /dev/null
+++ b/wwan/app/luci-app-pcimodem/luasrc/model/cbi/pcimodem.lua
@@ -0,0 +1,39 @@
+-- Copyright 2016 David Thornley
+-- Licensed to the public under the Apache License 2.0.
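+-- For reference, this map edits /etc/config/pcimodem; a filled-in section
+-- would look like the following (illustrative values only; the option names
+-- mirror the fields declared below):
+--   config service
+--       option enabled '1'
+--       option tool 'quectel-CM'
+--       option apn 'internet'
+--       option pincode '1234'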
+
+mp = Map("pcimodem")
+mp.title = translate("PCI Modem Server")
+mp.description = translate("Modem Server For OpenWrt")
+
+s = mp:section(TypedSection, "service", "Base Settings")
+s.anonymous = true
+
+enabled = s:option(Flag, "enabled", translate("Enable"))
+enabled.default = 0
+enabled.rmempty = false
+
+apn = s:option(Value, "apn", translate("APN"))
+apn.rmempty = true
+
+pincode = s:option(Value, "pincode", translate("PIN"))
+pincode.rmempty = true
+
+username = s:option(Value, "username", translate("PAP/CHAP username"))
+username.rmempty = true
+
+password = s:option(Value, "password", translate("PAP/CHAP password"))
+password.rmempty = true
+
+auth = s:option(Value, "auth", translate("Authentication Type"))
+auth.rmempty = true
+auth:value("", translate("-- Please choose --"))
+auth:value("both", "PAP/CHAP (both)")
+auth:value("pap", "PAP")
+auth:value("chap", "CHAP")
+auth:value("none", "NONE")
+
+tool = s:option(Value, "tool", translate("Tools"))
+tool:value("quectel-CM", "quectel-CM")
+tool.rmempty = true
+
+return mp
diff --git a/wwan/app/luci-app-pcimodem/root/etc/config/pcimodem b/wwan/app/luci-app-pcimodem/root/etc/config/pcimodem
new file mode 100644
index 0000000..05fad9b
--- /dev/null
+++ b/wwan/app/luci-app-pcimodem/root/etc/config/pcimodem
@@ -0,0 +1,4 @@
+config service
+	option tool 'quectel-CM'
+	option enabled '0'
+
diff --git a/wwan/app/luci-app-pcimodem/root/etc/init.d/pcimodem b/wwan/app/luci-app-pcimodem/root/etc/init.d/pcimodem
new file mode 100755
index 0000000..f60124f
--- /dev/null
+++ b/wwan/app/luci-app-pcimodem/root/etc/init.d/pcimodem
@@ -0,0 +1,75 @@
+#!/bin/sh /etc/rc.common
+# Copyright (C) 2006-2014 OpenWrt.org
+
+START=99
+STOP=16
+USE_PROCD=1
+# Managed by procd
+
+run_5g()
+{
+    local enabled
+    config_get_bool enabled $1 enabled
+
+    echo "run 5G" >> /tmp/log5g
+
+    if [ "$enabled" = "1" ]; then
+        local user
+        local password
+        local apn
+        local auth
+        local pincode
+        local tool
+
+        # echo "enable 5G" >> /tmp/log5g
+        config_get user $1 username
+        config_get password $1 password
+        config_get apn $1 apn
+        config_get auth $1 auth
+        config_get pincode $1 pincode
+        config_get tool $1 tool
+        config_get tty $1 tty
+        config_get atcmd $1 atcmd
+
+        if [ "$tool" = "at" ]; then
+            at_tool "$atcmd" -d $tty
+        else
+            procd_open_instance
+            # Open a procd instance; procd allows one application to run as several instances.
+            # "ubus call service list" shows the registered instances.
+            procd_set_param command $tool -i rmnet_mhi0 -s $apn
+            if [ "$password" != "" ]; then
+                procd_append_param command $user $password $auth
+            fi
+            if [ "$pincode" != "" ]; then
+                procd_append_param command -p $pincode
+            fi
+            # procd_append_param command -f /tmp/5g.log
+            procd_set_param respawn
+            echo "quectel-CM has started."
+            procd_close_instance
+            # Close the instance
+        fi
+
+    fi
+}
+
+
+service_triggers()
+{
+    procd_add_reload_trigger "pcimodem"
+}
+
+start_service() {
+    config_load pcimodem
+    config_foreach run_5g service
+}
+
+stop_service()
+{
+    echo "5G stop" >> /tmp/log5g
+    killall quectel-CM
+    echo "quectel-CM has stopped."
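+    # The procd instance created in run_5g can be inspected before and after
+    # stopping (a sketch; the JSON name filter is optional):
+    #   ubus call service list '{"name":"pcimodem"}'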
+}
+
+
diff --git a/wwan/app/luci-app-pcimodem/root/etc/uci-defaults/luci-pcimodem b/wwan/app/luci-app-pcimodem/root/etc/uci-defaults/luci-pcimodem
new file mode 100755
index 0000000..10f5ab8
--- /dev/null
+++ b/wwan/app/luci-app-pcimodem/root/etc/uci-defaults/luci-pcimodem
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+uci -q batch <<-EOF >/dev/null
+	delete ucitrack.@pcimodem[-1]
+	add ucitrack pcimodem
+	set ucitrack.@pcimodem[-1].init=pcimodem
+	commit ucitrack
+EOF
+
+rm -f /tmp/luci-indexcache
+exit 0
diff --git a/wwan/app/luci-app-usbmodem/Makefile b/wwan/app/luci-app-usbmodem/Makefile
new file mode 100644
index 0000000..447ca3c
--- /dev/null
+++ b/wwan/app/luci-app-usbmodem/Makefile
@@ -0,0 +1,19 @@
+#
+# Copyright (C) 2015 OpenWrt.org
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+include $(TOPDIR)/rules.mk
+
+LUCI_TITLE:=Modem Server
+LUCI_DEPENDS:=+luci-compat +quectel-CM-5G +kmod-usb-acm \
+	+kmod-usb-net-cdc-ether +kmod-usb-net-cdc-mbim \
+	+kmod-usb-net-qmi-wwan +kmod-usb-net-rndis \
+	+kmod-usb-serial-option +kmod-usb-wdm \
+	+kmod-qmi_wwan_f +kmod-qmi_wwan_q
+
+include $(TOPDIR)/feeds/luci/luci.mk
+
+# call BuildPackage - OpenWrt buildroot signature
diff --git a/wwan/app/luci-app-usbmodem/luasrc/controller/usbmodem.lua b/wwan/app/luci-app-usbmodem/luasrc/controller/usbmodem.lua
new file mode 100644
index 0000000..6cf943c
--- /dev/null
+++ b/wwan/app/luci-app-usbmodem/luasrc/controller/usbmodem.lua
@@ -0,0 +1,9 @@
+module("luci.controller.usbmodem", package.seeall)
+
+function index()
+	if not nixio.fs.access("/etc/config/usbmodem") then
+		return
+	end
+
+	entry({"admin", "network", "usbmodem"}, cbi("usbmodem"), _("USB Modem Server"), 80).dependent = false
+end
diff --git a/wwan/app/luci-app-usbmodem/luasrc/model/cbi/usbmodem.lua b/wwan/app/luci-app-usbmodem/luasrc/model/cbi/usbmodem.lua
new file mode 100644
index 0000000..97af802
--- /dev/null
+++ b/wwan/app/luci-app-usbmodem/luasrc/model/cbi/usbmodem.lua
@@ -0,0 +1,51 @@
+-- Copyright 2016 David Thornley
+-- Licensed to the public under the Apache License 2.0.
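+-- For reference, this map edits /etc/config/usbmodem; next to the APN and
+-- auth fields it stores the control device chosen below, e.g. (illustrative;
+-- see root/etc/config/usbmodem for the shipped defaults):
+--   config service
+--       option device '/dev/cdc-wdm0'
+--       option apn 'internet'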
+
+mp = Map("usbmodem")
+mp.title = translate("USB Modem Server")
+mp.description = translate("Modem Server For OpenWrt")
+
+s = mp:section(TypedSection, "service", "Base Settings")
+s.anonymous = true
+
+enabled = s:option(Flag, "enabled", translate("Enable"))
+enabled.default = 0
+enabled.rmempty = false
+
+device = s:option(Value, "device", translate("Modem device"))
+device.rmempty = false
+
+local device_suggestions = nixio.fs.glob("/dev/cdc-wdm*")
+
+if device_suggestions then
+	local node
+	for node in device_suggestions do
+		device:value(node)
+	end
+end
+
+apn = s:option(Value, "apn", translate("APN"))
+apn.rmempty = true
+
+pincode = s:option(Value, "pincode", translate("PIN"))
+pincode.rmempty = true
+
+username = s:option(Value, "username", translate("PAP/CHAP username"))
+username.rmempty = true
+
+password = s:option(Value, "password", translate("PAP/CHAP password"))
+password.rmempty = true
+
+auth = s:option(Value, "auth", translate("Authentication Type"))
+auth.rmempty = true
+auth:value("", translate("-- Please choose --"))
+auth:value("both", "PAP/CHAP (both)")
+auth:value("pap", "PAP")
+auth:value("chap", "CHAP")
+auth:value("none", "NONE")
+
+tool = s:option(Value, "tool", translate("Tools"))
+tool:value("quectel-CM", "quectel-CM")
+tool.rmempty = true
+
+return mp
diff --git a/wwan/app/luci-app-usbmodem/root/etc/config/usbmodem b/wwan/app/luci-app-usbmodem/root/etc/config/usbmodem
new file mode 100644
index 0000000..8d7627b
--- /dev/null
+++ b/wwan/app/luci-app-usbmodem/root/etc/config/usbmodem
@@ -0,0 +1,5 @@
+config service
+	option tool 'quectel-CM'
+	option device '/dev/cdc-wdm0'
+	option enabled '0'
+
diff --git a/wwan/app/luci-app-usbmodem/root/etc/init.d/usbmodem b/wwan/app/luci-app-usbmodem/root/etc/init.d/usbmodem
new file mode 100755
index 0000000..e5f1081
--- /dev/null
+++ b/wwan/app/luci-app-usbmodem/root/etc/init.d/usbmodem
@@ -0,0 +1,80 @@
+#!/bin/sh /etc/rc.common
+# Copyright (C) 2006-2014 OpenWrt.org
+
+START=99
+STOP=16
+USE_PROCD=1
+# Managed by procd
+
+run_4g()
+{
+    local enabled
+    config_get_bool enabled $1 enabled
+
+    echo "run 4G" >> /tmp/log4g
+
+    if [ "$enabled" = "1" ]; then
+        local user
+        local password
+        local apn
+        local auth
+        local pincode
+        local device
+        local tool
+
+        # echo "enable 4G" >> /tmp/log4g
+        config_get user $1 username
+        config_get password $1 password
+        config_get apn $1 apn
+        config_get auth $1 auth
+        config_get pincode $1 pincode
+        config_get device $1 device
+        config_get tool $1 tool
+        config_get tty $1 tty
+        config_get atcmd $1 atcmd
+
+        devname="$(basename "$device")"
+        devpath="$(readlink -f /sys/class/usbmisc/$devname/device/)"
+        ifname="$( ls "$devpath"/net )"
+
+        if [ "$tool" = "at" ]; then
+            at_tool "$atcmd" -d $tty
+        else
+            procd_open_instance
+            # Open a procd instance; procd allows one application to run as several instances.
+            # "ubus call service list" shows the registered instances.
+            procd_set_param command $tool -i $ifname -s $apn
+            if [ "$password" != "" ]; then
+                procd_append_param command $user $password $auth
+            fi
+            if [ "$pincode" != "" ]; then
+                procd_append_param command -p $pincode
+            fi
+            # procd_append_param command -f /tmp/4g.log
+            procd_set_param respawn
+            echo "quectel-CM has started."
+            procd_close_instance
+            # Close the instance
+        fi
+
+    fi
+}
+
+
+service_triggers()
+{
+    procd_add_reload_trigger "usbmodem"
+}
+
+start_service() {
+    config_load usbmodem
+    config_foreach run_4g service
+}
+
+stop_service()
+{
+    echo "4G stop" >> /tmp/log4g
+    killall quectel-CM
+    echo "quectel-CM has stopped."
+}
+
diff --git a/wwan/app/luci-app-usbmodem/root/etc/uci-defaults/luci-usbmodem b/wwan/app/luci-app-usbmodem/root/etc/uci-defaults/luci-usbmodem
new file mode 100755
index 0000000..1b2ba06
--- /dev/null
+++ b/wwan/app/luci-app-usbmodem/root/etc/uci-defaults/luci-usbmodem
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+uci -q batch <<-EOF >/dev/null
+	delete ucitrack.@usbmodem[-1]
+	add ucitrack usbmodem
+	set ucitrack.@usbmodem[-1].init=usbmodem
+	commit ucitrack
+EOF
+
+rm -f /tmp/luci-indexcache
+exit 0
diff --git a/wwan/app/quectel_cm_5G/Makefile b/wwan/app/quectel_cm_5G/Makefile
new file mode 100644
index 0000000..85507e6
--- /dev/null
+++ b/wwan/app/quectel_cm_5G/Makefile
@@ -0,0 +1,39 @@
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:= quectel-CM-5G
+PKG_VERSION:=1.6.5
+PKG_RELEASE:=1
+
+include $(INCLUDE_DIR)/package.mk
+
+define Package/quectel-CM-5G
+	SECTION:=utils
+	CATEGORY:=Utilities
+	TITLE:=quectel-CM-5G app
+endef
+
+define Build/Prepare
+	mkdir -p $(PKG_BUILD_DIR)
+	$(CP) ./src/* $(PKG_BUILD_DIR)/
+endef
+
+define Build/Compile
+	$(MAKE) -C "$(PKG_BUILD_DIR)" \
+		EXTRA_CFLAGS="$(EXTRA_CFLAGS)" \
+		CROSS_COMPILE="$(TARGET_CROSS)" \
+		ARCH="$(LINUX_KARCH)" \
+		M="$(PKG_BUILD_DIR)" \
+		CC="$(TARGET_CC)"
+endef
+
+define Package/quectel-CM-5G/install
+	$(INSTALL_DIR) $(1)/usr/bin $(1)/lib/netifd/proto
+	$(INSTALL_BIN) $(PKG_BUILD_DIR)/quectel-CM $(1)/usr/bin
+	$(INSTALL_BIN) ./files/rmnet_init.sh $(1)/lib/netifd
+	$(INSTALL_BIN) ./files/rmnet.script $(1)/lib/netifd
+	$(INSTALL_BIN) ./files/rmnet.sh $(1)/lib/netifd/proto
+	$(INSTALL_BIN) ./files/rmnet6.sh $(1)/lib/netifd/proto
+	$(INSTALL_BIN) ./files/rmnet6.script $(1)/lib/netifd
+endef
+
+$(eval $(call BuildPackage,quectel-CM-5G))
diff --git a/wwan/app/quectel_cm_5G/files/dhcp b/wwan/app/quectel_cm_5G/files/dhcp
new file mode 100644
index 0000000..7673927
--- /dev/null
+++ b/wwan/app/quectel_cm_5G/files/dhcp
@@ -0,0 +1,48 @@
+
+config dnsmasq
+	option domainneeded '1'
+	option boguspriv '1'
+	option filterwin2k '0'
+	option localise_queries '1'
+	option rebind_protection '1'
+	option rebind_localhost '1'
+	option local '/lan/'
+	option domain 'lan'
+	option expandhosts '1'
+	option nonegcache '0'
+	option authoritative '1'
+	option readethers '1'
+	option leasefile '/tmp/dhcp.leases'
+	option resolvfile '/tmp/resolv.conf.auto'
+	option nonwildcard '1'
+	option localservice '1'
+
+config dhcp 'lan'
+	option interface 'lan'
+	option start '100'
+	option limit '150'
+	option leasetime '12h'
+	option ra 'relay'
+	option dhcpv6 'disabled'
+	option ndp 'relay'
+
+config dhcp 'wan'
+	option interface 'wan'
+	option ignore '1'
+	option ra 'relay'
+	option dhcpv6 'disabled'
+	option ndp 'relay'
+	option ndproxy_routing '0'
+	option master '1'
+
+config dhcp 'wan6'
+	option ra 'relay'
+	option dhcpv6 'disabled'
+	option ndp 'relay'
+	option ndproxy_routing '0'
+	option master '1'
+	option interface 'wan6'
+
+config odhcpd 'odhcpd'
+	option loglevel '7'
+
diff --git a/wwan/app/quectel_cm_5G/files/rmnet.script b/wwan/app/quectel_cm_5G/files/rmnet.script
new file mode 100644
index 0000000..5bafb90
--- /dev/null
+++ b/wwan/app/quectel_cm_5G/files/rmnet.script
@@ -0,0 +1,66 @@
+#!/bin/sh
+# Copyright (c) 2019 Qualcomm Technologies, Inc.
+# All Rights Reserved.
+# Confidential and Proprietary - Qualcomm Technologies, Inc.
+
+
+[ -z "$1" ] && echo "Error: should be run by rmnet" && exit 1
+[ -z "$2" ] && echo "Error: should be run by rmnet" && exit 1
+
+. /lib/functions.sh
+. /lib/functions/network.sh
+. /lib/netifd/netifd-proto.sh
+
+setup_interface() {
+    INTERFACE=$1
+    CONFIG=/tmp/rmnet_$2_ipv4config
+    logger "rmnet setup_interface $1 $2 here"
+    # Fetch the information from the lower layer.
+    [ -f ${CONFIG} ] || {
+        proto_notify_error "$INTERFACE" "RMNET data call Not ready"
+        proto_block_restart "$INTERFACE"
+        return
+    }
+    . ${CONFIG}
+    ip=$PUBLIC_IP
+    DNS=$DNSSERVERS
+    router=$GATEWAY
+    subnet=$NETMASK
+    interface=$IFNAME
+    # Hand the information to netifd.
+    proto_init_update "$interface" 1 1
+    # IP address and subnet
+    proto_add_ipv4_address "$ip" "${subnet:-255.255.255.0}"
+
+    # Any router? If not, remove the lines below.
+    # Multiple routers are separated by spaces.
+    for i in $router; do
+        proto_add_ipv4_route "$i" 32 "" "$ip"
+        proto_add_ipv4_route 0.0.0.0 0 "$i" "$ip"
+    done
+
+    # Pass the DNS servers to netifd.
+    for dns in $DNS; do
+        proto_add_dns_server "$dns"
+    done
+
+    # Pass the search domains to netifd.
+    for domain in $domain; do
+        proto_add_dns_search "$domain"
+    done
+
+    #proto_add_data
+    [ -n "$ZONE" ] && json_add_string zone "$ZONE"
+    proto_close_data
+
+    proto_send_update "$INTERFACE"
+
+}
+
+case "$1" in
+    renew)
+        setup_interface $2 $3
+        ;;
+esac
+
+exit 0
diff --git a/wwan/app/quectel_cm_5G/files/rmnet.sh b/wwan/app/quectel_cm_5G/files/rmnet.sh
new file mode 100644
index 0000000..101ee60
--- /dev/null
+++ b/wwan/app/quectel_cm_5G/files/rmnet.sh
@@ -0,0 +1,32 @@
+#!/bin/sh
+# Copyright (c) 2019 Qualcomm Technologies, Inc.
+# All Rights Reserved.
+# Confidential and Proprietary - Qualcomm Technologies, Inc.
+
+. /lib/functions.sh
+. /lib/functions/network.sh
+. ../netifd-proto.sh
+
+init_proto "$@"
+
+proto_rmnet_setup() {
+    local cfg="$1"
+    local iface="$2"
+
+    logger "rmnet started"
+    # Call the rmnet management script below.
+    logger "rmnet updated ${cfg} ${iface}"
+    /lib/netifd/rmnet.script renew $cfg $iface
+}
+
+proto_rmnet_teardown() {
+    local cfg="$1"
+    # Tear down the rmnet manager script here.
+}
+
+proto_rmnet_init_config() {
+    #no_device=1
+    available=1
+}
+
+add_protocol rmnet
diff --git a/wwan/app/quectel_cm_5G/files/rmnet6.script b/wwan/app/quectel_cm_5G/files/rmnet6.script
new file mode 100644
index 0000000..bbe05b2
--- /dev/null
+++ b/wwan/app/quectel_cm_5G/files/rmnet6.script
@@ -0,0 +1,61 @@
+#!/bin/sh
+# Copyright (c) 2019 Qualcomm Technologies, Inc.
+# All Rights Reserved.
+# Confidential and Proprietary - Qualcomm Technologies, Inc.
+
+
+[ -z "$1" ] && echo "Error: should be run by rmnet" && exit 1
+[ -z "$2" ] && echo "Error: should be run by rmnet" && exit 1
+
+. /lib/functions.sh
+. /lib/functions/network.sh
+. /lib/netifd/netifd-proto.sh
+
+setup_interface() {
+    INTERFACE=$1
+    CONFIG=/tmp/rmnet_$2_ipv6config
+    logger "rmnet setup_interface $1 $2 here"
+    # Fetch the information from the lower layer.
+    [ -f ${CONFIG} ] || {
+        proto_notify_error "$INTERFACE" "RMNET data call NOT ready"
+        proto_block_restart "$INTERFACE"
+        return
+    }
+    . ${CONFIG}
+    ADDRESSES=$PUBLIC_IP
+    interface=$IFNAME
+    # Hand the information to netifd.
+    proto_init_update "$interface" 1 1
+
+    # IPv6 address and prefix
+    proto_add_ipv6_address "${PUBLIC_IP}" "128"
+    proto_add_ipv6_prefix "${PUBLIC_IP}/${PrefixLength}"
+
+    # Multiple routers are separated by spaces.
+    proto_add_ipv6_route "$GATEWAY" 128
+    proto_add_ipv6_route "::0" 0 "$GATEWAY" "" "" "${PUBLIC_IP}/${PrefixLength}"
+
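+    # For reference, the rmnet_<id>_ipv6config file sourced above is written
+    # by quectel-CM and is expected to carry shell assignments such as
+    # (illustrative values only):
+    #   PUBLIC_IP=2001:db8::2
+    #   PrefixLength=64
+    #   GATEWAY=2001:db8::1
+    #   DNSSERVERS="2001:4860:4860::8888 2001:4860:4860::8844"
+    #   IFNAME=wwan0
+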
+    # Pass the DNS servers to netifd.
+    for dns in $DNSSERVERS; do
+        proto_add_dns_server "$dns"
+    done
+
+    # Pass the search domains to netifd.
+    for domain in $domain; do
+        proto_add_dns_search "$domain"
+    done
+
+    #proto_add_data
+    [ -n "$ZONE" ] && json_add_string zone "$ZONE"
+    proto_close_data
+
+    proto_send_update "$INTERFACE"
+}
+
+case "$1" in
+    renew|bound)
+        setup_interface $2 $3
+        ;;
+esac
+
+exit 0
diff --git a/wwan/app/quectel_cm_5G/files/rmnet6.sh b/wwan/app/quectel_cm_5G/files/rmnet6.sh
new file mode 100644
index 0000000..57a5495
--- /dev/null
+++ b/wwan/app/quectel_cm_5G/files/rmnet6.sh
@@ -0,0 +1,32 @@
+#!/bin/sh
+# Copyright (c) 2019 Qualcomm Technologies, Inc.
+# All Rights Reserved.
+# Confidential and Proprietary - Qualcomm Technologies, Inc.
+
+. /lib/functions.sh
+. /lib/functions/network.sh
+. ../netifd-proto.sh
+
+init_proto "$@"
+
+proto_rmnet6_setup() {
+    local cfg="$1"
+    local iface="$2"
+
+    logger "rmnet6 started"
+    # Call the rmnet management script below.
+    /lib/netifd/rmnet6.script renew $cfg $iface
+    logger "rmnet6 updated"
+}
+
+proto_rmnet6_teardown() {
+    local cfg="$1"
+    # Tear down the rmnet manager script here.
+}
+
+proto_rmnet6_init_config() {
+    #no_device=1
+    available=1
+}
+
+add_protocol rmnet6
diff --git a/wwan/app/quectel_cm_5G/files/rmnet_init.sh b/wwan/app/quectel_cm_5G/files/rmnet_init.sh
new file mode 100644
index 0000000..3d55438
--- /dev/null
+++ b/wwan/app/quectel_cm_5G/files/rmnet_init.sh
@@ -0,0 +1,31 @@
+#!/bin/sh
+
+uci set network.wan='interface'
+uci set network.wan.ifname='wwan0'
+uci set network.wan.proto='rmnet'
+
+uci set network.wan6='interface'
+uci set network.wan6.ifname='wwan0'
+uci set network.wan6.proto='rmnet6'
+
+uci set dhcp.lan.ra='relay'
+uci set dhcp.lan.dhcpv6='disabled'
+uci set dhcp.lan.ndp='relay'
+
+uci set dhcp.wan.ra='relay'
+uci set dhcp.wan.dhcpv6='disabled'
+uci set dhcp.wan.ndp='relay'
+uci set dhcp.wan.ndproxy_routing='0'
+
+uci set dhcp.wan6=dhcp
+uci set dhcp.wan6.interface='wan6'
+uci set dhcp.wan6.ra='relay'
+uci set dhcp.wan6.dhcpv6='disabled'
+uci set dhcp.wan6.ndp='relay'
+uci set dhcp.wan6.ndproxy_routing='0'
+uci set dhcp.wan6.master='1'
+
+uci set dhcp.odhcpd=odhcpd
+uci set dhcp.odhcpd.loglevel='7'
+
+uci commit
diff --git a/wwan/app/quectel_cm_5G/src/CMakeLists.txt b/wwan/app/quectel_cm_5G/src/CMakeLists.txt
new file mode 100644
index 0000000..8ce3f5a
--- /dev/null
+++ b/wwan/app/quectel_cm_5G/src/CMakeLists.txt
@@ -0,0 +1,36 @@
+cmake_minimum_required(VERSION 2.4)
+
+project(quectel-CM)
+add_definitions(-Wall -Wextra -Werror -O1)
+option(USE_QRTR "Enable QRTR" OFF)
+
+set( QL_CM_SRC
+    QmiWwanCM.c GobiNetCM.c main.c MPQMUX.c QMIThread.c util.c qmap_bridge_mode.c mbim-cm.c device.c
+    atc.c atchannel.c at_tok.c
+    udhcpc.c
+    )
+
+if(USE_QRTR)
+add_definitions(-DCONFIG_QRTR)
+set( QRTR_SRC qrtr.c rmnetctl.c)
+endif()
+
+add_executable(quectel-CM ${QL_CM_SRC} ${QRTR_SRC})
+target_link_libraries(quectel-CM PUBLIC pthread)
+install (TARGETS quectel-CM DESTINATION bin)
+
+add_executable(quectel-qmi-proxy quectel-qmi-proxy.c)
+target_link_libraries(quectel-qmi-proxy PUBLIC pthread)
+install (TARGETS quectel-qmi-proxy DESTINATION bin)
+
+add_executable(quectel-mbim-proxy quectel-mbim-proxy.c)
+target_link_libraries(quectel-mbim-proxy PUBLIC pthread)
+install (TARGETS quectel-mbim-proxy DESTINATION bin)
+
+add_executable(quectel-atc-proxy quectel-atc-proxy.c atchannel.c at_tok.c util.c)
+target_link_libraries(quectel-atc-proxy PUBLIC pthread)
+install (TARGETS quectel-atc-proxy DESTINATION bin)
+
+add_executable(quectel-qrtr-proxy quectel-qrtr-proxy.c)
+target_link_libraries(quectel-qrtr-proxy PUBLIC pthread)
+install (TARGETS quectel-qrtr-proxy DESTINATION bin)
diff --git a/wwan/app/quectel_cm_5G/src/GobiNetCM.c b/wwan/app/quectel_cm_5G/src/GobiNetCM.c
new file mode 100644
index 0000000..e9c4c81
--- /dev/null
+++ b/wwan/app/quectel_cm_5G/src/GobiNetCM.c
@@ -0,0 +1,246 @@
+/******************************************************************************
+  @file GobiNetCM.c
+  @brief GobiNet driver.
+
+  DESCRIPTION
+  Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules.
+
+  INITIALIZATION AND SEQUENCING REQUIREMENTS
+  None.
+
+  ---------------------------------------------------------------------------
+  Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd.  All Rights Reserved.
+  Quectel Wireless Solution Proprietary and Confidential.
+  ---------------------------------------------------------------------------
+******************************************************************************/
+/* The original angle-bracket header names were lost in extraction; these
+ * system headers are an assumption covering what this file uses. */
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include "QMIThread.h"
+
+#ifdef CONFIG_GOBINET
+static int qmiclientId[QMUX_TYPE_ALL];
+
+// IOCTL to generate a client ID for this service type
+#define IOCTL_QMI_GET_SERVICE_FILE (0x8BE0 + 1)
+
+// IOCTL to get the VIDPID of the device
+#define IOCTL_QMI_GET_DEVICE_VIDPID (0x8BE0 + 2)
+
+// IOCTL to get the MEID of the device
+#define IOCTL_QMI_GET_DEVICE_MEID (0x8BE0 + 3)
+
+static int GobiNetSendQMI(PQCQMIMSG pRequest) {
+    int ret, fd;
+    ssize_t nwrites;
+
+    fd = qmiclientId[pRequest->QMIHdr.QMIType];
+    pRequest->QMIHdr.ClientId = (fd&0xFF) ? fd&0xFF : pRequest->QMIHdr.QMIType;
+
+    if (fd <= 0) {
+        dbg_time("%s QMIType: %d has no clientID", __func__, pRequest->QMIHdr.QMIType);
+        return -ENODEV;
+    }
+
+    // The GobiNet character device is always ready to write.
+    nwrites = le16_to_cpu(pRequest->QMIHdr.Length) + 1 - sizeof(QCQMI_HDR);
+    ret = write(fd, &pRequest->MUXMsg, nwrites);
+    if (ret == nwrites) {
+        ret = 0;
+    } else {
+        dbg_time("%s write=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno));
+    }
+
+    return ret;
+}
+
+static int GobiNetGetClientID(const char *qcqmi, UCHAR QMIType) {
+    int ClientId;
+    ClientId = cm_open_dev(qcqmi);
+    if (ClientId == -1) {
+        dbg_time("failed to open %s, errno: %d (%s)", qcqmi, errno, strerror(errno));
+        return -1;
+    }
+
+    if (ioctl(ClientId, IOCTL_QMI_GET_SERVICE_FILE, QMIType) != 0) {
+        dbg_time("failed to get ClientID for 0x%02x errno: %d (%s)", QMIType, errno, strerror(errno));
+        close(ClientId);
+        ClientId = 0;
+    }
+
+    switch (QMIType) {
+        case QMUX_TYPE_WDS: dbg_time("Get clientWDS = %d", ClientId); break;
+        case QMUX_TYPE_DMS: dbg_time("Get clientDMS = %d", ClientId); break;
+        case QMUX_TYPE_NAS: dbg_time("Get clientNAS = %d", ClientId); break;
+        case QMUX_TYPE_QOS: dbg_time("Get clientQOS = %d", ClientId); break;
+        case QMUX_TYPE_WMS: dbg_time("Get clientWMS = %d", ClientId); break;
+        case QMUX_TYPE_PDS: dbg_time("Get clientPDS = %d", ClientId); break;
+        case QMUX_TYPE_UIM: dbg_time("Get clientUIM = %d", ClientId); break;
+        case QMUX_TYPE_COEX: dbg_time("Get clientCOEX = %d", ClientId); break;
+        case QMUX_TYPE_WDS_ADMIN: dbg_time("Get clientWDA = %d", ClientId);
+        break;
+        default: break;
+    }
+
+    return ClientId;
+}
+
+static int GobiNetDeInit(void) {
+    unsigned int i;
+    for (i = 0; i < sizeof(qmiclientId)/sizeof(qmiclientId[0]); i++)
+    {
+        if (qmiclientId[i] != 0)
+        {
+            close(qmiclientId[i]);
+            qmiclientId[i] = 0;
+        }
+    }
+
+    return 0;
+}
+
+static void * GobiNetThread(void *pData) {
+    PROFILE_T *profile = (PROFILE_T *)pData;
+    const char *qcqmi = (const char *)profile->qmichannel;
+    int wait_for_request_quit = 0;
+
+    qmiclientId[QMUX_TYPE_WDS] = GobiNetGetClientID(qcqmi, QMUX_TYPE_WDS);
+    if (profile->enable_ipv6) // a second WDS client is used for the IPv6 call
+        qmiclientId[QMUX_TYPE_WDS_IPV6] = GobiNetGetClientID(qcqmi, QMUX_TYPE_WDS);
+    qmiclientId[QMUX_TYPE_DMS] = GobiNetGetClientID(qcqmi, QMUX_TYPE_DMS);
+    qmiclientId[QMUX_TYPE_NAS] = GobiNetGetClientID(qcqmi, QMUX_TYPE_NAS);
+    qmiclientId[QMUX_TYPE_UIM] = GobiNetGetClientID(qcqmi, QMUX_TYPE_UIM);
+#ifdef CONFIG_COEX_WWAN_STATE
+    qmiclientId[QMUX_TYPE_COEX] = GobiNetGetClientID(qcqmi, QMUX_TYPE_COEX);
+#endif
+    if (profile->qmap_mode == 0 || profile->loopback_state) { // when QMAP is enabled, the data format is set in the GobiNet driver
+        qmiclientId[QMUX_TYPE_WDS_ADMIN] = GobiNetGetClientID(qcqmi, QMUX_TYPE_WDS_ADMIN);
+        profile->wda_client = qmiclientId[QMUX_TYPE_WDS_ADMIN];
+    }
+
+    // Do not check clientWDA: there is only one client for WDA, and if quectel-CM
+    // is killed by SIGKILL we cannot get a client ID for WDA again!
+    if (qmiclientId[QMUX_TYPE_WDS] == 0) /*|| (clientWDA == -1)*/ {
+        GobiNetDeInit();
+        dbg_time("%s Failed to open %s, errno: %d (%s)", __func__, qcqmi, errno, strerror(errno));
+        qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_DISCONNECTED);
+        pthread_exit(NULL);
+        return NULL;
+    }
+
+    qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_CONNECTED);
+
+    while (1) {
+        struct pollfd pollfds[16] = {{qmidevice_control_fd[1], POLLIN, 0}};
+        int ne, ret, nevents = 1;
+        unsigned int i;
+
+        for (i = 0; i < sizeof(qmiclientId)/sizeof(qmiclientId[0]); i++)
+        {
+            if (qmiclientId[i] != 0)
+            {
+                pollfds[nevents].fd = qmiclientId[i];
+                pollfds[nevents].events = POLLIN;
+                pollfds[nevents].revents = 0;
+                nevents++;
+            }
+        }
+
+        do {
+            ret = poll(pollfds, nevents, wait_for_request_quit ? 1000 : -1);
+        } while ((ret < 0) && (errno == EINTR));
+
+        if (ret == 0 && wait_for_request_quit) {
+            QmiThreadRecvQMI(NULL); // the main thread may be pending on QmiThreadSendQMI()
+            continue;
+        }
+
+        if (ret <= 0) {
+            dbg_time("%s poll=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno));
+            break;
+        }
+
+        for (ne = 0; ne < nevents; ne++) {
+            int fd = pollfds[ne].fd;
+            short revents = pollfds[ne].revents;
+
+            if (revents & (POLLERR | POLLHUP | POLLNVAL)) {
+                dbg_time("%s poll err/hup/inval", __func__);
+                dbg_time("poll fd = %d, events = 0x%04x", fd, revents);
+                goto __GobiNetThread_quit;
+            }
+
+            if ((revents & POLLIN) == 0)
+                continue;
+
+            if (fd == qmidevice_control_fd[1]) {
+                int triger_event;
+                if (read(fd, &triger_event, sizeof(triger_event)) == sizeof(triger_event)) {
+                    //DBG("triger_event = 0x%x", triger_event);
+                    switch (triger_event) {
+                        case RIL_REQUEST_QUIT:
+                            goto __GobiNetThread_quit;
+                        break;
+                        case SIG_EVENT_STOP:
+                            wait_for_request_quit = 1;
+                        break;
+                        default:
+                        break;
+                    }
+                }
+                continue;
+            }
+
+            {
+                ssize_t nreads;
+                PQCQMIMSG pResponse = (PQCQMIMSG)cm_recv_buf;
+
+                nreads = read(fd, &pResponse->MUXMsg, sizeof(cm_recv_buf) - sizeof(QCQMI_HDR));
+                if (nreads <= 0)
+                {
+                    dbg_time("%s read=%d errno: %d (%s)", __func__, (int)nreads, errno, strerror(errno));
+                    break;
+                }
+
+                for (i = 0; i < sizeof(qmiclientId)/sizeof(qmiclientId[0]); i++)
+                {
+                    if (qmiclientId[i] == fd)
+                    {
+                        pResponse->QMIHdr.QMIType = i;
+                    }
+                }
+
+                pResponse->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI;
+                pResponse->QMIHdr.Length = cpu_to_le16(nreads + sizeof(QCQMI_HDR) - 1);
+                pResponse->QMIHdr.CtlFlags = 0x00;
+                pResponse->QMIHdr.ClientId = (fd&0xFF) ? fd&0xFF : pResponse->QMIHdr.QMIType;
+
+                QmiThreadRecvQMI(pResponse);
+            }
+        }
+    }
+
+__GobiNetThread_quit:
+    GobiNetDeInit();
+    qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_DISCONNECTED);
+    QmiThreadRecvQMI(NULL); // the main thread may be pending on QmiThreadSendQMI()
+    dbg_time("%s exit", __func__);
+    pthread_exit(NULL);
+    return NULL;
+}
+
+const struct qmi_device_ops gobi_qmidev_ops = {
+    .deinit = GobiNetDeInit,
+    .send = GobiNetSendQMI,
+    .read = GobiNetThread,
+};
+#endif
+
diff --git a/wwan/app/quectel_cm_5G/src/Makefile b/wwan/app/quectel_cm_5G/src/Makefile
new file mode 100644
index 0000000..5219a21
--- /dev/null
+++ b/wwan/app/quectel_cm_5G/src/Makefile
@@ -0,0 +1,46 @@
+ifneq ($(CROSS_COMPILE),)
+CROSS-COMPILE:=$(CROSS_COMPILE)
+endif
+#CROSS-COMPILE:=/workspace/buildroot/buildroot-qemu_mips_malta_defconfig/output/host/usr/bin/mips-buildroot-linux-uclibc-
+#CROSS-COMPILE:=/workspace/buildroot/buildroot-qemu_arm_vexpress_defconfig/output/host/usr/bin/arm-buildroot-linux-uclibcgnueabi-
+#CROSS-COMPILE:=/workspace/buildroot-git/qemu_mips64_malta/output/host/usr/bin/mips-gnu-linux-
+ifeq ($(CC),cc)
+CC:=$(CROSS-COMPILE)gcc
+endif
+LD:=$(CROSS-COMPILE)ld
+
+QL_CM_SRC=QmiWwanCM.c GobiNetCM.c main.c QCQMUX.c QMIThread.c util.c qmap_bridge_mode.c mbim-cm.c device.c
+QL_CM_SRC+=atc.c atchannel.c at_tok.c
+#QL_CM_SRC+=qrtr.c rmnetctl.c
+ifeq (1,1)
+QL_CM_DHCP=udhcpc.c
+else
+LIBMNL=libmnl/ifutils.c libmnl/attr.c libmnl/callback.c libmnl/nlmsg.c libmnl/socket.c
+DHCP=libmnl/dhcp/dhcpclient.c libmnl/dhcp/dhcpmsg.c libmnl/dhcp/packet.c
+QL_CM_DHCP=udhcpc_netlink.c
+QL_CM_DHCP+=${LIBMNL}
+endif
+
+CFLAGS += -Wall -Wextra -Werror -O1 #-s
+LDFLAGS += -lpthread -ldl -lrt
+
+release: clean qmi-proxy mbim-proxy atc-proxy #qrtr-proxy
+	$(CC) ${CFLAGS} ${QL_CM_SRC} ${QL_CM_DHCP} -o quectel-CM ${LDFLAGS}
+
+debug: clean
+	$(CC) ${CFLAGS} -g -DCM_DEBUG ${QL_CM_SRC} ${QL_CM_DHCP} -o quectel-CM -lpthread -ldl -lrt
+
+qmi-proxy:
+	$(CC) ${CFLAGS} quectel-qmi-proxy.c -o quectel-qmi-proxy ${LDFLAGS}
+
+mbim-proxy:
+	$(CC) ${CFLAGS} quectel-mbim-proxy.c -o quectel-mbim-proxy ${LDFLAGS}
+
+qrtr-proxy:
+	$(CC) ${CFLAGS} quectel-qrtr-proxy.c -o quectel-qrtr-proxy ${LDFLAGS}
+
+atc-proxy:
+	$(CC) ${CFLAGS} quectel-atc-proxy.c atchannel.c at_tok.c util.c -o quectel-atc-proxy ${LDFLAGS}
+
+clean:
+	rm -rf *.o libmnl/*.o quectel-CM quectel-qmi-proxy quectel-mbim-proxy quectel-atc-proxy
diff --git a/wwan/app/quectel_cm_5G/src/Makefile.am b/wwan/app/quectel_cm_5G/src/Makefile.am
new file mode 100644
index 0000000..87e5266
--- /dev/null
+++ b/wwan/app/quectel_cm_5G/src/Makefile.am
@@ -0,0 +1,22 @@
+bin_PROGRAMS = quectel-CM
+QL_CM_SRC=QmiWwanCM.c GobiNetCM.c main.c MPQMUX.c QMIThread.c util.c qmap_bridge_mode.c mbim-cm.c device.c
+QL_CM_SRC+=atc.c atchannel.c at_tok.c
+#QL_CM_SRC+=qrtr.c rmnetctl.c
+QL_CM_DHCP=udhcpc.c
+if USE_QRTR
+quectel_CM_CFLAGS = -DCONFIG_QRTR
+QL_CM_SRC += qrtr.c rmnetctl.c
+if USE_MSM_IPC
+quectel_CM_CFLAGS += -DUSE_LINUX_MSM_IPC
+endif
+endif
+
+quectel_CM_SOURCES = ${QL_CM_SRC} ${QL_CM_DHCP}
+
+bin_PROGRAMS += quectel-qmi-proxy
+quectel_qmi_proxy_SOURCES = quectel-qmi-proxy.c
+
+bin_PROGRAMS += quectel-mbim-proxy
+quectel_mbim_proxy_SOURCES = quectel-mbim-proxy.c
+LIBS = -l pthread
+CFLAGS = -Wall -Wextra -Werror -O1
diff --git a/wwan/app/quectel_cm_5G/src/NOTICE b/wwan/app/quectel_cm_5G/src/NOTICE
new file mode 100644
index 0000000..0a062cf
--- /dev/null
+++ b/wwan/app/quectel_cm_5G/src/NOTICE
@@ -0,0 +1,7 @@
+This program is fully open source code and public domain software for customers of Quectel.
+
+The APIs of the QMI WWAN interfaces are defined by Qualcomm, and this program complies with the Qualcomm QMI WWAN interface specification.
+
+Customers are free to modify the source code and redistribute it.
+
+For those who are not Quectel customers, all rights are reserved, and any copying of or commercial development based on this program is not allowed.
diff --git a/wwan/app/quectel_cm_5G/src/QCQCTL.h b/wwan/app/quectel_cm_5G/src/QCQCTL.h
new file mode 100644
index 0000000..eaf2ad6
--- /dev/null
+++ b/wwan/app/quectel_cm_5G/src/QCQCTL.h
@@ -0,0 +1,394 @@
+/******************************************************************************
+  @file QCQCTL.h
+
+  DESCRIPTION
+  This module contains the QMI QCTL definitions.
+
+  INITIALIZATION AND SEQUENCING REQUIREMENTS
+  None.
+
+  ---------------------------------------------------------------------------
+  Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd.  All Rights Reserved.
+  Quectel Wireless Solution Proprietary and Confidential.
+  ---------------------------------------------------------------------------
+******************************************************************************/
+
+
+#ifndef QCQCTL_H
+#define QCQCTL_H
+
+#include "QCQMI.h"
+
+#pragma pack(push, 1)
+
+// ================= QMICTL ==================
+
+// QMICTL Control Flags
+#define QMICTL_CTL_FLAG_CMD 0x00
+#define QMICTL_CTL_FLAG_RSP 0x01
+#define QMICTL_CTL_FLAG_IND 0x02
+
+#if 0
+typedef struct _QMICTL_TRANSACTION_ITEM
+{
+    LIST_ENTRY List;
+    UCHAR TransactionId; // QMICTL transaction id
+    PVOID Context; // Adapter or IocDev
+    PIRP Irp;
+} QMICTL_TRANSACTION_ITEM, *PQMICTL_TRANSACTION_ITEM;
+#endif
+
+typedef struct _QCQMICTL_MSG_HDR
+{
+    UCHAR CtlFlags; // 00-cmd, 01-rsp, 10-ind
+    UCHAR TransactionId;
+    USHORT QMICTLType;
+    USHORT Length;
+} __attribute__ ((packed)) QCQMICTL_MSG_HDR, *PQCQMICTL_MSG_HDR;
+
+#define QCQMICTL_MSG_HDR_SIZE sizeof(QCQMICTL_MSG_HDR)
+
+typedef struct _QCQMICTL_MSG_HDR_RESP
+{
+    UCHAR CtlFlags; // 00-cmd, 01-rsp, 10-ind
+    UCHAR TransactionId;
+    USHORT QMICTLType;
+    USHORT Length;
+    UCHAR TLVType; // 0x02 - result code
+    USHORT TLVLength; // 4
+    USHORT QMUXResult; // QMI_RESULT_SUCCESS
+                       // QMI_RESULT_FAILURE
+    USHORT QMUXError; // QMI_ERR_INVALID_ARG
+                      // QMI_ERR_NO_MEMORY
+                      // QMI_ERR_INTERNAL
+                      // QMI_ERR_FAULT
+} __attribute__ ((packed)) QCQMICTL_MSG_HDR_RESP, *PQCQMICTL_MSG_HDR_RESP;
+
+typedef struct _QCQMICTL_MSG
+{
+    UCHAR CtlFlags; // 00-cmd, 01-rsp, 10-ind
+    UCHAR TransactionId;
+    USHORT QMICTLType;
+    USHORT Length;
+    UCHAR Payload;
+} __attribute__ ((packed)) QCQMICTL_MSG, *PQCQMICTL_MSG;
+
+// TLV Header
+typedef struct _QCQMICTL_TLV_HDR
+{
+    UCHAR TLVType;
+    USHORT TLVLength;
+} __attribute__ ((packed)) QCQMICTL_TLV_HDR, *PQCQMICTL_TLV_HDR;
+
+#define QCQMICTL_TLV_HDR_SIZE sizeof(QCQMICTL_TLV_HDR)
+
+// QMICTL Type
+#define QMICTL_SET_INSTANCE_ID_REQ 0x0020
+#define QMICTL_SET_INSTANCE_ID_RESP 0x0020
+#define QMICTL_GET_VERSION_REQ 0x0021
+#define QMICTL_GET_VERSION_RESP 0x0021
+#define QMICTL_GET_CLIENT_ID_REQ 0x0022
+#define QMICTL_GET_CLIENT_ID_RESP 0x0022
+#define QMICTL_RELEASE_CLIENT_ID_REQ 0x0023
+#define QMICTL_RELEASE_CLIENT_ID_RESP 0x0023
+#define QMICTL_REVOKE_CLIENT_ID_IND 0x0024
+#define QMICTL_INVALID_CLIENT_ID_IND 0x0025
+#define QMICTL_SET_DATA_FORMAT_REQ 0x0026
+#define QMICTL_SET_DATA_FORMAT_RESP 0x0026
+#define QMICTL_SYNC_REQ 0x0027
+#define QMICTL_SYNC_RESP 0x0027
+#define QMICTL_SYNC_IND 0x0027
+#define QMI_MESSAGE_CTL_INTERNAL_PROXY_OPEN 0xFF00
+
+#define QMICTL_FLAG_REQUEST 0x00
+#define QMICTL_FLAG_RESPONSE 0x01
+#define QMICTL_FLAG_INDICATION 0x02
+
+// QMICTL Message Definitions
+
+typedef struct _QMICTL_SET_INSTANCE_ID_REQ_MSG
+{
+    UCHAR CtlFlags; // QMICTL_FLAG_REQUEST
+    UCHAR TransactionId;
+    USHORT QMICTLType; // QMICTL_SET_INSTANCE_ID_REQ
+    USHORT Length; // 4
+    UCHAR TLVType; // QCTLV_TYPE_REQUIRED_PARAMETER
+    USHORT TLVLength; // 1
+    UCHAR Value; // Host-unique QMI instance for this device driver
+} __attribute__ ((packed)) QMICTL_SET_INSTANCE_ID_REQ_MSG, *PQMICTL_SET_INSTANCE_ID_REQ_MSG;
+
+typedef struct _QMICTL_SET_INSTANCE_ID_RESP_MSG
+{
+    UCHAR CtlFlags; // QMICTL_FLAG_RESPONSE
+    UCHAR TransactionId;
+    USHORT QMICTLType; // QMICTL_SET_INSTANCE_ID_RESP
+    USHORT Length;
+    UCHAR TLVType; // QCTLV_TYPE_RESULT_CODE
+    USHORT TLVLength; // 0x0004
+    USHORT QMIResult;
+    USHORT QMIError;
+    UCHAR TLV2Type; // QCTLV_TYPE_REQUIRED_PARAMETER
+    USHORT TLV2Length; // 0x0002
+    USHORT QMI_ID; // Upper byte is assigned by MSM,
+                   // lower assigned by host
+} __attribute__ ((packed)) QMICTL_SET_INSTANCE_ID_RESP_MSG, *PQMICTL_SET_INSTANCE_ID_RESP_MSG;
+
+typedef struct _QMICTL_GET_VERSION_REQ_MSG
+{
+    UCHAR CtlFlags; // QMICTL_FLAG_REQUEST
+    UCHAR TransactionId;
+    USHORT QMICTLType; // QMICTL_GET_VERSION_REQ
+    USHORT Length; // 0
+    UCHAR TLVType; // QCTLV_TYPE_REQUIRED_PARAMETER
+    USHORT TLVLength; // var
+    UCHAR QMUXTypes; // List of one byte QMUX_TYPE values
+                     // 0xFF returns a list of versions for all
+                     // QMUX_TYPEs implemented on the device
+} __attribute__ ((packed)) QMICTL_GET_VERSION_REQ_MSG, *PQMICTL_GET_VERSION_REQ_MSG;
+
+typedef struct _QMUX_TYPE_VERSION_STRUCT
+{
+    UCHAR QMUXType;
+    USHORT MajorVersion;
+    USHORT MinorVersion;
+} __attribute__ ((packed)) QMUX_TYPE_VERSION_STRUCT, *PQMUX_TYPE_VERSION_STRUCT;
+
+typedef struct _ADDENDUM_VERSION_PREAMBLE
+{
+    UCHAR LabelLength;
+    UCHAR Label;
+} __attribute__ ((packed)) ADDENDUM_VERSION_PREAMBLE, *PADDENDUM_VERSION_PREAMBLE;
+
+#define QMICTL_GET_VERSION_RSP_TLV_TYPE_VERSION 0x01
+#define QMICTL_GET_VERSION_RSP_TLV_TYPE_ADD_VERSION 0x10
+
+typedef struct _QMICTL_GET_VERSION_RESP_MSG
+{
+    UCHAR CtlFlags; // QMICTL_FLAG_RESPONSE
+    UCHAR TransactionId;
+    USHORT QMICTLType; // QMICTL_GET_VERSION_RESP
+    USHORT Length;
+    UCHAR TLVType; // QCTLV_TYPE_RESULT_CODE
+    USHORT TLVLength; // 0x0004
+    USHORT QMIResult;
+    USHORT QMIError;
+    UCHAR TLV2Type; // QCTLV_TYPE_REQUIRED_PARAMETER
+    USHORT TLV2Length; // var
+    UCHAR NumElements; // Num of QMUX_TYPE_VERSION_STRUCT
+    QMUX_TYPE_VERSION_STRUCT TypeVersion[0];
+} __attribute__ ((packed)) QMICTL_GET_VERSION_RESP_MSG, *PQMICTL_GET_VERSION_RESP_MSG;
+
+typedef struct _QMICTL_GET_CLIENT_ID_REQ_MSG
+{
+    UCHAR CtlFlags; // QMICTL_FLAG_REQUEST
+    UCHAR TransactionId;
+    USHORT QMICTLType; // QMICTL_GET_CLIENT_ID_REQ
+    USHORT Length;
+    UCHAR TLVType; // QCTLV_TYPE_REQUIRED_PARAMETER
+    USHORT TLVLength; // 1
+    UCHAR QMIType; // QMUX type
+} __attribute__ ((packed)) QMICTL_GET_CLIENT_ID_REQ_MSG, *PQMICTL_GET_CLIENT_ID_REQ_MSG;
+
+typedef struct _QMICTL_GET_CLIENT_ID_RESP_MSG
+{
+    UCHAR CtlFlags; // QMICTL_FLAG_RESPONSE
+    UCHAR TransactionId;
+    USHORT QMICTLType; // QMICTL_GET_CLIENT_ID_RESP
+    USHORT Length;
+    UCHAR TLVType; // QCTLV_TYPE_RESULT_CODE
+    USHORT TLVLength; // 0x0004
+    USHORT QMIResult; // result code
+    USHORT QMIError; // error code
+    UCHAR TLV2Type; // QCTLV_TYPE_REQUIRED_PARAMETER
+    USHORT TLV2Length; // 2
+    UCHAR QMIType;
+    UCHAR ClientId;
+} __attribute__ ((packed)) QMICTL_GET_CLIENT_ID_RESP_MSG, *PQMICTL_GET_CLIENT_ID_RESP_MSG;
+
+typedef struct _QMICTL_RELEASE_CLIENT_ID_REQ_MSG
+{
+    UCHAR CtlFlags; // QMICTL_FLAG_REQUEST
+    UCHAR TransactionId;
+    USHORT QMICTLType; // QMICTL_RELEASE_CLIENT_ID_REQ
+    USHORT Length;
+    UCHAR TLVType; // QCTLV_TYPE_REQUIRED_PARAMETER
+    USHORT TLVLength; // 0x0002
+    UCHAR QMIType;
+    UCHAR ClientId;
+} __attribute__ ((packed)) QMICTL_RELEASE_CLIENT_ID_REQ_MSG, *PQMICTL_RELEASE_CLIENT_ID_REQ_MSG;
+
+typedef struct _QMICTL_RELEASE_CLIENT_ID_RESP_MSG
+{
+    UCHAR CtlFlags; // QMICTL_FLAG_RESPONSE
+    UCHAR TransactionId;
+    USHORT QMICTLType; // QMICTL_RELEASE_CLIENT_ID_RESP
+    USHORT Length;
+    UCHAR TLVType; // QCTLV_TYPE_RESULT_CODE
+    USHORT TLVLength; // 0x0004
+    USHORT QMIResult; // result code
+    USHORT QMIError; // error code
+    UCHAR TLV2Type; // QCTLV_TYPE_REQUIRED_PARAMETER
+    USHORT TLV2Length; // 2
+    UCHAR QMIType;
+    UCHAR ClientId;
+} __attribute__ ((packed)) QMICTL_RELEASE_CLIENT_ID_RESP_MSG, *PQMICTL_RELEASE_CLIENT_ID_RESP_MSG;
+
+typedef struct _QMICTL_REVOKE_CLIENT_ID_IND_MSG
+{
+    UCHAR CtlFlags; // QMICTL_FLAG_INDICATION
+    UCHAR TransactionId;
+    USHORT QMICTLType; // QMICTL_REVOKE_CLIENT_ID_IND
+    USHORT Length;
+    UCHAR TLVType; // QCTLV_TYPE_REQUIRED_PARAMETER
+    USHORT TLVLength; // 0x0002
+    UCHAR QMIType;
+    UCHAR ClientId;
+} __attribute__ ((packed)) QMICTL_REVOKE_CLIENT_ID_IND_MSG, *PQMICTL_REVOKE_CLIENT_ID_IND_MSG;
+
+typedef struct _QMICTL_INVALID_CLIENT_ID_IND_MSG
+{
+    UCHAR CtlFlags; // QMICTL_FLAG_INDICATION
+    UCHAR TransactionId;
+    USHORT QMICTLType; // QMICTL_INVALID_CLIENT_ID_IND
+    USHORT Length;
+    UCHAR TLVType; // QCTLV_TYPE_REQUIRED_PARAMETER
+    USHORT TLVLength; // 0x0002
+    UCHAR QMIType;
+    UCHAR ClientId;
+} __attribute__ ((packed)) QMICTL_INVALID_CLIENT_ID_IND_MSG, *PQMICTL_INVALID_CLIENT_ID_IND_MSG;
+
+typedef struct _QMICTL_SET_DATA_FORMAT_REQ_MSG
+{
+    UCHAR CtlFlags; // QMICTL_FLAG_REQUEST
+    UCHAR TransactionId;
+    USHORT QMICTLType; // QMICTL_SET_DATA_FORMAT_REQ
+    USHORT Length;
+    UCHAR TLVType; // QCTLV_TYPE_REQUIRED_PARAMETER
+    USHORT TLVLength; // 1
+    UCHAR DataFormat; // 0-default; 1-QoS hdr present
+} __attribute__ ((packed)) QMICTL_SET_DATA_FORMAT_REQ_MSG, *PQMICTL_SET_DATA_FORMAT_REQ_MSG;
+
+#ifdef QC_IP_MODE
+#define SET_DATA_FORMAT_TLV_TYPE_LINK_PROTO 0x10
+#define SET_DATA_FORMAT_LINK_PROTO_ETH 0x0001
+#define SET_DATA_FORMAT_LINK_PROTO_IP 0x0002
+typedef struct _QMICTL_SET_DATA_FORMAT_TLV_LINK_PROT
+{
+    UCHAR TLVType; // Link-Layer Protocol
+    USHORT TLVLength; // 2
+    USHORT LinkProt; // 0x1: ETH; 0x2: IP
+} QMICTL_SET_DATA_FORMAT_TLV_LINK_PROT, *PQMICTL_SET_DATA_FORMAT_TLV_LINK_PROT;
+
+#ifdef QCMP_UL_TLP
+#define SET_DATA_FORMAT_TLV_TYPE_UL_TLP 0x11
+typedef struct _QMICTL_SET_DATA_FORMAT_TLV_UL_TLP
+{
+    UCHAR TLVType; // 0x11, Uplink TLP Setting
+    USHORT TLVLength; // 1
+    UCHAR UlTlpSetting; // 0x0: Disable; 0x01: Enable
+} QMICTL_SET_DATA_FORMAT_TLV_UL_TLP, *PQMICTL_SET_DATA_FORMAT_TLV_UL_TLP;
+#endif // QCMP_UL_TLP
+
+#ifdef QCMP_DL_TLP
+#define SET_DATA_FORMAT_TLV_TYPE_DL_TLP 0x13
+typedef struct _QMICTL_SET_DATA_FORMAT_TLV_DL_TLP
+{
+    UCHAR TLVType; // 0x13, Downlink TLP Setting
+    USHORT TLVLength; // 1
+    UCHAR DlTlpSetting; // 0x0: Disable; 0x01: Enable
+} QMICTL_SET_DATA_FORMAT_TLV_DL_TLP, *PQMICTL_SET_DATA_FORMAT_TLV_DL_TLP;
+#endif // QCMP_DL_TLP
+
+#endif // QC_IP_MODE
+
+#ifdef MP_QCQOS_ENABLED
+#define SET_DATA_FORMAT_TLV_TYPE_QOS_SETTING 0x12
+typedef struct _QMICTL_SET_DATA_FORMAT_TLV_QOS_SETTING
+{
+    UCHAR TLVType; // 0x12, QoS setting
+    USHORT TLVLength; // 1
+    UCHAR QosSetting; // 0x0: Disable; 0x01: Enable
+} QMICTL_SET_DATA_FORMAT_TLV_QOS_SETTING, *PQMICTL_SET_DATA_FORMAT_TLV_QOS_SETTING;
+#endif // MP_QCQOS_ENABLED
+
+typedef struct _QMICTL_SET_DATA_FORMAT_RESP_MSG
+{
+    UCHAR CtlFlags; // QMICTL_FLAG_RESPONSE
+    UCHAR TransactionId;
+    USHORT QMICTLType; // QMICTL_SET_DATA_FORMAT_RESP
+    USHORT Length;
+    UCHAR TLVType; // QCTLV_TYPE_RESULT_CODE
+    USHORT TLVLength; // 0x0004
+    USHORT QMIResult; // result code
+    USHORT QMIError; // error code
+} __attribute__ ((packed)) QMICTL_SET_DATA_FORMAT_RESP_MSG, *PQMICTL_SET_DATA_FORMAT_RESP_MSG;
+
+typedef struct _QMICTL_SYNC_REQ_MSG
+{
+    UCHAR CtlFlags; // QMICTL_FLAG_REQUEST
+    UCHAR TransactionId;
+    USHORT QMICTLType; // QMICTL_CTL_SYNC_REQ
+    USHORT Length; // 0
+} __attribute__ ((packed)) QMICTL_SYNC_REQ_MSG, *PQMICTL_SYNC_REQ_MSG;
+
+typedef struct _QMICTL_SYNC_RESP_MSG
+{
+    UCHAR CtlFlags; // QMICTL_FLAG_RESPONSE
+    UCHAR TransactionId;
+    USHORT QMICTLType; // QMICTL_CTL_SYNC_RESP
+    USHORT Length;
+    UCHAR TLVType; // QCTLV_TYPE_RESULT_CODE
+    USHORT TLVLength; // 0x0004
+    USHORT QMIResult;
+    USHORT QMIError;
+} __attribute__ ((packed)) QMICTL_SYNC_RESP_MSG, *PQMICTL_SYNC_RESP_MSG;
+
+typedef struct _QMICTL_SYNC_IND_MSG
+{
+    UCHAR CtlFlags; // QMICTL_FLAG_INDICATION
+    UCHAR TransactionId;
+    USHORT QMICTLType; // QMICTL_SYNC_IND
+    USHORT Length;
+} __attribute__ ((packed)) QMICTL_SYNC_IND_MSG, *PQMICTL_SYNC_IND_MSG;
+
+typedef struct _QMICTL_LIBQMI_PROXY_OPEN_MSG
+{
+    UCHAR CtlFlags; // QMICTL_FLAG_REQUEST
+    UCHAR TransactionId;
+    USHORT QMICTLType; // QMI_MESSAGE_CTL_INTERNAL_PROXY_OPEN
+    USHORT Length;
+    UCHAR TLVType; // QCTLV_TYPE_REQUIRED_PARAMETER
+    USHORT TLVLength; // var
+    char device_path[0]; // device path
+} __attribute__ ((packed)) QMICTL_LIBQMI_PROXY_OPEN_MSG, *PQMICTL_LIBQMI_PROXY_OPEN_MSG;
+
+typedef struct _QMICTL_MSG
+{
+    union
+    {
+        // Message Header
+        QCQMICTL_MSG_HDR QMICTLMsgHdr;
+        QCQMICTL_MSG_HDR_RESP QMICTLMsgHdrRsp;
+
+        // QMICTL Message
+        QMICTL_SET_INSTANCE_ID_REQ_MSG SetInstanceIdReq;
+        QMICTL_SET_INSTANCE_ID_RESP_MSG SetInstanceIdRsp;
+        QMICTL_GET_VERSION_REQ_MSG GetVersionReq;
+        QMICTL_GET_VERSION_RESP_MSG GetVersionRsp;
+        QMICTL_GET_CLIENT_ID_REQ_MSG GetClientIdReq;
+        QMICTL_GET_CLIENT_ID_RESP_MSG GetClientIdRsp;
+        QMICTL_RELEASE_CLIENT_ID_REQ_MSG ReleaseClientIdReq;
+        QMICTL_RELEASE_CLIENT_ID_RESP_MSG ReleaseClientIdRsp;
+        QMICTL_REVOKE_CLIENT_ID_IND_MSG RevokeClientIdInd;
+        QMICTL_INVALID_CLIENT_ID_IND_MSG InvalidClientIdInd;
+        QMICTL_SET_DATA_FORMAT_REQ_MSG SetDataFormatReq;
+        QMICTL_SET_DATA_FORMAT_RESP_MSG SetDataFormatRsp;
+        QMICTL_SYNC_REQ_MSG SyncReq;
+        QMICTL_SYNC_RESP_MSG SyncRsp;
+        QMICTL_SYNC_IND_MSG SyncInd;
+        QMICTL_LIBQMI_PROXY_OPEN_MSG LibQmiProxyOpenReq;
+    };
+} __attribute__ ((packed)) QMICTL_MSG, *PQMICTL_MSG;
+#pragma pack(pop)
+
+#endif //QCQCTL_H
\ No newline at end of file
diff --git a/wwan/app/quectel_cm_5G/src/QCQMI.h b/wwan/app/quectel_cm_5G/src/QCQMI.h
new file mode 100644
index 0000000..2b2a260
--- /dev/null
+++ b/wwan/app/quectel_cm_5G/src/QCQMI.h
@@ -0,0 +1,320 @@
+/******************************************************************************
+  @file QCQMI.h
+
+  DESCRIPTION
+  This module contains the QMI definitions.
+
+  INITIALIZATION AND SEQUENCING REQUIREMENTS
+  None.
+
+  ---------------------------------------------------------------------------
+  Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd.  All Rights Reserved.
+  Quectel Wireless Solution Proprietary and Confidential.
+  ---------------------------------------------------------------------------
+******************************************************************************/
+
+
+#ifndef USBQMI_H
+#define USBQMI_H
+
+typedef uint8_t uint8;
+typedef int8_t int8;
+typedef uint16_t uint16;
+typedef int16_t int16;
+typedef uint32_t uint32;
+typedef uint64_t uint64;
+
+typedef signed char CHAR;
+typedef unsigned char UCHAR;
+typedef short SHORT;
+typedef unsigned short USHORT;
+typedef int INT;
+typedef unsigned int UINT;
+typedef long LONG;
+typedef unsigned int ULONG;
+typedef unsigned long long ULONG64;
+typedef signed char *PCHAR;
+typedef unsigned char *PUCHAR;
+typedef int *PINT;
+typedef int BOOL;
+
+#define TRUE (1 == 1)
+#define FALSE (1 != 1)
+
+#define QMICTL_SUPPORTED_MAJOR_VERSION 1
+#define QMICTL_SUPPORTED_MINOR_VERSION 0
+
+#pragma pack(push, 1)
+
+// ========= USB Control Message ==========
+
+#define USB_CTL_MSG_TYPE_QMI 0x01
+
+// USB Control Message
+typedef struct _QCUSB_CTL_MSG_HDR
+{
+    UCHAR IFType;
+} __attribute__ ((packed)) QCUSB_CTL_MSG_HDR, *PQCUSB_CTL_MSG_HDR;
+
+#define QCUSB_CTL_MSG_HDR_SIZE sizeof(QCUSB_CTL_MSG_HDR)
+
+typedef struct _QCUSB_CTL_MSG
+{
+    UCHAR IFType;
+    UCHAR Message;
+} __attribute__ ((packed)) QCUSB_CTL_MSG, *PQCUSB_CTL_MSG;
+
+#define QCTLV_TYPE_REQUIRED_PARAMETER 0x01
+#define QCTLV_TYPE_RESULT_CODE 0x02
+
+// ================= QMI ==================
+
+// Define QMI Type
+typedef enum _QMI_SERVICE_TYPE
+{
+    QMUX_TYPE_CTL = 0x00,
+    QMUX_TYPE_WDS = 0x01,
+    QMUX_TYPE_DMS = 0x02,
+    QMUX_TYPE_NAS = 0x03,
+    QMUX_TYPE_QOS = 0x04,
+    QMUX_TYPE_WMS = 0x05,
+    QMUX_TYPE_PDS = 0x06,
+    QMUX_TYPE_UIM = 0x0B,
+    QMUX_TYPE_WDS_IPV6 = 0x11,
+    QMUX_TYPE_WDS_ADMIN = 0x1A,
+    QMUX_TYPE_COEX = 0x22,
+    QMUX_TYPE_MAX = 0xFF,
+    QMUX_TYPE_ALL = 0xFF
+} QMI_SERVICE_TYPE;
+
+typedef enum _QMI_RESULT_CODE_TYPE
+{
+    QMI_RESULT_SUCCESS = 0x0000,
+    QMI_RESULT_FAILURE = 0x0001
+} QMI_RESULT_CODE_TYPE;
+
+typedef enum _QMI_ERROR_CODE_TYPE
+{
+    QMI_ERR_NONE = 0x0000
+    ,QMI_ERR_MALFORMED_MSG = 0x0001
+    ,QMI_ERR_NO_MEMORY = 0x0002
+    ,QMI_ERR_INTERNAL = 0x0003
+    ,QMI_ERR_ABORTED = 0x0004
+    ,QMI_ERR_CLIENT_IDS_EXHAUSTED = 0x0005
+    ,QMI_ERR_UNABORTABLE_TRANSACTION = 0x0006
+    ,QMI_ERR_INVALID_CLIENT_ID = 0x0007
+    ,QMI_ERR_NO_THRESHOLDS = 0x0008
+    ,QMI_ERR_INVALID_HANDLE = 0x0009
+    ,QMI_ERR_INVALID_PROFILE = 0x000A
+    ,QMI_ERR_INVALID_PINID = 0x000B
+    ,QMI_ERR_INCORRECT_PIN = 0x000C
+    ,QMI_ERR_NO_NETWORK_FOUND = 0x000D
+    ,QMI_ERR_CALL_FAILED = 0x000E
+    ,QMI_ERR_OUT_OF_CALL = 0x000F
+    ,QMI_ERR_NOT_PROVISIONED = 0x0010
+    ,QMI_ERR_MISSING_ARG = 0x0011
+    ,QMI_ERR_ARG_TOO_LONG = 0x0013
+    ,QMI_ERR_INVALID_TX_ID = 0x0016
+    ,QMI_ERR_DEVICE_IN_USE = 0x0017
+    ,QMI_ERR_OP_NETWORK_UNSUPPORTED = 0x0018
+    ,QMI_ERR_OP_DEVICE_UNSUPPORTED = 0x0019
+    ,QMI_ERR_NO_EFFECT = 0x001A
+    ,QMI_ERR_NO_FREE_PROFILE = 0x001B
+    ,QMI_ERR_INVALID_PDP_TYPE = 0x001C
+    ,QMI_ERR_INVALID_TECH_PREF = 0x001D
+    ,QMI_ERR_INVALID_PROFILE_TYPE = 0x001E
+    ,QMI_ERR_INVALID_SERVICE_TYPE = 0x001F
+    ,QMI_ERR_INVALID_REGISTER_ACTION = 0x0020
+    ,QMI_ERR_INVALID_PS_ATTACH_ACTION = 0x0021
+    ,QMI_ERR_AUTHENTICATION_FAILED = 0x0022
+    ,QMI_ERR_PIN_BLOCKED = 0x0023
+    ,QMI_ERR_PIN_PERM_BLOCKED = 0x0024
+    ,QMI_ERR_SIM_NOT_INITIALIZED = 0x0025
+    ,QMI_ERR_MAX_QOS_REQUESTS_IN_USE = 0x0026
+    ,QMI_ERR_INCORRECT_FLOW_FILTER = 0x0027
+    ,QMI_ERR_NETWORK_QOS_UNAWARE = 0x0028
+    ,QMI_ERR_INVALID_QOS_ID = 0x0029
+    ,QMI_ERR_INVALID_ID = 0x0029
+    ,QMI_ERR_REQUESTED_NUM_UNSUPPORTED = 0x002A
+    ,QMI_ERR_INTERFACE_NOT_FOUND = 0x002B
+    ,QMI_ERR_FLOW_SUSPENDED = 0x002C
+    ,QMI_ERR_INVALID_DATA_FORMAT = 0x002D
+    ,QMI_ERR_GENERAL = 0x002E
+    ,QMI_ERR_UNKNOWN = 0x002F
+    ,QMI_ERR_INVALID_ARG = 0x0030
+    ,QMI_ERR_INVALID_INDEX = 0x0031
+    ,QMI_ERR_NO_ENTRY = 0x0032
+    ,QMI_ERR_DEVICE_STORAGE_FULL = 0x0033
+    ,QMI_ERR_DEVICE_NOT_READY = 0x0034
+    ,QMI_ERR_NETWORK_NOT_READY = 0x0035
+    ,QMI_ERR_CAUSE_CODE = 0x0036
+    ,QMI_ERR_MESSAGE_NOT_SENT = 0x0037
+    ,QMI_ERR_MESSAGE_DELIVERY_FAILURE = 0x0038
+    ,QMI_ERR_INVALID_MESSAGE_ID = 0x0039
+    ,QMI_ERR_ENCODING = 0x003A
+    ,QMI_ERR_AUTHENTICATION_LOCK = 0x003B
+    ,QMI_ERR_INVALID_TRANSITION = 0x003C
+    ,QMI_ERR_NOT_A_MCAST_IFACE = 0x003D
+    ,QMI_ERR_MAX_MCAST_REQUESTS_IN_USE = 0x003E
+    ,QMI_ERR_INVALID_MCAST_HANDLE = 0x003F
+    ,QMI_ERR_INVALID_IP_FAMILY_PREF = 0x0040
+    ,QMI_ERR_SESSION_INACTIVE = 0x0041
+    ,QMI_ERR_SESSION_INVALID = 0x0042
+    ,QMI_ERR_SESSION_OWNERSHIP = 0x0043
+    ,QMI_ERR_INSUFFICIENT_RESOURCES = 0x0044
+    ,QMI_ERR_DISABLED = 0x0045
+    ,QMI_ERR_INVALID_OPERATION = 0x0046
+    ,QMI_ERR_INVALID_QMI_CMD = 0x0047
+    ,QMI_ERR_TPDU_TYPE = 0x0048
+    ,QMI_ERR_SMSC_ADDR = 0x0049
+    ,QMI_ERR_INFO_UNAVAILABLE = 0x004A
+    ,QMI_ERR_SEGMENT_TOO_LONG = 0x004B
+    ,QMI_ERR_SEGMENT_ORDER = 0x004C
+    ,QMI_ERR_BUNDLING_NOT_SUPPORTED = 0x004D
+    ,QMI_ERR_OP_PARTIAL_FAILURE = 0x004E
+    ,QMI_ERR_POLICY_MISMATCH = 0x004F
+    ,QMI_ERR_SIM_FILE_NOT_FOUND = 0x0050
+    ,QMI_ERR_EXTENDED_INTERNAL = 0x0051
+    ,QMI_ERR_ACCESS_DENIED = 0x0052
+    ,QMI_ERR_HARDWARE_RESTRICTED = 0x0053
+    ,QMI_ERR_ACK_NOT_SENT = 0x0054
+    ,QMI_ERR_INJECT_TIMEOUT = 0x0055
+    ,QMI_ERR_INCOMPATIBLE_STATE = 0x005A
+    ,QMI_ERR_FDN_RESTRICT = 0x005B
+    ,QMI_ERR_SUPS_FAILURE_CAUSE = 0x005C
+    ,QMI_ERR_NO_RADIO = 0x005D
+    ,QMI_ERR_NOT_SUPPORTED = 0x005E
+    ,QMI_ERR_NO_SUBSCRIPTION = 0x005F
+    ,QMI_ERR_CARD_CALL_CONTROL_FAILED = 0x0060
+    ,QMI_ERR_NETWORK_ABORTED = 0x0061
+    ,QMI_ERR_MSG_BLOCKED = 0x0062
+    ,QMI_ERR_INVALID_SESSION_TYPE = 0x0064
+    ,QMI_ERR_INVALID_PB_TYPE = 0x0065
+    ,QMI_ERR_NO_SIM = 0x0066
+    ,QMI_ERR_PB_NOT_READY = 0x0067
+    ,QMI_ERR_PIN_RESTRICTION = 0x0068
+    ,QMI_ERR_PIN2_RESTRICTION = 0x0069
+    ,QMI_ERR_PUK_RESTRICTION = 0x006A
+    ,QMI_ERR_PUK2_RESTRICTION = 0x006B
+    ,QMI_ERR_PB_ACCESS_RESTRICTED = 0x006C
+    ,QMI_ERR_PB_DELETE_IN_PROG = 0x006D
+    ,QMI_ERR_PB_TEXT_TOO_LONG = 0x006E
+    ,QMI_ERR_PB_NUMBER_TOO_LONG = 0x006F
+    ,QMI_ERR_PB_HIDDEN_KEY_RESTRICTION = 0x0070
+} QMI_ERROR_CODE_TYPE;
+
+#define QCQMI_CTL_FLAG_SERVICE 0x80
+#define QCQMI_CTL_FLAG_CTL_POINT 0x00
+
+typedef struct _QCQMI_HDR
+{
+    UCHAR IFType;
+    USHORT Length;
+    UCHAR CtlFlags; // reserved
+    UCHAR QMIType;
+    UCHAR ClientId;
+} __attribute__ ((packed)) QCQMI_HDR, *PQCQMI_HDR;
+
+#define QCQMI_HDR_SIZE (sizeof(QCQMI_HDR)-1)
+
+typedef struct _QCQMI
+{
+    UCHAR IFType;
+    USHORT Length;
+    UCHAR CtlFlags; // reserved
+    UCHAR QMIType;
+    UCHAR ClientId;
+    UCHAR SDU;
+} __attribute__ ((packed)) QCQMI, *PQCQMI;
+
+typedef struct _QMI_SERVICE_VERSION
+{
+    USHORT Major;
+    USHORT Minor;
+    USHORT AddendumMajor;
+    USHORT AddendumMinor;
+} __attribute__ ((packed)) QMI_SERVICE_VERSION, *PQMI_SERVICE_VERSION;
+
+// ================= QMUX ==================
+
+#define QMUX_MSG_OVERHEAD_BYTES 4 // Type(USHORT) Length(USHORT) -- header
+
+#define QMUX_BROADCAST_CID 0xFF
+
+typedef struct _QCQMUX_HDR
+{
+    UCHAR CtlFlags; // 0: single QMUX Msg; 1: compound
+    USHORT TransactionId;
+} __attribute__ ((packed)) QCQMUX_HDR, *PQCQMUX_HDR;
+
+typedef struct _QCQMUX
+{
+    UCHAR CtlFlags; // 0: single QMUX Msg; 1: compound
+    USHORT TransactionId;
+    UCHAR Message; // Type(2), Length(2), Value
+} __attribute__ ((packed)) QCQMUX, *PQCQMUX;
+
+#define QCQMUX_HDR_SIZE sizeof(QCQMUX_HDR)
+
+typedef struct _QCQMUX_MSG_HDR
+{
+    USHORT Type;
+    USHORT Length;
+} __attribute__ ((packed)) QCQMUX_MSG_HDR, *PQCQMUX_MSG_HDR;
+
+#define QCQMUX_MSG_HDR_SIZE sizeof(QCQMUX_MSG_HDR)
+
+typedef struct _QCQMUX_MSG_HDR_RESP
+{
+    USHORT Type;
+    USHORT Length;
+    UCHAR TLVType; // 0x02 - result code
+    USHORT TLVLength; // 4
+    USHORT QMUXResult; // QMI_RESULT_SUCCESS
+                       // QMI_RESULT_FAILURE
+    USHORT QMUXError; // QMI_ERR_INVALID_ARG
+                      // QMI_ERR_NO_MEMORY
+                      // QMI_ERR_INTERNAL
+                      // QMI_ERR_FAULT
+} __attribute__ ((packed)) QCQMUX_MSG_HDR_RESP, *PQCQMUX_MSG_HDR_RESP;
+
+typedef struct _QCQMUX_TLV
+{
+    UCHAR Type;
+    USHORT Length;
+    UCHAR Value;
+} __attribute__ ((packed)) QCQMUX_TLV, *PQCQMUX_TLV;
+
+typedef struct _QMI_TLV_HDR
+{
+    UCHAR TLVType;
+    USHORT TLVLength;
+} __attribute__ ((packed)) QMI_TLV_HDR, *PQMI_TLV_HDR;
+
+typedef struct _QMI_TLV
+{
+    UCHAR TLVType;
+    USHORT TLVLength;
+    union {
+        int8_t s8;
+        uint8_t u8;
+        int16_t s16;
+        uint16_t u16;
+        int32_t s32;
+        uint32_t u32;
+        uint64_t u64;
+    };
+} __attribute__ ((packed)) QMI_TLV, *PQMI_TLV;
+
+// QMUX Message Definitions -- QMI SDU
+#define QMUX_CTL_FLAG_SINGLE_MSG 0x00
+#define QMUX_CTL_FLAG_COMPOUND_MSG 0x01
+#define QMUX_CTL_FLAG_TYPE_CMD 0x00
+#define QMUX_CTL_FLAG_TYPE_RSP 0x02
+#define QMUX_CTL_FLAG_TYPE_IND 0x04
+#define QMUX_CTL_FLAG_MASK_COMPOUND 0x01
+#define QMUX_CTL_FLAG_MASK_TYPE 0x06 // 00-cmd, 01-rsp, 10-ind
+
+#pragma pack(pop)
+
+#endif // USBQMI_H
diff --git a/wwan/app/quectel_cm_5G/src/QCQMUX.c b/wwan/app/quectel_cm_5G/src/QCQMUX.c
new file mode 100644
index 0000000..97f2d1a
--- /dev/null
+++ b/wwan/app/quectel_cm_5G/src/QCQMUX.c
@@ -0,0 +1,477 @@
+/******************************************************************************
+  @file QCQMUX.c
+  @brief QMI mux.
+
+  DESCRIPTION
+  Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules.
+
+  INITIALIZATION AND SEQUENCING REQUIREMENTS
+  None.
+
+  ---------------------------------------------------------------------------
+  Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd.  All Rights Reserved.
+  Quectel Wireless Solution Proprietary and Confidential.
+  ---------------------------------------------------------------------------
+******************************************************************************/
+
+#include "QMIThread.h"
+static char line[1024];
+static pthread_mutex_t dumpQMIMutex = PTHREAD_MUTEX_INITIALIZER;
+#undef dbg
+#define dbg( format, arg... ) do {if (strlen(line) < sizeof(line)) snprintf(&line[strlen(line)], sizeof(line) - strlen(line), format, ## arg);} while (0)
+
+PQMI_TLV_HDR GetTLV (PQCQMUX_MSG_HDR pQMUXMsgHdr, int TLVType);
+
+typedef struct {
+    UINT type;
+    const char *name;
+} QMI_NAME_T;
+
+#define qmi_name_item(type) {type, #type}
+
+#if 0
+static const QMI_NAME_T qmi_IFType[] = {
+{USB_CTL_MSG_TYPE_QMI, "USB_CTL_MSG_TYPE_QMI"},
+};
+
+static const QMI_NAME_T qmi_CtlFlags[] = {
+qmi_name_item(QMICTL_CTL_FLAG_CMD),
+qmi_name_item(QCQMI_CTL_FLAG_SERVICE),
+};
+
+static const QMI_NAME_T qmi_QMIType[] = {
+qmi_name_item(QMUX_TYPE_CTL),
+qmi_name_item(QMUX_TYPE_WDS),
+qmi_name_item(QMUX_TYPE_DMS),
+qmi_name_item(QMUX_TYPE_NAS),
+qmi_name_item(QMUX_TYPE_QOS),
+qmi_name_item(QMUX_TYPE_WMS),
+qmi_name_item(QMUX_TYPE_PDS),
+qmi_name_item(QMUX_TYPE_WDS_ADMIN),
+qmi_name_item(QMUX_TYPE_COEX),
+};
+
+static const QMI_NAME_T qmi_ctl_CtlFlags[] = {
+qmi_name_item(QMICTL_FLAG_REQUEST),
+qmi_name_item(QMICTL_FLAG_RESPONSE),
+qmi_name_item(QMICTL_FLAG_INDICATION),
+};
+#endif
+
+static const QMI_NAME_T qmux_ctl_QMICTLType[] = {
+// QMICTL Type
+qmi_name_item(QMICTL_SET_INSTANCE_ID_REQ), // 0x0020
+qmi_name_item(QMICTL_SET_INSTANCE_ID_RESP), // 0x0020
+qmi_name_item(QMICTL_GET_VERSION_REQ), // 0x0021
+qmi_name_item(QMICTL_GET_VERSION_RESP), // 0x0021
+qmi_name_item(QMICTL_GET_CLIENT_ID_REQ), // 0x0022
+qmi_name_item(QMICTL_GET_CLIENT_ID_RESP), // 0x0022
+qmi_name_item(QMICTL_RELEASE_CLIENT_ID_REQ), // 0x0023
+qmi_name_item(QMICTL_RELEASE_CLIENT_ID_RESP), // 0x0023
+qmi_name_item(QMICTL_REVOKE_CLIENT_ID_IND), // 0x0024
+qmi_name_item(QMICTL_INVALID_CLIENT_ID_IND), // 0x0025
+qmi_name_item(QMICTL_SET_DATA_FORMAT_REQ), // 0x0026
+qmi_name_item(QMICTL_SET_DATA_FORMAT_RESP), // 0x0026
+qmi_name_item(QMICTL_SYNC_REQ), // 0x0027
+qmi_name_item(QMICTL_SYNC_RESP), // 0x0027
+qmi_name_item(QMICTL_SYNC_IND), // 0x0027
+};
+
+static const QMI_NAME_T qmux_CtlFlags[] = {
+qmi_name_item(QMUX_CTL_FLAG_TYPE_CMD),
+qmi_name_item(QMUX_CTL_FLAG_TYPE_RSP),
+qmi_name_item(QMUX_CTL_FLAG_TYPE_IND),
+};
+
+
+static const QMI_NAME_T qmux_wds_Type[] = {
+qmi_name_item(QMIWDS_SET_EVENT_REPORT_REQ), // 0x0001
+qmi_name_item(QMIWDS_SET_EVENT_REPORT_RESP), // 0x0001
+qmi_name_item(QMIWDS_EVENT_REPORT_IND), // 0x0001
+qmi_name_item(QMIWDS_START_NETWORK_INTERFACE_REQ), // 0x0020
+qmi_name_item(QMIWDS_START_NETWORK_INTERFACE_RESP), // 0x0020
+qmi_name_item(QMIWDS_STOP_NETWORK_INTERFACE_REQ), // 0x0021
+qmi_name_item(QMIWDS_STOP_NETWORK_INTERFACE_RESP), // 0x0021
+qmi_name_item(QMIWDS_GET_PKT_SRVC_STATUS_REQ), // 0x0022
+qmi_name_item(QMIWDS_GET_PKT_SRVC_STATUS_RESP), // 0x0022
+qmi_name_item(QMIWDS_GET_PKT_SRVC_STATUS_IND), // 0x0022
+qmi_name_item(QMIWDS_GET_CURRENT_CHANNEL_RATE_REQ), // 0x0023
+qmi_name_item(QMIWDS_GET_CURRENT_CHANNEL_RATE_RESP), // 0x0023
+qmi_name_item(QMIWDS_GET_PKT_STATISTICS_REQ), // 0x0024
+qmi_name_item(QMIWDS_GET_PKT_STATISTICS_RESP), // 0x0024
+qmi_name_item(QMIWDS_MODIFY_PROFILE_SETTINGS_REQ), // 0x0028
+qmi_name_item(QMIWDS_MODIFY_PROFILE_SETTINGS_RESP), // 0x0028
+qmi_name_item(QMIWDS_GET_PROFILE_SETTINGS_REQ), // 0x002B
+qmi_name_item(QMIWDS_GET_PROFILE_SETTINGS_RESP), // 0x002B
+qmi_name_item(QMIWDS_GET_DEFAULT_SETTINGS_REQ), // 0x002C
+qmi_name_item(QMIWDS_GET_DEFAULT_SETTINGS_RESP), // 0x002C
+qmi_name_item(QMIWDS_GET_RUNTIME_SETTINGS_REQ), // 0x002D
+qmi_name_item(QMIWDS_GET_RUNTIME_SETTINGS_RESP), // 0x002D
+qmi_name_item(QMIWDS_GET_MIP_MODE_REQ), // 0x002F
+qmi_name_item(QMIWDS_GET_MIP_MODE_RESP), // 0x002F
+qmi_name_item(QMIWDS_GET_DATA_BEARER_REQ), // 0x0037 +qmi_name_item(QMIWDS_GET_DATA_BEARER_RESP), // 0x0037 +qmi_name_item(QMIWDS_DUN_CALL_INFO_REQ), // 0x0038 +qmi_name_item(QMIWDS_DUN_CALL_INFO_RESP), // 0x0038 +qmi_name_item(QMIWDS_DUN_CALL_INFO_IND), // 0x0038 +qmi_name_item(QMIWDS_SET_CLIENT_IP_FAMILY_PREF_REQ), // 0x004D +qmi_name_item(QMIWDS_SET_CLIENT_IP_FAMILY_PREF_RESP), // 0x004D +qmi_name_item(QMIWDS_SET_AUTO_CONNECT_REQ), // 0x0051 +qmi_name_item(QMIWDS_SET_AUTO_CONNECT_RESP), // 0x0051 +qmi_name_item(QMIWDS_BIND_MUX_DATA_PORT_REQ), // 0x00A2 +qmi_name_item(QMIWDS_BIND_MUX_DATA_PORT_RESP), // 0x00A2 +}; + +static const QMI_NAME_T qmux_dms_Type[] = { +// ======================= DMS ============================== +qmi_name_item(QMIDMS_SET_EVENT_REPORT_REQ), // 0x0001 +qmi_name_item(QMIDMS_SET_EVENT_REPORT_RESP), // 0x0001 +qmi_name_item(QMIDMS_EVENT_REPORT_IND), // 0x0001 +qmi_name_item(QMIDMS_GET_DEVICE_CAP_REQ), // 0x0020 +qmi_name_item(QMIDMS_GET_DEVICE_CAP_RESP), // 0x0020 +qmi_name_item(QMIDMS_GET_DEVICE_MFR_REQ), // 0x0021 +qmi_name_item(QMIDMS_GET_DEVICE_MFR_RESP), // 0x0021 +qmi_name_item(QMIDMS_GET_DEVICE_MODEL_ID_REQ), // 0x0022 +qmi_name_item(QMIDMS_GET_DEVICE_MODEL_ID_RESP), // 0x0022 +qmi_name_item(QMIDMS_GET_DEVICE_REV_ID_REQ), // 0x0023 +qmi_name_item(QMIDMS_GET_DEVICE_REV_ID_RESP), // 0x0023 +qmi_name_item(QMIDMS_GET_MSISDN_REQ), // 0x0024 +qmi_name_item(QMIDMS_GET_MSISDN_RESP), // 0x0024 +qmi_name_item(QMIDMS_GET_DEVICE_SERIAL_NUMBERS_REQ), // 0x0025 +qmi_name_item(QMIDMS_GET_DEVICE_SERIAL_NUMBERS_RESP), // 0x0025 +qmi_name_item(QMIDMS_UIM_SET_PIN_PROTECTION_REQ), // 0x0027 +qmi_name_item(QMIDMS_UIM_SET_PIN_PROTECTION_RESP), // 0x0027 +qmi_name_item(QMIDMS_UIM_VERIFY_PIN_REQ), // 0x0028 +qmi_name_item(QMIDMS_UIM_VERIFY_PIN_RESP), // 0x0028 +qmi_name_item(QMIDMS_UIM_UNBLOCK_PIN_REQ), // 0x0029 +qmi_name_item(QMIDMS_UIM_UNBLOCK_PIN_RESP), // 0x0029 +qmi_name_item(QMIDMS_UIM_CHANGE_PIN_REQ), // 0x002A +qmi_name_item(QMIDMS_UIM_CHANGE_PIN_RESP), // 0x002A +qmi_name_item(QMIDMS_UIM_GET_PIN_STATUS_REQ), // 0x002B +qmi_name_item(QMIDMS_UIM_GET_PIN_STATUS_RESP), // 0x002B +qmi_name_item(QMIDMS_GET_DEVICE_HARDWARE_REV_REQ), // 0x002C +qmi_name_item(QMIDMS_GET_DEVICE_HARDWARE_REV_RESP), // 0x002C +qmi_name_item(QMIDMS_GET_OPERATING_MODE_REQ), // 0x002D +qmi_name_item(QMIDMS_GET_OPERATING_MODE_RESP), // 0x002D +qmi_name_item(QMIDMS_SET_OPERATING_MODE_REQ), // 0x002E +qmi_name_item(QMIDMS_SET_OPERATING_MODE_RESP), // 0x002E +qmi_name_item(QMIDMS_GET_ACTIVATED_STATUS_REQ), // 0x0031 +qmi_name_item(QMIDMS_GET_ACTIVATED_STATUS_RESP), // 0x0031 +qmi_name_item(QMIDMS_ACTIVATE_AUTOMATIC_REQ), // 0x0032 +qmi_name_item(QMIDMS_ACTIVATE_AUTOMATIC_RESP), // 0x0032 +qmi_name_item(QMIDMS_ACTIVATE_MANUAL_REQ), // 0x0033 +qmi_name_item(QMIDMS_ACTIVATE_MANUAL_RESP), // 0x0033 +qmi_name_item(QMIDMS_UIM_GET_ICCID_REQ), // 0x003C +qmi_name_item(QMIDMS_UIM_GET_ICCID_RESP), // 0x003C +qmi_name_item(QMIDMS_UIM_GET_CK_STATUS_REQ), // 0x0040 +qmi_name_item(QMIDMS_UIM_GET_CK_STATUS_RESP), // 0x0040 +qmi_name_item(QMIDMS_UIM_SET_CK_PROTECTION_REQ), // 0x0041 +qmi_name_item(QMIDMS_UIM_SET_CK_PROTECTION_RESP), // 0x0041 +qmi_name_item(QMIDMS_UIM_UNBLOCK_CK_REQ), // 0x0042 +qmi_name_item(QMIDMS_UIM_UNBLOCK_CK_RESP), // 0x0042 +qmi_name_item(QMIDMS_UIM_GET_IMSI_REQ), // 0x0043 +qmi_name_item(QMIDMS_UIM_GET_IMSI_RESP), // 0x0043 +qmi_name_item(QMIDMS_UIM_GET_STATE_REQ), // 0x0044 +qmi_name_item(QMIDMS_UIM_GET_STATE_RESP), // 0x0044 +qmi_name_item(QMIDMS_GET_BAND_CAP_REQ), // 0x0045 
+qmi_name_item(QMIDMS_GET_BAND_CAP_RESP), // 0x0045 +}; + +static const QMI_NAME_T qmux_qos_Type[] = { +qmi_name_item( QMI_QOS_SET_EVENT_REPORT_REQ), // 0x0001 +qmi_name_item( QMI_QOS_SET_EVENT_REPORT_RESP), // 0x0001 +qmi_name_item( QMI_QOS_SET_EVENT_REPORT_IND), // 0x0001 +qmi_name_item( QMI_QOS_BIND_DATA_PORT_REQ), // 0x002B +qmi_name_item( QMI_QOS_BIND_DATA_PORT_RESP), // 0x002B +qmi_name_item( QMI_QOS_INDICATION_REGISTER_REQ), // 0x002F +qmi_name_item( QMI_QOS_INDICATION_REGISTER_RESP), // 0x002F +qmi_name_item( QMI_QOS_GLOBAL_QOS_FLOW_IND), // 0x0031 +qmi_name_item( QMI_QOS_GET_QOS_INFO_REQ), // 0x0033 +qmi_name_item( QMI_QOS_GET_QOS_INFO_RESP), // 0x0033 +}; + +static const QMI_NAME_T qmux_nas_Type[] = { +// ======================= NAS ============================== +qmi_name_item(QMINAS_SET_EVENT_REPORT_REQ), // 0x0002 +qmi_name_item(QMINAS_SET_EVENT_REPORT_RESP), // 0x0002 +qmi_name_item(QMINAS_EVENT_REPORT_IND), // 0x0002 +qmi_name_item(QMINAS_GET_SIGNAL_STRENGTH_REQ), // 0x0020 +qmi_name_item(QMINAS_GET_SIGNAL_STRENGTH_RESP), // 0x0020 +qmi_name_item(QMINAS_PERFORM_NETWORK_SCAN_REQ), // 0x0021 +qmi_name_item(QMINAS_PERFORM_NETWORK_SCAN_RESP), // 0x0021 +qmi_name_item(QMINAS_INITIATE_NW_REGISTER_REQ), // 0x0022 +qmi_name_item(QMINAS_INITIATE_NW_REGISTER_RESP), // 0x0022 +qmi_name_item(QMINAS_INITIATE_ATTACH_REQ), // 0x0023 +qmi_name_item(QMINAS_INITIATE_ATTACH_RESP), // 0x0023 +qmi_name_item(QMINAS_GET_SERVING_SYSTEM_REQ), // 0x0024 +qmi_name_item(QMINAS_GET_SERVING_SYSTEM_RESP), // 0x0024 +qmi_name_item(QMINAS_SERVING_SYSTEM_IND), // 0x0024 +qmi_name_item(QMINAS_GET_HOME_NETWORK_REQ), // 0x0025 +qmi_name_item(QMINAS_GET_HOME_NETWORK_RESP), // 0x0025 +qmi_name_item(QMINAS_GET_PREFERRED_NETWORK_REQ), // 0x0026 +qmi_name_item(QMINAS_GET_PREFERRED_NETWORK_RESP), // 0x0026 +qmi_name_item(QMINAS_SET_PREFERRED_NETWORK_REQ), // 0x0027 +qmi_name_item(QMINAS_SET_PREFERRED_NETWORK_RESP), // 0x0027 +qmi_name_item(QMINAS_GET_FORBIDDEN_NETWORK_REQ), // 0x0028 +qmi_name_item(QMINAS_GET_FORBIDDEN_NETWORK_RESP), // 0x0028 +qmi_name_item(QMINAS_SET_FORBIDDEN_NETWORK_REQ), // 0x0029 +qmi_name_item(QMINAS_SET_FORBIDDEN_NETWORK_RESP), // 0x0029 +qmi_name_item(QMINAS_SET_TECHNOLOGY_PREF_REQ), // 0x002A +qmi_name_item(QMINAS_SET_TECHNOLOGY_PREF_RESP), // 0x002A +qmi_name_item(QMINAS_GET_RF_BAND_INFO_REQ), // 0x0031 +qmi_name_item(QMINAS_GET_RF_BAND_INFO_RESP), // 0x0031 +qmi_name_item(QMINAS_GET_CELL_LOCATION_INFO_REQ), +qmi_name_item(QMINAS_GET_CELL_LOCATION_INFO_RESP), +qmi_name_item(QMINAS_GET_PLMN_NAME_REQ), // 0x0044 +qmi_name_item(QMINAS_GET_PLMN_NAME_RESP), // 0x0044 +qmi_name_item(QUECTEL_PACKET_TRANSFER_START_IND), // 0X100 +qmi_name_item(QUECTEL_PACKET_TRANSFER_END_IND), // 0X101 +qmi_name_item(QMINAS_GET_SYS_INFO_REQ), // 0x004D +qmi_name_item(QMINAS_GET_SYS_INFO_RESP), // 0x004D +qmi_name_item(QMINAS_SYS_INFO_IND), // 0x004D +qmi_name_item(QMINAS_GET_SIG_INFO_REQ), +qmi_name_item(QMINAS_GET_SIG_INFO_RESP), + +}; + +static const QMI_NAME_T qmux_wms_Type[] = { +// ======================= WMS ============================== +qmi_name_item(QMIWMS_SET_EVENT_REPORT_REQ), // 0x0001 +qmi_name_item(QMIWMS_SET_EVENT_REPORT_RESP), // 0x0001 +qmi_name_item(QMIWMS_EVENT_REPORT_IND), // 0x0001 +qmi_name_item(QMIWMS_RAW_SEND_REQ), // 0x0020 +qmi_name_item(QMIWMS_RAW_SEND_RESP), // 0x0020 +qmi_name_item(QMIWMS_RAW_WRITE_REQ), // 0x0021 +qmi_name_item(QMIWMS_RAW_WRITE_RESP), // 0x0021 +qmi_name_item(QMIWMS_RAW_READ_REQ), // 0x0022 +qmi_name_item(QMIWMS_RAW_READ_RESP), // 0x0022 
+qmi_name_item(QMIWMS_MODIFY_TAG_REQ), // 0x0023 +qmi_name_item(QMIWMS_MODIFY_TAG_RESP), // 0x0023 +qmi_name_item(QMIWMS_DELETE_REQ), // 0x0024 +qmi_name_item(QMIWMS_DELETE_RESP), // 0x0024 +qmi_name_item(QMIWMS_GET_MESSAGE_PROTOCOL_REQ), // 0x0030 +qmi_name_item(QMIWMS_GET_MESSAGE_PROTOCOL_RESP), // 0x0030 +qmi_name_item(QMIWMS_LIST_MESSAGES_REQ), // 0x0031 +qmi_name_item(QMIWMS_LIST_MESSAGES_RESP), // 0x0031 +qmi_name_item(QMIWMS_GET_SMSC_ADDRESS_REQ), // 0x0034 +qmi_name_item(QMIWMS_GET_SMSC_ADDRESS_RESP), // 0x0034 +qmi_name_item(QMIWMS_SET_SMSC_ADDRESS_REQ), // 0x0035 +qmi_name_item(QMIWMS_SET_SMSC_ADDRESS_RESP), // 0x0035 +qmi_name_item(QMIWMS_GET_STORE_MAX_SIZE_REQ), // 0x0036 +qmi_name_item(QMIWMS_GET_STORE_MAX_SIZE_RESP), // 0x0036 +}; + +static const QMI_NAME_T qmux_wds_admin_Type[] = { +qmi_name_item(QMIWDS_ADMIN_SET_DATA_FORMAT_REQ), // 0x0020 +qmi_name_item(QMIWDS_ADMIN_SET_DATA_FORMAT_RESP), // 0x0020 +qmi_name_item(QMIWDS_ADMIN_GET_DATA_FORMAT_REQ), // 0x0021 +qmi_name_item(QMIWDS_ADMIN_GET_DATA_FORMAT_RESP), // 0x0021 +qmi_name_item(QMIWDS_ADMIN_SET_QMAP_SETTINGS_REQ), // 0x002B +qmi_name_item(QMIWDS_ADMIN_SET_QMAP_SETTINGS_RESP), // 0x002B +qmi_name_item(QMIWDS_ADMIN_GET_QMAP_SETTINGS_REQ), // 0x002C +qmi_name_item(QMIWDS_ADMIN_GET_QMAP_SETTINGS_RESP), // 0x002C +qmi_name_item(QMI_WDA_SET_LOOPBACK_CONFIG_REQ), // 0x002F +qmi_name_item(QMI_WDA_SET_LOOPBACK_CONFIG_RESP), // 0x002F +qmi_name_item(QMI_WDA_SET_LOOPBACK_CONFIG_IND), // 0x002F +}; + +static const QMI_NAME_T qmux_uim_Type[] = { +qmi_name_item( QMIUIM_READ_TRANSPARENT_REQ), // 0x0020 +qmi_name_item( QMIUIM_READ_TRANSPARENT_RESP), // 0x0020 +qmi_name_item( QMIUIM_READ_TRANSPARENT_IND), // 0x0020 +qmi_name_item( QMIUIM_READ_RECORD_REQ), // 0x0021 +qmi_name_item( QMIUIM_READ_RECORD_RESP), // 0x0021 +qmi_name_item( QMIUIM_READ_RECORD_IND), // 0x0021 +qmi_name_item( QMIUIM_WRITE_TRANSPARENT_REQ), // 0x0022 +qmi_name_item( QMIUIM_WRITE_TRANSPARENT_RESP), // 0x0022 +qmi_name_item( QMIUIM_WRITE_TRANSPARENT_IND), // 0x0022 +qmi_name_item( QMIUIM_WRITE_RECORD_REQ), // 0x0023 +qmi_name_item( QMIUIM_WRITE_RECORD_RESP), // 0x0023 +qmi_name_item( QMIUIM_WRITE_RECORD_IND), // 0x0023 +qmi_name_item( QMIUIM_SET_PIN_PROTECTION_REQ), // 0x0025 +qmi_name_item( QMIUIM_SET_PIN_PROTECTION_RESP), // 0x0025 +qmi_name_item( QMIUIM_SET_PIN_PROTECTION_IND), // 0x0025 +qmi_name_item( QMIUIM_VERIFY_PIN_REQ), // 0x0026 +qmi_name_item( QMIUIM_VERIFY_PIN_RESP), // 0x0026 +qmi_name_item( QMIUIM_VERIFY_PIN_IND), // 0x0026 +qmi_name_item( QMIUIM_UNBLOCK_PIN_REQ), // 0x0027 +qmi_name_item( QMIUIM_UNBLOCK_PIN_RESP), // 0x0027 +qmi_name_item( QMIUIM_UNBLOCK_PIN_IND), // 0x0027 +qmi_name_item( QMIUIM_CHANGE_PIN_REQ), // 0x0028 +qmi_name_item( QMIUIM_CHANGE_PIN_RESP), // 0x0028 +qmi_name_item( QMIUIM_CHANGE_PIN_IND), // 0x0028 +qmi_name_item( QMIUIM_DEPERSONALIZATION_REQ), // 0x0029 +qmi_name_item( QMIUIM_DEPERSONALIZATION_RESP), // 0x0029 +qmi_name_item( QMIUIM_EVENT_REG_REQ), // 0x002E +qmi_name_item( QMIUIM_EVENT_REG_RESP), // 0x002E +qmi_name_item( QMIUIM_GET_CARD_STATUS_REQ), // 0x002F +qmi_name_item( QMIUIM_GET_CARD_STATUS_RESP), // 0x002F +qmi_name_item( QMIUIM_STATUS_CHANGE_IND), // 0x0032 +}; + +static const QMI_NAME_T qmux_coex_Type[] = { +qmi_name_item(QMI_COEX_GET_WWAN_STATE_REQ), // 0x0022 +qmi_name_item(QMI_COEX_GET_WWAN_STATE_RESP), // 0x0022 +}; + +static const char * qmi_name_get(const QMI_NAME_T *table, size_t size, int type, const char *tag) { + static char unknow[40]; + size_t i; + + if (qmux_CtlFlags == table) { + if (!strcmp(tag, 
"_REQ")) + tag = "_CMD"; + else if (!strcmp(tag, "_RESP")) + tag = "_RSP"; + } + + for (i = 0; i < size; i++) { + if (table[i].type == (UINT)type) { + if (!tag || (strstr(table[i].name, tag))) + return table[i].name; + } + } + sprintf(unknow, "unknow_%x", type); + return unknow; +} + +#define QMI_NAME(table, type) qmi_name_get(table, sizeof(table) / sizeof(table[0]), type, 0) +#define QMUX_NAME(table, type, tag) qmi_name_get(table, sizeof(table) / sizeof(table[0]), type, tag) + +void dump_tlv(PQCQMUX_MSG_HDR pQMUXMsgHdr) { + int TLVFind = 0; + int i; + //dbg("QCQMUX_TLV-----------------------------------\n"); + //dbg("{Type,\tLength,\tValue}\n"); + + while (1) { + PQMI_TLV_HDR TLVHdr = GetTLV(pQMUXMsgHdr, 0x1000 + (++TLVFind)); + if (TLVHdr == NULL) + break; + + //if ((TLVHdr->TLVType == 0x02) && ((USHORT *)(TLVHdr+1))[0]) + { + dbg("{%02x,\t%04x,\t", TLVHdr->TLVType, le16_to_cpu(TLVHdr->TLVLength)); + for (i = 0; i < le16_to_cpu(TLVHdr->TLVLength); i++) { + dbg("%02x ", ((UCHAR *)(TLVHdr+1))[i]); + } + dbg("}\n"); + } + } // while +} + +void dump_ctl(PQCQMICTL_MSG_HDR CTLHdr) { + const char *tag; + + //dbg("QCQMICTL_MSG--------------------------------------------\n"); + //dbg("CtlFlags: %02x\t\t%s\n", CTLHdr->CtlFlags, QMI_NAME(qmi_ctl_CtlFlags, CTLHdr->CtlFlags)); + dbg("TransactionId: %02x\n", CTLHdr->TransactionId); + switch (CTLHdr->CtlFlags) { + case QMICTL_FLAG_REQUEST: tag = "_REQ"; break; + case QMICTL_FLAG_RESPONSE: tag = "_RESP"; break; + case QMICTL_FLAG_INDICATION: tag = "_IND"; break; + default: tag = 0; break; + } + dbg("QMICTLType: %04x\t%s\n", le16_to_cpu(CTLHdr->QMICTLType), + QMUX_NAME(qmux_ctl_QMICTLType, le16_to_cpu(CTLHdr->QMICTLType), tag)); + dbg("Length: %04x\n", le16_to_cpu(CTLHdr->Length)); + + dump_tlv((PQCQMUX_MSG_HDR)(&CTLHdr->QMICTLType)); +} + +int dump_qmux(QMI_SERVICE_TYPE serviceType, PQCQMUX_HDR QMUXHdr) { + PQCQMUX_MSG_HDR QMUXMsgHdr = (PQCQMUX_MSG_HDR) (QMUXHdr + 1); + const char *tag; + + //dbg("QCQMUX--------------------------------------------\n"); + switch (QMUXHdr->CtlFlags&QMUX_CTL_FLAG_MASK_TYPE) { + case QMUX_CTL_FLAG_TYPE_CMD: tag = "_REQ"; break; + case QMUX_CTL_FLAG_TYPE_RSP: tag = "_RESP"; break; + case QMUX_CTL_FLAG_TYPE_IND: tag = "_IND"; break; + default: tag = 0; break; + } + //dbg("CtlFlags: %02x\t\t%s\n", QMUXHdr->CtlFlags, QMUX_NAME(qmux_CtlFlags, QMUXHdr->CtlFlags, tag)); + dbg("TransactionId: %04x\n", le16_to_cpu(QMUXHdr->TransactionId)); + + //dbg("QCQMUX_MSG_HDR-----------------------------------\n"); + switch (serviceType) { + case QMUX_TYPE_DMS: + dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type), + QMUX_NAME(qmux_dms_Type, le16_to_cpu(QMUXMsgHdr->Type), tag)); + break; + case QMUX_TYPE_NAS: + dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type), + QMUX_NAME(qmux_nas_Type, le16_to_cpu(QMUXMsgHdr->Type), tag)); + break; + case QMUX_TYPE_WDS: + case QMUX_TYPE_WDS_IPV6: + dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type), + QMUX_NAME(qmux_wds_Type, le16_to_cpu(QMUXMsgHdr->Type), tag)); + break; + case QMUX_TYPE_WMS: + dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type), + QMUX_NAME(qmux_wms_Type, le16_to_cpu(QMUXMsgHdr->Type), tag)); + break; + case QMUX_TYPE_WDS_ADMIN: + dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type), + QMUX_NAME(qmux_wds_admin_Type, le16_to_cpu(QMUXMsgHdr->Type), tag)); + break; + case QMUX_TYPE_UIM: + dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type), + QMUX_NAME(qmux_uim_Type, le16_to_cpu(QMUXMsgHdr->Type), tag)); + break; + case QMUX_TYPE_PDS: + case QMUX_TYPE_QOS: + dbg("Type: 
%04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type), + QMUX_NAME(qmux_qos_Type, le16_to_cpu(QMUXMsgHdr->Type), tag)); + break; + case QMUX_TYPE_COEX: + dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type), + QMUX_NAME(qmux_coex_Type, le16_to_cpu(QMUXMsgHdr->Type), tag)); + break; + case QMUX_TYPE_CTL: + default: + dbg("Type: %04x\t%s\n", le16_to_cpu(QMUXMsgHdr->Type), "PDS/QOS/CTL/unknown!"); + break; + } + dbg("Length: %04x\n", le16_to_cpu(QMUXMsgHdr->Length)); + + dump_tlv(QMUXMsgHdr); + + return 0; +} + +void dump_qmi(void *dataBuffer, int dataLen) +{ + PQCQMI_HDR QMIHdr = (PQCQMI_HDR)dataBuffer; + PQCQMUX_HDR QMUXHdr = (PQCQMUX_HDR) (QMIHdr + 1); + PQCQMICTL_MSG_HDR CTLHdr = (PQCQMICTL_MSG_HDR) (QMIHdr + 1); + + int i; + + if (!debug_qmi) + return; + + pthread_mutex_lock(&dumpQMIMutex); + line[0] = 0; + for (i = 0; i < dataLen; i++) { + dbg("%02x ", ((unsigned char *)dataBuffer)[i]); + } + dbg_time("%s", line); + line[0] = 0; + + //dbg("QCQMI_HDR-----------------------------------------"); + //dbg("IFType: %02x\t\t%s", QMIHdr->IFType, QMI_NAME(qmi_IFType, QMIHdr->IFType)); + //dbg("Length: %04x", le16_to_cpu(QMIHdr->Length)); + //dbg("CtlFlags: %02x\t\t%s", QMIHdr->CtlFlags, QMI_NAME(qmi_CtlFlags, QMIHdr->CtlFlags)); + //dbg("QMIType: %02x\t\t%s", QMIHdr->QMIType, QMI_NAME(qmi_QMIType, QMIHdr->QMIType)); + //dbg("ClientId: %02x", QMIHdr->ClientId); + + if (QMIHdr->QMIType == QMUX_TYPE_CTL) { + dump_ctl(CTLHdr); + } else { + dump_qmux(QMIHdr->QMIType, QMUXHdr); + } + dbg_time("%s", line); + pthread_mutex_unlock(&dumpQMIMutex); +} diff --git a/wwan/app/quectel_cm_5G/src/QCQMUX.h b/wwan/app/quectel_cm_5G/src/QCQMUX.h new file mode 100644 index 0000000..b1feb1d --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/QCQMUX.h @@ -0,0 +1,4310 @@ +/****************************************************************************** + @file QCQMUX.h + + DESCRIPTION + This module contains QMI QMUX module. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. + + --------------------------------------------------------------------------- + Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved. + Quectel Wireless Solution Proprietary and Confidential. 
+ --------------------------------------------------------------------------- +******************************************************************************/ + + +#ifndef QCQMUX_H +#define QCQMUX_H + +#include "QCQMI.h" + +#pragma pack(push, 1) + +#define QMIWDS_SET_EVENT_REPORT_REQ 0x0001 +#define QMIWDS_SET_EVENT_REPORT_RESP 0x0001 +#define QMIWDS_EVENT_REPORT_IND 0x0001 +#define QMIWDS_START_NETWORK_INTERFACE_REQ 0x0020 +#define QMIWDS_START_NETWORK_INTERFACE_RESP 0x0020 +#define QMIWDS_STOP_NETWORK_INTERFACE_REQ 0x0021 +#define QMIWDS_STOP_NETWORK_INTERFACE_RESP 0x0021 +#define QMIWDS_GET_PKT_SRVC_STATUS_REQ 0x0022 +#define QMIWDS_GET_PKT_SRVC_STATUS_RESP 0x0022 +#define QMIWDS_GET_PKT_SRVC_STATUS_IND 0x0022 +#define QMIWDS_GET_CURRENT_CHANNEL_RATE_REQ 0x0023 +#define QMIWDS_GET_CURRENT_CHANNEL_RATE_RESP 0x0023 +#define QMIWDS_GET_PKT_STATISTICS_REQ 0x0024 +#define QMIWDS_GET_PKT_STATISTICS_RESP 0x0024 +#define QMIWDS_CREATE_PROFILE_REQ 0x0027 +#define QMIWDS_CREATE_PROFILE_RESP 0x0027 +#define QMIWDS_MODIFY_PROFILE_SETTINGS_REQ 0x0028 +#define QMIWDS_MODIFY_PROFILE_SETTINGS_RESP 0x0028 +#define QMIWDS_GET_PROFILE_LIST_REQ 0x002A +#define QMIWDS_GET_PROFILE_LIST_RESP 0x002A +#define QMIWDS_GET_PROFILE_SETTINGS_REQ 0x002B +#define QMIWDS_GET_PROFILE_SETTINGS_RESP 0x002B +#define QMIWDS_GET_DEFAULT_SETTINGS_REQ 0x002C +#define QMIWDS_GET_DEFAULT_SETTINGS_RESP 0x002C +#define QMIWDS_GET_RUNTIME_SETTINGS_REQ 0x002D +#define QMIWDS_GET_RUNTIME_SETTINGS_RESP 0x002D +#define QMIWDS_GET_MIP_MODE_REQ 0x002F +#define QMIWDS_GET_MIP_MODE_RESP 0x002F +#define QMIWDS_GET_DATA_BEARER_REQ 0x0037 +#define QMIWDS_GET_DATA_BEARER_RESP 0x0037 +#define QMIWDS_DUN_CALL_INFO_REQ 0x0038 +#define QMIWDS_DUN_CALL_INFO_RESP 0x0038 +#define QMIWDS_DUN_CALL_INFO_IND 0x0038 +#define QMIWDS_SET_CLIENT_IP_FAMILY_PREF_REQ 0x004D +#define QMIWDS_SET_CLIENT_IP_FAMILY_PREF_RESP 0x004D +#define QMIWDS_SET_AUTO_CONNECT_REQ 0x0051 +#define QMIWDS_SET_AUTO_CONNECT_RESP 0x0051 +#define QMIWDS_BIND_MUX_DATA_PORT_REQ 0x00A2 +#define QMIWDS_BIND_MUX_DATA_PORT_RESP 0x00A2 + + +// Stats masks +#define QWDS_STAT_MASK_TX_PKT_OK 0x00000001 +#define QWDS_STAT_MASK_RX_PKT_OK 0x00000002 +#define QWDS_STAT_MASK_TX_PKT_ER 0x00000004 +#define QWDS_STAT_MASK_RX_PKT_ER 0x00000008 +#define QWDS_STAT_MASK_TX_PKT_OF 0x00000010 +#define QWDS_STAT_MASK_RX_PKT_OF 0x00000020 + +// TLV Types for xfer statistics +#define TLV_WDS_TX_GOOD_PKTS 0x10 +#define TLV_WDS_RX_GOOD_PKTS 0x11 +#define TLV_WDS_TX_ERROR 0x12 +#define TLV_WDS_RX_ERROR 0x13 +#define TLV_WDS_TX_OVERFLOW 0x14 +#define TLV_WDS_RX_OVERFLOW 0x15 +#define TLV_WDS_CHANNEL_RATE 0x16 +#define TLV_WDS_DATA_BEARER 0x17 +#define TLV_WDS_DORMANCY_STATUS 0x18 + +#define QWDS_PKT_DATA_UNKNOW 0x00 +#define QWDS_PKT_DATA_DISCONNECTED 0x01 +#define QWDS_PKT_DATA_CONNECTED 0x02 +#define QWDS_PKT_DATA_SUSPENDED 0x03 +#define QWDS_PKT_DATA_AUTHENTICATING 0x04 + +#define QMIWDS_ADMIN_SET_DATA_FORMAT_REQ 0x0020 +#define QMIWDS_ADMIN_SET_DATA_FORMAT_RESP 0x0020 +#define QMIWDS_ADMIN_GET_DATA_FORMAT_REQ 0x0021 +#define QMIWDS_ADMIN_GET_DATA_FORMAT_RESP 0x0021 +#define QMIWDS_ADMIN_SET_QMAP_SETTINGS_REQ 0x002B +#define QMIWDS_ADMIN_SET_QMAP_SETTINGS_RESP 0x002B +#define QMIWDS_ADMIN_GET_QMAP_SETTINGS_REQ 0x002C +#define QMIWDS_ADMIN_GET_QMAP_SETTINGS_RESP 0x002C +#define QMI_WDA_SET_LOOPBACK_CONFIG_REQ 0x002F +#define QMI_WDA_SET_LOOPBACK_CONFIG_RESP 0x002F +#define QMI_WDA_SET_LOOPBACK_CONFIG_IND 0x002F + +#define NETWORK_DESC_ENCODING_OCTET 0x00 +#define NETWORK_DESC_ENCODING_EXTPROTOCOL 0x01 +#define 
NETWORK_DESC_ENCODING_7BITASCII 0x02 +#define NETWORK_DESC_ENCODING_IA5 0x03 +#define NETWORK_DESC_ENCODING_UNICODE 0x04 +#define NETWORK_DESC_ENCODING_SHIFTJIS 0x05 +#define NETWORK_DESC_ENCODING_KOREAN 0x06 +#define NETWORK_DESC_ENCODING_LATINH 0x07 +#define NETWORK_DESC_ENCODING_LATIN 0x08 +#define NETWORK_DESC_ENCODING_GSM7BIT 0x09 +#define NETWORK_DESC_ENCODING_GSMDATA 0x0A +#define NETWORK_DESC_ENCODING_UNKNOWN 0xFF + +typedef struct _QMIWDS_ADMIN_SET_DATA_FORMAT +{ + USHORT Type; // QMUX type 0x0000 + USHORT Length; +} __attribute__ ((packed)) QMIWDS_ADMIN_SET_DATA_FORMAT, *PQMIWDS_ADMIN_SET_DATA_FORMAT; + +typedef struct _QMIWDS_ADMIN_SET_DATA_FORMAT_TLV_QOS +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR QOSSetting; +} __attribute__ ((packed)) QMIWDS_ADMIN_SET_DATA_FORMAT_TLV_QOS, *PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV_QOS; + +typedef struct _QMIWDS_ADMIN_SET_DATA_FORMAT_TLV +{ + UCHAR TLVType; + USHORT TLVLength; + ULONG Value; +} __attribute__ ((packed)) QMIWDS_ADMIN_SET_DATA_FORMAT_TLV, *PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV; + +typedef struct _QMIWDS_ENDPOINT_TLV +{ + UCHAR TLVType; + USHORT TLVLength; + ULONG ep_type; + ULONG iface_id; +} __attribute__ ((packed)) QMIWDS_ENDPOINT_TLV, *PQMIWDS_ENDPOINT_TLV; + +typedef struct _QMIWDS_ADMIN_SET_DATA_FORMAT_REQ_MSG +{ + USHORT Type; + USHORT Length; + QMIWDS_ADMIN_SET_DATA_FORMAT_TLV_QOS QosDataFormatTlv; + QMIWDS_ADMIN_SET_DATA_FORMAT_TLV UnderlyingLinkLayerProtocolTlv; + QMIWDS_ADMIN_SET_DATA_FORMAT_TLV UplinkDataAggregationProtocolTlv; + QMIWDS_ADMIN_SET_DATA_FORMAT_TLV DownlinkDataAggregationProtocolTlv; + QMIWDS_ADMIN_SET_DATA_FORMAT_TLV DownlinkDataAggregationMaxDatagramsTlv; + QMIWDS_ADMIN_SET_DATA_FORMAT_TLV DownlinkDataAggregationMaxSizeTlv; + QMIWDS_ENDPOINT_TLV epTlv; +#ifdef QUECTEL_UL_DATA_AGG + QMIWDS_ADMIN_SET_DATA_FORMAT_TLV DlMinimumPassingTlv; + QMIWDS_ADMIN_SET_DATA_FORMAT_TLV UplinkDataAggregationMaxDatagramsTlv; + QMIWDS_ADMIN_SET_DATA_FORMAT_TLV UplinkDataAggregationMaxSizeTlv; +#endif +} __attribute__ ((packed)) QMIWDS_ADMIN_SET_DATA_FORMAT_REQ_MSG, *PQMIWDS_ADMIN_SET_DATA_FORMAT_REQ_MSG; + +typedef struct _QMI_U8_TLV +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR TLVVaule; +} __attribute__ ((packed)) QMI_U8_TLV, *PQMI_U8_TLV; + +typedef struct _QMI_U32_TLV +{ + UCHAR TLVType; + USHORT TLVLength; + ULONG TLVVaule; +} __attribute__ ((packed)) QMI_U32_TLV, *PQMI_U32_TLV; + +typedef struct _QMI_WDA_SET_LOOPBACK_CONFIG_REQ_MSG { + USHORT Type; + USHORT Length; + QMI_U8_TLV loopback_state; //0x01 + QMI_U32_TLV replication_factor; //0x10 +} __attribute__ ((packed)) QMI_WDA_SET_LOOPBACK_CONFIG_REQ_MSG, *PQMI_WDA_SET_LOOPBACK_CONFIG_REQ_MSG; + +typedef struct _QMI_WDA_SET_LOOPBACK_CONFIG_IND_MSG +{ + USHORT Type; + USHORT Length; + QMI_U8_TLV loopback_state; //0x01 + QMI_U32_TLV replication_factor; //0x10 +} __attribute__ ((packed)) QMI_WDA_SET_LOOPBACK_CONFIG_IND_MSG, *PQMI_WDA_SET_LOOPBACK_CONFIG_IND_MSG; + +#if 0 +typedef enum _QMI_RETURN_CODES { + QMI_SUCCESS = 0, + QMI_SUCCESS_NOT_COMPLETE, + QMI_FAILURE +}QMI_RETURN_CODES; + +typedef struct _QMIWDS_GET_PKT_SRVC_STATUS_REQ_MSG +{ + USHORT Type; // 0x0022 + USHORT Length; // 0x0000 +} QMIWDS_GET_PKT_SRVC_STATUS_REQ_MSG, *PQMIWDS_GET_PKT_SRVC_STATUS_REQ_MSG; + +typedef struct _QMIWDS_GET_PKT_SRVC_STATUS_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; + UCHAR TLVType2; + USHORT TLVLength2; + UCHAR ConnectionStatus; // 0x01: QWDS_PKT_DATAC_DISCONNECTED + // 0x02: QWDS_PKT_DATA_CONNECTED + // 0x03: 
QWDS_PKT_DATA_SUSPENDED + // 0x04: QWDS_PKT_DATA_AUTHENTICATING +} QMIWDS_GET_PKT_SRVC_STATUS_RESP_MSG, *PQMIWDS_GET_PKT_SRVC_STATUS_RESP_MSG; + +typedef struct _QMIWDS_GET_PKT_SRVC_STATUS_IND_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR ConnectionStatus; // 0x01: QWDS_PKT_DATAC_DISCONNECTED + // 0x02: QWDS_PKT_DATA_CONNECTED + // 0x03: QWDS_PKT_DATA_SUSPENDED + UCHAR ReconfigRequired; // 0x00: No need to reconfigure + // 0x01: Reconfiguration required +} QMIWDS_GET_PKT_SRVC_STATUS_IND_MSG, *PQMIWDS_GET_PKT_SRVC_STATUS_IND_MSG; + +typedef struct _WDS_PKT_SRVC_IP_FAMILY_TLV +{ + UCHAR TLVType; // 0x12 + USHORT TLVLength; // 1 + UCHAR IpFamily; // IPV4-0x04, IPV6-0x06 +} WDS_PKT_SRVC_IP_FAMILY_TLV, *PWDS_PKT_SRVC_IP_FAMILY_TLV; + +typedef struct _QMIWDS_DUN_CALL_INFO_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + ULONG Mask; + UCHAR TLV2Type; + USHORT TLV2Length; + UCHAR ReportConnectionStatus; +} QMIWDS_DUN_CALL_INFO_REQ_MSG, *PQMIWDS_DUN_CALL_INFO_REQ_MSG; + +typedef struct _QMIWDS_DUN_CALL_INFO_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} QMIWDS_DUN_CALL_INFO_RESP_MSG, *PQMIWDS_DUN_CALL_INFO_RESP_MSG; + +typedef struct _QMIWDS_DUN_CALL_INFO_IND_MSG +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR ConnectionStatus; +} QMIWDS_DUN_CALL_INFO_IND_MSG, *PQMIWDS_DUN_CALL_INFO_IND_MSG; + +typedef struct _QMIWDS_GET_CURRENT_CHANNEL_RATE_REQ_MSG +{ + USHORT Type; // QMUX type 0x0040 + USHORT Length; +} QMIWDS_GET_CURRENT_CHANNEL_RATE_REQ_MSG, *PQMIWDS_GET_CURRENT_CHANNEL_RATE_REQ_MSG; + +typedef struct _QMIWDS_GET_CURRENT_CHANNEL_RATE_RESP_MSG +{ + USHORT Type; // QMUX type 0x0040 + USHORT Length; + UCHAR TLVType; // 0x02 + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + + UCHAR TLV2Type; // 0x01 + USHORT TLV2Length; // 16 + //ULONG CallHandle; // Context corresponding to reported channel + ULONG CurrentTxRate; // bps + ULONG CurrentRxRate; // bps + ULONG ServingSystemTxRate; // bps + ULONG ServingSystemRxRate; // bps + +} QMIWDS_GET_CURRENT_CHANNEL_RATE_RESP_MSG, *PQMIWDS_GET_CURRENT_CHANNEL_RATE_RESP; + +#define QWDS_EVENT_REPORT_MASK_RATES 0x01 +#define QWDS_EVENT_REPORT_MASK_STATS 0x02 + +#ifdef QCUSB_MUX_PROTOCOL +#error code not present +#endif // QCUSB_MUX_PROTOCOL + +typedef struct _QMIWDS_SET_EVENT_REPORT_REQ_MSG +{ + USHORT Type; // QMUX type 0x0042 + USHORT Length; + + UCHAR TLVType; // 0x10 -- current channel rate indicator + USHORT TLVLength; // 1 + UCHAR Mode; // 0-do not report; 1-report when rate changes + + UCHAR TLV2Type; // 0x11 + USHORT TLV2Length; // 5 + UCHAR StatsPeriod; // seconds between reports; 0-do not report + ULONG StatsMask; // + + UCHAR TLV3Type; // 0x12 -- current data bearer indicator + USHORT TLV3Length; // 1 + UCHAR Mode3; // 0-do not report; 1-report when changes + + UCHAR TLV4Type; // 0x13 -- dormancy status indicator + USHORT TLV4Length; // 1 + UCHAR DormancyStatus; // 0-do not report; 1-report when changes +} QMIWDS_SET_EVENT_REPORT_REQ_MSG, *PQMIWDS_SET_EVENT_REPORT_REQ_MSG; + +typedef struct _QMIWDS_SET_EVENT_REPORT_RESP_MSG +{ + USHORT Type; // QMUX type 0x0042 + USHORT Length; + + UCHAR TLVType; // 0x02 result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // 
QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_NO_BATTERY + // QMI_ERR_FAULT +} QMIWDS_SET_EVENT_REPORT_RESP_MSG, *PQMIWDS_SET_EVENT_REPORT_RESP_MSG; + +typedef struct _QMIWDS_EVENT_REPORT_IND_MSG +{ + USHORT Type; // QMUX type 0x0001 + USHORT Length; +} QMIWDS_EVENT_REPORT_IND_MSG, *PQMIWDS_EVENT_REPORT_IND_MSG; + +// PQCTLV_PKT_STATISTICS + +typedef struct _QMIWDS_EVENT_REPORT_IND_CHAN_RATE_TLV +{ + UCHAR Type; + USHORT Length; // 8 + ULONG TxRate; + ULONG RxRate; +} QMIWDS_EVENT_REPORT_IND_CHAN_RATE_TLV, *PQMIWDS_EVENT_REPORT_IND_CHAN_RATE_TLV; + +#ifdef QCUSB_MUX_PROTOCOL +#error code not present +#endif // QCUSB_MUX_PROTOCOL + +typedef struct _QMIWDS_GET_PKT_STATISTICS_REQ_MSG +{ + USHORT Type; // QMUX type 0x0041 + USHORT Length; + UCHAR TLVType; // 0x01 + USHORT TLVLength; // 4 + ULONG StateMask; // 0x00000001 tx success packets + // 0x00000002 rx success packets + // 0x00000004 rx packet errors (checksum) + // 0x00000008 rx packets dropped (memory) + +} QMIWDS_GET_PKT_STATISTICS_REQ_MSG, *PQMIWDS_GET_PKT_STATISTICS_REQ_MSG; + +typedef struct _QMIWDS_GET_PKT_STATISTICS_RESP_MSG +{ + USHORT Type; // QMUX type 0x0041 + USHORT Length; + UCHAR TLVType; // 0x02 + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} QMIWDS_GET_PKT_STATISTICS_RESP_MSG, *PQMIWDS_GET_PKT_STATISTICS_RESP_MSG; + +// optional TLV for stats +typedef struct _QCTLV_PKT_STATISTICS +{ + UCHAR TLVType; // see above definitions for TLV types + USHORT TLVLength; // 4 + ULONG Count; +} QCTLV_PKT_STATISTICS, *PQCTLV_PKT_STATISTICS; +#endif + +//#ifdef QC_IP_MODE + +/* + Bit 0 - Profile identifier + Bit 1 - Profile name + Bit 2 - PDP type + Bit 3 - APN name + Bit 4 - DNS address + Bit 5 - UMTS/GPRS granted QoS + Bit 6 - Username + Bit 7 - Authentication Protocol + Bit 8 - IP address + Bit 9 - Gateway information (address and subnet mask) + Bit 10 - PCSCF address using a PCO flag + Bit 11 - PCSCF server address list + Bit 12 - PCSCF domain name list + Bit 13 - MTU + Bit 14 - Domain name list + Bit 15 - IP family + Bit 16 - IM_CM flag + Bit 17 - Technology name + Bit 18 - Operator reserved PCO +*/ +#define QMIWDS_GET_RUNTIME_SETTINGS_MASK_IPV4DNS_ADDR (1 << 4) +#define QMIWDS_GET_RUNTIME_SETTINGS_MASK_IPV4_ADDR (1 << 8) +#define QMIWDS_GET_RUNTIME_SETTINGS_MASK_IPV4GATEWAY_ADDR (1 << 9) +#define QMIWDS_GET_RUNTIME_SETTINGS_MASK_MTU (1 << 13) +#define QMIWDS_GET_RUNTIME_SETTINGS_MASK_PCSCF_SV_ADDR (1 << 11) +#define QMIWDS_GET_RUNTIME_SETTINGS_MASK_PCSCF_DOM_NAME (1 << 14) + +typedef struct _QMIWDS_GET_RUNTIME_SETTINGS_REQ_MSG +{ + USHORT Type; // QMIWDS_GET_RUNTIME_SETTINGS_REQ + USHORT Length; + UCHAR TLVType; // 0x10 + USHORT TLVLength; // 0x0004 + ULONG Mask; // mask, bit 8: IP addr -- 0x0100 +} __attribute__ ((packed)) QMIWDS_GET_RUNTIME_SETTINGS_REQ_MSG, *PQMIWDS_GET_RUNTIME_SETTINGS_REQ_MSG; + +typedef struct _QMIWDS_BIND_MUX_DATA_PORT_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + ULONG ep_type; + ULONG iface_id; + UCHAR TLV2Type; + USHORT TLV2Length; + UCHAR MuxId; + UCHAR TLV3Type; + USHORT TLV3Length; + ULONG client_type; +} __attribute__ ((packed)) QMIWDS_BIND_MUX_DATA_PORT_REQ_MSG, *PQMIWDS_BIND_MUX_DATA_PORT_REQ_MSG; + +#define QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV4PRIMARYDNS 0x15 +#define QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV4SECONDARYDNS 0x16 +#define QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV4 0x1E +#define 
QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV4GATEWAY 0x20 +#define QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV4SUBNET 0x21 + +#define QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV6 0x25 +#define QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV6GATEWAY 0x26 +#define QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV6PRIMARYDNS 0x27 +#define QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV6SECONDARYDNS 0x28 +#define QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_MTU 0x29 + +typedef struct _QMIWDS_GET_RUNTIME_SETTINGS_TLV_MTU +{ + UCHAR TLVType; // QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_MTU + USHORT TLVLength; // 4 + ULONG Mtu; // MTU +} __attribute__ ((packed)) QMIWDS_GET_RUNTIME_SETTINGS_TLV_MTU, *PQMIWDS_GET_RUNTIME_SETTINGS_TLV_MTU; + +typedef struct _QMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV4_ADDR +{ + UCHAR TLVType; // QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV4 + USHORT TLVLength; // 4 + ULONG IPV4Address; // address +} __attribute__ ((packed)) QMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV4_ADDR, *PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV4_ADDR; + +typedef struct _QMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV6_ADDR +{ + UCHAR TLVType; // QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV6 + USHORT TLVLength; // 16 + UCHAR IPV6Address[16]; // address + UCHAR PrefixLength; // prefix length +} __attribute__ ((packed)) QMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV6_ADDR, *PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV6_ADDR; + +typedef struct _QMIWDS_GET_RUNNING_SETTINGS_PCSCF_IPV6_ADDR +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR PCSCFNumber; +} __attribute__ ((packed)) QMIWDS_GET_RUNNING_SETTINGS_PCSCF_IPV6_ADDR, *PQMIWDS_GET_RUNNING_SETTINGS_PCSCF_IPV6_ADDR; + +typedef struct _QMIWDS_GET_RUNNING_SETTINGS_PCSCF_IPV4_ADDR +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR PCSCFNumber; +} __attribute__ ((packed)) QMIWDS_GET_RUNNING_SETTINGS_PCSCF_IPV4_ADDR, *PQMIWDS_GET_RUNNING_SETTINGS_PCSCF_IPV4_ADDR; + +typedef struct _QMIWDS_GET_RUNTIME_SETTINGS_RESP_MSG +{ + USHORT Type; // QMIWDS_GET_RUNTIME_SETTINGS_RESP + USHORT Length; + UCHAR TLVType; // QCTLV_TYPE_RESULT_CODE + USHORT TLVLength; // 0x0004 + USHORT QMUXResult; // result code + USHORT QMUXError; // error code +} __attribute__ ((packed)) QMIWDS_GET_RUNTIME_SETTINGS_RESP_MSG, *PQMIWDS_GET_RUNTIME_SETTINGS_RESP_MSG; + +//#endif // QC_IP_MODE + +typedef struct _QMIWDS_IP_FAMILY_TLV +{ + UCHAR TLVType; // 0x12 + USHORT TLVLength; // 1 + UCHAR IpFamily; // IPV4-0x04, IPV6-0x06 +} __attribute__ ((packed)) QMIWDS_IP_FAMILY_TLV, *PQMIWDS_IP_FAMILY_TLV; + +typedef struct _QMIWDS_PKT_SRVC_TLV +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR ConnectionStatus; + UCHAR ReconfigReqd; +} __attribute__ ((packed)) QMIWDS_PKT_SRVC_TLV, *PQMIWDS_PKT_SRVC_TLV; + +typedef struct _QMIWDS_CALL_END_REASON_TLV +{ + UCHAR TLVType; + USHORT TLVLength; + USHORT CallEndReason; +} __attribute__ ((packed)) QMIWDS_CALL_END_REASON_TLV, *PQMIWDS_CALL_END_REASON_TLV; + +typedef struct _QMIWDS_CALL_END_REASON_V_TLV +{ + UCHAR TLVType; + USHORT TLVLength; + USHORT CallEndReasonType; + USHORT CallEndReason; +} __attribute__ ((packed)) QMIWDS_CALL_END_REASON_V_TLV, *PQMIWDS_CALL_END_REASON_V_TLV; + +typedef struct _QMIWDS_SET_CLIENT_IP_FAMILY_PREF_REQ_MSG +{ + USHORT Type; // QMUX type 0x004D + USHORT Length; + UCHAR TLVType; // 0x01 + USHORT TLVLength; // 1 + UCHAR IpPreference; // IPV4-0x04, IPV6-0x06 +} __attribute__ ((packed)) QMIWDS_SET_CLIENT_IP_FAMILY_PREF_REQ_MSG, *PQMIWDS_SET_CLIENT_IP_FAMILY_PREF_REQ_MSG; + +typedef struct _QMIWDS_SET_CLIENT_IP_FAMILY_PREF_RESP_MSG +{ + USHORT Type; // QMUX type 0x0037 + USHORT Length; + UCHAR TLVType; // 0x02 + USHORT TLVLength; // 4 + 
USHORT QMUXResult; // QMI_RESULT_SUCCESS, QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INTERNAL, QMI_ERR_MALFORMED_MSG, QMI_ERR_INVALID_ARG +} __attribute__ ((packed)) QMIWDS_SET_CLIENT_IP_FAMILY_PREF_RESP_MSG, *PQMIWDS_SET_CLIENT_IP_FAMILY_PREF_RESP_MSG; + +typedef struct _QMIWDS_SET_AUTO_CONNECT_REQ_MSG +{ + USHORT Type; // QMUX type 0x0051 + USHORT Length; + UCHAR TLVType; // 0x01 + USHORT TLVLength; // 1 + UCHAR autoconnect_setting; // 0x00 - Disabled, 0x01 - Enabled, 0x02 - Paused (resume on power cycle) +} __attribute__ ((packed)) QMIWDS_SET_AUTO_CONNECT_REQ_MSG, *PQMIWDS_SET_AUTO_CONNECT_REQ_MSG; + +#if 0 +typedef struct _QMIWDS_GET_MIP_MODE_REQ_MSG +{ + USHORT Type; // QMUX type 0x0040 + USHORT Length; +} QMIWDS_GET_MIP_MODE_REQ_MSG, *PQMIWDS_GET_MIP_MODE_REQ_MSG; + +typedef struct _QMIWDS_GET_MIP_MODE_RESP_MSG +{ + USHORT Type; // QMUX type 0x0040 + USHORT Length; + UCHAR TLVType; // 0x02 + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + + UCHAR TLV2Type; // 0x01 + USHORT TLV2Length; // 20 + UCHAR MipMode; // +} QMIWDS_GET_MIP_MODE_RESP_MSG, *PQMIWDS_GET_MIP_MODE_RESP_MSG; +#endif + +typedef struct _QMIWDS_TECHNOLOGY_PREFERECE +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR TechPreference; +} __attribute__ ((packed)) QMIWDS_TECHNOLOGY_PREFERECE, *PQMIWDS_TECHNOLOGY_PREFERECE; + +typedef struct _QMIWDS_PROFILE_IDENTIFIER +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR ProfileIndex; +} __attribute__ ((packed)) QMIWDS_PROFILE_IDENTIFIER, *PQMIWDS_PROFILE_IDENTIFIER; + +#if 0 +typedef struct _QMIWDS_IPADDRESS +{ + UCHAR TLVType; + USHORT TLVLength; + ULONG IPv4Address; +}QMIWDS_IPADDRESS, *PQMIWDS_IPADDRESS; + +/* +typedef struct _QMIWDS_UMTS_QOS +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR TrafficClass; + ULONG MaxUplinkBitRate; + ULONG MaxDownlinkBitRate; + ULONG GuarUplinkBitRate; + ULONG GuarDownlinkBitRate; + UCHAR QOSDevOrder; + ULONG MAXSDUSize; + UCHAR SDUErrorRatio; + UCHAR ResidualBerRatio; + UCHAR DeliveryErrorSDUs; + ULONG TransferDelay; + ULONG TrafficHndPri; +}QMIWDS_UMTS_QOS, *PQMIWDS_UMTS_QOS; + +typedef struct _QMIWDS_GPRS_QOS +{ + UCHAR TLVType; + USHORT TLVLength; + ULONG PrecedenceClass; + ULONG DelayClass; + ULONG ReliabilityClass; + ULONG PeekThroClass; + ULONG MeanThroClass; +}QMIWDS_GPRS_QOS, *PQMIWDS_GPRS_QOS; +*/ +#endif + +typedef struct _QMIWDS_PDPCONTEXT +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR pdp_context; +} __attribute__ ((packed)) QMIWDS_PDPCONTEXT, *PQMIWDS_PDPCONTEXT; + +typedef struct _QMIWDS_PROFILELIST +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR ProfileList[1024]; +} __attribute__ ((packed)) QMIWDS_PROFILELIST, *PQMIWDS_PROFILELIST; + +typedef struct _QMIWDS_PROFILENAME +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR ProfileName; +} __attribute__ ((packed)) QMIWDS_PROFILENAME, *PQMIWDS_PROFILENAME; + +typedef struct _QMIWDS_PDPTYPE +{ + UCHAR TLVType; + USHORT TLVLength; +// 0 - PDP-IP (IPv4) +// 1 - PDP-PPP +// 2 - PDP-IPv6 +// 3 - PDP-IPv4v6 + UCHAR PdpType; +} __attribute__ ((packed)) QMIWDS_PDPTYPE, *PQMIWDS_PDPTYPE; + +typedef struct _QMIWDS_USERNAME +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR UserName; +} __attribute__ ((packed)) QMIWDS_USERNAME, *PQMIWDS_USERNAME; + +typedef struct _QMIWDS_PASSWD +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR Passwd; +} __attribute__ ((packed)) QMIWDS_PASSWD, *PQMIWDS_PASSWD; + +typedef struct _QMIWDS_AUTH_PREFERENCE +{ + UCHAR 
TLVType; + USHORT TLVLength; + UCHAR AuthPreference; +} __attribute__ ((packed)) QMIWDS_AUTH_PREFERENCE, *PQMIWDS_AUTH_PREFERENCE; + +typedef struct _QMIWDS_IPTYPE +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR IPType; +} __attribute__ ((packed)) QMIWDS_IPTYPE, *PQMIWDS_IPTYPE; + +typedef struct _QMIWDS_APNNAME +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR ApnName; +} __attribute__ ((packed)) QMIWDS_APNNAME, *PQMIWDS_APNNAME; + +typedef struct _QMIWDS_AUTOCONNECT +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR AutoConnect; +} __attribute__ ((packed)) QMIWDS_AUTOCONNECT, *PQMIWDS_AUTOCONNECT; + +typedef struct _QMIWDS_START_NETWORK_INTERFACE_REQ_MSG +{ + USHORT Type; + USHORT Length; +} __attribute__ ((packed)) QMIWDS_START_NETWORK_INTERFACE_REQ_MSG, *PQMIWDS_START_NETWORK_INTERFACE_REQ_MSG; + +typedef struct _QMIWDS_CALLENDREASON +{ + UCHAR TLVType; + USHORT TLVLength; + USHORT Reason; +}__attribute__ ((packed)) QMIWDS_CALLENDREASON, *PQMIWDS_CALLENDREASON; + +typedef struct _QMIWDS_START_NETWORK_INTERFACE_RESP_MSG +{ + USHORT Type; // QMUX type 0x0040 + USHORT Length; + UCHAR TLVType; // 0x02 + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + + UCHAR TLV2Type; // 0x01 + USHORT TLV2Length; // 20 + ULONG Handle; // +} __attribute__ ((packed)) QMIWDS_START_NETWORK_INTERFACE_RESP_MSG, *PQMIWDS_START_NETWORK_INTERFACE_RESP_MSG; + +typedef struct _QMIWDS_STOP_NETWORK_INTERFACE_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + ULONG Handle; +} __attribute__ ((packed)) QMIWDS_STOP_NETWORK_INTERFACE_REQ_MSG, *PQMIWDS_STOP_NETWORK_INTERFACE_REQ_MSG; + +typedef struct _QMIWDS_STOP_NETWORK_INTERFACE_RESP_MSG +{ + USHORT Type; // QMUX type 0x0040 + USHORT Length; + UCHAR TLVType; // 0x02 + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + +} __attribute__ ((packed)) QMIWDS_STOP_NETWORK_INTERFACE_RESP_MSG, *PQMIWDS_STOP_NETWORK_INTERFACE_RESP_MSG; + +typedef struct _QMIWDS_GET_DEFAULT_SETTINGS_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR ProfileType; +} __attribute__ ((packed)) QMIWDS_GET_DEFAULT_SETTINGS_REQ_MSG, *PQMIWDS_GET_DEFAULT_SETTINGS_REQ_MSG; + +typedef struct _QMIWDS_GET_DEFAULT_SETTINGS_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} __attribute__ ((packed)) QMIWDS_GET_DEFAULT_SETTINGS_RESP_MSG, *PQMIWDS_GET_DEFAULT_SETTINGS_RESP_MSG; + +typedef struct _QMIWDS_MODIFY_PROFILE_SETTINGS_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR ProfileType; + UCHAR ProfileIndex; +} __attribute__ ((packed)) QMIWDS_MODIFY_PROFILE_SETTINGS_REQ_MSG, *PQMIWDS_MODIFY_PROFILE_SETTINGS_REQ_MSG; + +typedef struct _QMIWDS_MODIFY_PROFILE_SETTINGS_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} __attribute__ ((packed)) QMIWDS_MODIFY_PROFILE_SETTINGS_RESP_MSG, *PQMIWDS_MODIFY_PROFILE_SETTINGS_RESP_MSG; + +typedef struct _QMIWDS_GET_PROFILE_SETTINGS_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR ProfileType; + UCHAR ProfileIndex; +} __attribute__ ((packed)) QMIWDS_GET_PROFILE_SETTINGS_REQ_MSG, 
*PQMIWDS_GET_PROFILE_SETTINGS_REQ_MSG; + +typedef struct _QMIWDS_CREATE_PROFILE_SETTINGS_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR ProfileType; + UCHAR TLV2Type; //0x25 + USHORT TLV2Length; + UCHAR pdp_context; +} __attribute__ ((packed)) QMIWDS_CREATE_PROFILE_SETTINGS_REQ_MSG, *PQMIWDS_CREATE_PROFILE_SETTINGS_REQ_MSG; + +typedef struct _QMIWDS_GET_PROFILE_LIST_REQ_MSG +{ + USHORT Type; + USHORT Length; +} __attribute__ ((packed)) QMIWDS_GET_PROFILE_LIST_REQ_MSG, *PQMIWDS_GET_PROFILE_LIST_REQ_MSG; + +typedef struct _QMIWDS_GET_PROFILE_LIST_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR ProfileList[1024]; +} __attribute__ ((packed)) QMIWDS_GET_PROFILE_LIST_RESP_MSG, *PQMIWDS_GET_PROFILE_LIST_RESP_MSG; + +#if 0 +typedef struct _QMIWDS_EVENT_REPORT_IND_DATA_BEARER_TLV +{ + UCHAR Type; + USHORT Length; + UCHAR DataBearer; +} QMIWDS_EVENT_REPORT_IND_DATA_BEARER_TLV, *PQMIWDS_EVENT_REPORT_IND_DATA_BEARER_TLV; + +typedef struct _QMIWDS_EVENT_REPORT_IND_DORMANCY_STATUS_TLV +{ + UCHAR Type; + USHORT Length; + UCHAR DormancyStatus; +} QMIWDS_EVENT_REPORT_IND_DORMANCY_STATUS_TLV, *PQMIWDS_EVENT_REPORT_IND_DORMANCY_STATUS_TLV; + + +typedef struct _QMIWDS_GET_DATA_BEARER_REQ_MSG +{ + USHORT Type; // QMUX type 0x0037 + USHORT Length; +} QMIWDS_GET_DATA_BEARER_REQ_MSG, *PQMIWDS_GET_DATA_BEARER_REQ_MSG; + +typedef struct _QMIWDS_GET_DATA_BEARER_RESP_MSG +{ + USHORT Type; // QMUX type 0x0037 + USHORT Length; + UCHAR TLVType; // 0x02 + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INTERNAL + // QMI_ERR_MALFORMED_MSG + // QMI_ERR_NO_MEMORY + // QMI_ERR_OUT_OF_CALL + // QMI_ERR_INFO_UNAVAILABLE + UCHAR TLV2Type; // 0x01 + USHORT TLV2Length; // + UCHAR Technology; // +} QMIWDS_GET_DATA_BEARER_RESP_MSG, *PQMIWDS_GET_DATA_BEARER_RESP_MSG; +#endif + +// ======================= DMS ============================== +#define QMIDMS_SET_EVENT_REPORT_REQ 0x0001 +#define QMIDMS_SET_EVENT_REPORT_RESP 0x0001 +#define QMIDMS_EVENT_REPORT_IND 0x0001 +#define QMIDMS_GET_DEVICE_CAP_REQ 0x0020 +#define QMIDMS_GET_DEVICE_CAP_RESP 0x0020 +#define QMIDMS_GET_DEVICE_MFR_REQ 0x0021 +#define QMIDMS_GET_DEVICE_MFR_RESP 0x0021 +#define QMIDMS_GET_DEVICE_MODEL_ID_REQ 0x0022 +#define QMIDMS_GET_DEVICE_MODEL_ID_RESP 0x0022 +#define QMIDMS_GET_DEVICE_REV_ID_REQ 0x0023 +#define QMIDMS_GET_DEVICE_REV_ID_RESP 0x0023 +#define QMIDMS_GET_MSISDN_REQ 0x0024 +#define QMIDMS_GET_MSISDN_RESP 0x0024 +#define QMIDMS_GET_DEVICE_SERIAL_NUMBERS_REQ 0x0025 +#define QMIDMS_GET_DEVICE_SERIAL_NUMBERS_RESP 0x0025 +#define QMIDMS_UIM_SET_PIN_PROTECTION_REQ 0x0027 +#define QMIDMS_UIM_SET_PIN_PROTECTION_RESP 0x0027 +#define QMIDMS_UIM_VERIFY_PIN_REQ 0x0028 +#define QMIDMS_UIM_VERIFY_PIN_RESP 0x0028 +#define QMIDMS_UIM_UNBLOCK_PIN_REQ 0x0029 +#define QMIDMS_UIM_UNBLOCK_PIN_RESP 0x0029 +#define QMIDMS_UIM_CHANGE_PIN_REQ 0x002A +#define QMIDMS_UIM_CHANGE_PIN_RESP 0x002A +#define QMIDMS_UIM_GET_PIN_STATUS_REQ 0x002B +#define QMIDMS_UIM_GET_PIN_STATUS_RESP 0x002B +#define QMIDMS_GET_DEVICE_HARDWARE_REV_REQ 0x002C +#define QMIDMS_GET_DEVICE_HARDWARE_REV_RESP 0x002C +#define QMIDMS_GET_OPERATING_MODE_REQ 0x002D +#define QMIDMS_GET_OPERATING_MODE_RESP 0x002D +#define QMIDMS_SET_OPERATING_MODE_REQ 0x002E +#define QMIDMS_SET_OPERATING_MODE_RESP 0x002E +#define QMIDMS_GET_ACTIVATED_STATUS_REQ 0x0031 +#define QMIDMS_GET_ACTIVATED_STATUS_RESP 0x0031 +#define QMIDMS_ACTIVATE_AUTOMATIC_REQ 0x0032 +#define 
QMIDMS_ACTIVATE_AUTOMATIC_RESP 0x0032 +#define QMIDMS_ACTIVATE_MANUAL_REQ 0x0033 +#define QMIDMS_ACTIVATE_MANUAL_RESP 0x0033 +#define QMIDMS_UIM_GET_ICCID_REQ 0x003C +#define QMIDMS_UIM_GET_ICCID_RESP 0x003C +#define QMIDMS_UIM_GET_CK_STATUS_REQ 0x0040 +#define QMIDMS_UIM_GET_CK_STATUS_RESP 0x0040 +#define QMIDMS_UIM_SET_CK_PROTECTION_REQ 0x0041 +#define QMIDMS_UIM_SET_CK_PROTECTION_RESP 0x0041 +#define QMIDMS_UIM_UNBLOCK_CK_REQ 0x0042 +#define QMIDMS_UIM_UNBLOCK_CK_RESP 0x0042 +#define QMIDMS_UIM_GET_IMSI_REQ 0x0043 +#define QMIDMS_UIM_GET_IMSI_RESP 0x0043 +#define QMIDMS_UIM_GET_STATE_REQ 0x0044 +#define QMIDMS_UIM_GET_STATE_RESP 0x0044 +#define QMIDMS_GET_BAND_CAP_REQ 0x0045 +#define QMIDMS_GET_BAND_CAP_RESP 0x0045 + +#if 0 +typedef struct _QMIDMS_GET_DEVICE_MFR_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; +} QMIDMS_GET_DEVICE_MFR_REQ_MSG, *PQMIDMS_GET_DEVICE_MFR_REQ_MSG; + +typedef struct _QMIDMS_GET_DEVICE_MFR_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + UCHAR TLV2Type; // 0x01 - required parameter + USHORT TLV2Length; // length of the mfr string + UCHAR DeviceManufacturer; // first byte of string +} QMIDMS_GET_DEVICE_MFR_RESP_MSG, *PQMIDMS_GET_DEVICE_MFR_RESP_MSG; + +typedef struct _QMIDMS_GET_DEVICE_MODEL_ID_REQ_MSG +{ + USHORT Type; // QMUX type 0x0004 + USHORT Length; +} QMIDMS_GET_DEVICE_MODEL_ID_REQ_MSG, *PQMIDMS_GET_DEVICE_MODEL_ID_REQ_MSG; + +typedef struct _QMIDMS_GET_DEVICE_MODEL_ID_RESP_MSG +{ + USHORT Type; // QMUX type 0x0004 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + UCHAR TLV2Type; // 0x01 - required parameter + USHORT TLV2Length; // length of the modem id string + UCHAR DeviceModelID; // device model id +} QMIDMS_GET_DEVICE_MODEL_ID_RESP_MSG, *PQMIDMS_GET_DEVICE_MODEL_ID_RESP_MSG; +#endif + +typedef struct _QMIDMS_GET_DEVICE_REV_ID_REQ_MSG +{ + USHORT Type; // QMUX type 0x0005 + USHORT Length; +} __attribute__ ((packed)) QMIDMS_GET_DEVICE_REV_ID_REQ_MSG, *PQMIDMS_GET_DEVICE_REV_ID_REQ_MSG; + +typedef struct _DEVICE_REV_ID +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR RevisionID; +} __attribute__ ((packed)) DEVICE_REV_ID, *PDEVICE_REV_ID; + +#if 0 +typedef struct _QMIDMS_GET_DEVICE_REV_ID_RESP_MSG +{ + USHORT Type; // QMUX type 0x0023 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} QMIDMS_GET_DEVICE_REV_ID_RESP_MSG, *PQMIDMS_GET_DEVICE_REV_ID_RESP_MSG; + +typedef struct _QMIDMS_GET_MSISDN_REQ_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; +} QMIDMS_GET_MSISDN_REQ_MSG, *PQMIDMS_GET_MSISDN_REQ_MSG; + +typedef struct _QCTLV_DEVICE_VOICE_NUMBERS +{ + UCHAR TLVType; // as defined above + USHORT TLVLength; // 4/7/7 + UCHAR VoideNumberString; // ESN, IMEI, or MEID + +} QCTLV_DEVICE_VOICE_NUMBERS, *PQCTLV_DEVICE_VOICE_NUMBERS; + + +typedef struct _QMIDMS_GET_MSISDN_RESP_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT 
TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG +} QMIDMS_GET_MSISDN_RESP_MSG, *PQMIDMS_GET_MSISDN_RESP_MSG; +#endif + +typedef struct _QMIDMS_UIM_GET_IMSI_REQ_MSG +{ + USHORT Type; + USHORT Length; +} __attribute__ ((packed)) QMIDMS_UIM_GET_IMSI_REQ_MSG, *PQMIDMS_UIM_GET_IMSI_REQ_MSG; + +typedef struct _QMIDMS_UIM_GET_IMSI_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; + UCHAR TLV2Type; + USHORT TLV2Length; + UCHAR IMSI; +} __attribute__ ((packed)) QMIDMS_UIM_GET_IMSI_RESP_MSG, *PQMIDMS_UIM_GET_IMSI_RESP_MSG; + +#if 0 +typedef struct _QMIDMS_GET_DEVICE_SERIAL_NUMBERS_REQ_MSG +{ + USHORT Type; // QMUX type 0x0007 + USHORT Length; +} QMIDMS_GET_DEVICE_SERIAL_NUMBERS_REQ_MSG, *PQMIDMS_GET_DEVICE_SERIAL_NUMBERS_REQ_MSG; + +#define QCTLV_TYPE_SER_NUM_ESN 0x10 +#define QCTLV_TYPE_SER_NUM_IMEI 0x11 +#define QCTLV_TYPE_SER_NUM_MEID 0x12 + +typedef struct _QCTLV_DEVICE_SERIAL_NUMBER +{ + UCHAR TLVType; // as defined above + USHORT TLVLength; // 4/7/7 + UCHAR SerialNumberString; // ESN, IMEI, or MEID + +} QCTLV_DEVICE_SERIAL_NUMBER, *PQCTLV_DEVICE_SERIAL_NUMBER; + +typedef struct _QMIDMS_GET_DEVICE_SERIAL_NUMBERS_RESP_MSG +{ + USHORT Type; // QMUX type 0x0007 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + // followed by optional TLV +} QMIDMS_GET_DEVICE_SERIAL_NUMBERS_RESP_MSG, *PQMIDMS_GET_DEVICE_SERIAL_NUMBERS_RESP; + +typedef struct _QMIDMS_GET_DMS_BAND_CAP +{ + USHORT Type; + USHORT Length; +} QMIDMS_GET_BAND_CAP_REQ_MSG, *PQMIDMS_GET_BAND_CAP_REQ_MSG; + +typedef struct _QMIDMS_GET_BAND_CAP_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_NONE + // QMI_ERR_INTERNAL + // QMI_ERR_MALFORMED_MSG + // QMI_ERR_NO_MEMORY + + UCHAR TLV2Type; // 0x01 + USHORT TLV2Length; // 2 + ULONG64 BandCap; +} QMIDMS_GET_BAND_CAP_RESP_MSG, *PQMIDMS_GET_BAND_CAP_RESP; + +typedef struct _QMIDMS_GET_DEVICE_CAP_REQ_MSG +{ + USHORT Type; // QMUX type 0x0002 + USHORT Length; +} QMIDMS_GET_DEVICE_CAP_REQ_MSG, *PQMIDMS_GET_DEVICE_CAP_REQ_MSG; + +typedef struct _QMIDMS_GET_DEVICE_CAP_RESP_MSG +{ + USHORT Type; // QMUX type 0x0002 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMUX_RESULT_SUCCESS + // QMUX_RESULT_FAILURE + USHORT QMUXError; // QMUX_ERR_INVALID_ARG + // QMUX_ERR_NO_MEMORY + // QMUX_ERR_INTERNAL + // QMUX_ERR_FAULT + UCHAR TLV2Type; // 0x01 + USHORT TLV2Length; // 2 + + ULONG MaxTxChannelRate; + ULONG MaxRxChannelRate; + UCHAR VoiceCap; + UCHAR SimCap; + + UCHAR RadioIfListCnt; // #elements in radio interface list + UCHAR RadioIfList; // N 1-byte elements +} QMIDMS_GET_DEVICE_CAP_RESP_MSG, *PQMIDMS_GET_DEVICE_CAP_RESP_MSG; + +typedef struct _QMIDMS_GET_ACTIVATED_STATUS_REQ_MSG +{ + USHORT Type; // QMUX type 0x0002 + USHORT Length; +} QMIDMS_GET_ACTIVATED_STATUS_REQ_MSG, *PQMIDMS_GET_ACTIVATES_STATUD_REQ_MSG; + +typedef struct _QMIDMS_GET_ACTIVATED_STATUS_RESP_MSG +{ + USHORT Type; // QMUX type 0x0002 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMUX_RESULT_SUCCESS + // 
QMUX_RESULT_FAILURE + USHORT QMUXError; // QMUX_ERR_INVALID_ARG + // QMUX_ERR_NO_MEMORY + // QMUX_ERR_INTERNAL + // QMUX_ERR_FAULT + UCHAR TLV2Type; // 0x01 + USHORT TLV2Length; // 2 + + USHORT ActivatedStatus; +} QMIDMS_GET_ACTIVATED_STATUS_RESP_MSG, *PQMIDMS_GET_ACTIVATED_STATUS_RESP_MSG; + +typedef struct _QMIDMS_GET_OPERATING_MODE_REQ_MSG +{ + USHORT Type; // QMUX type 0x0002 + USHORT Length; +} QMIDMS_GET_OPERATING_MODE_REQ_MSG, *PQMIDMS_GET_OPERATING_MODE_REQ_MSG; + +typedef struct _OFFLINE_REASON +{ + UCHAR TLVType; + USHORT TLVLength; + USHORT OfflineReason; +} OFFLINE_REASON, *POFFLINE_REASON; + +typedef struct _HARDWARE_RESTRICTED_MODE +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR HardwareControlledMode; +} HARDWARE_RESTRICTED_MODE, *PHARDWARE_RESTRICTED_MODE; + +typedef struct _QMIDMS_GET_OPERATING_MODE_RESP_MSG +{ + USHORT Type; // QMUX type 0x0002 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMUX_RESULT_SUCCESS + // QMUX_RESULT_FAILURE + USHORT QMUXError; // QMUX_ERR_INVALID_ARG + // QMUX_ERR_NO_MEMORY + // QMUX_ERR_INTERNAL + // QMUX_ERR_FAULT + UCHAR TLV2Type; // 0x01 + USHORT TLV2Length; // 2 + + UCHAR OperatingMode; +} QMIDMS_GET_OPERATING_MODE_RESP_MSG, *PQMIDMS_GET_OPERATING_MODE_RESP_MSG; + +typedef struct _QMIDMS_UIM_GET_ICCID_REQ_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; +} QMIDMS_UIM_GET_ICCID_REQ_MSG, *PQMIDMS_UIM_GET_ICCID_REQ_MSG; + +typedef struct _QMIDMS_UIM_GET_ICCID_RESP_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + UCHAR TLV2Type; // 0x01 - required parameter + USHORT TLV2Length; // var + UCHAR ICCID; // String of voice number +} QMIDMS_UIM_GET_ICCID_RESP_MSG, *PQMIDMS_UIM_GET_ICCID_RESP_MSG; +#endif + +typedef struct _QMIDMS_SET_OPERATING_MODE_REQ_MSG +{ + USHORT Type; // QMUX type 0x0002 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + UCHAR OperatingMode; +} __attribute__ ((packed)) QMIDMS_SET_OPERATING_MODE_REQ_MSG, *PQMIDMS_SET_OPERATING_MODE_REQ_MSG; + +typedef struct _QMIDMS_SET_OPERATING_MODE_RESP_MSG +{ + USHORT Type; // QMUX type 0x0002 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMUX_RESULT_SUCCESS + // QMUX_RESULT_FAILURE + USHORT QMUXError; // QMUX_ERR_INVALID_ARG + // QMUX_ERR_NO_MEMORY + // QMUX_ERR_INTERNAL + // QMUX_ERR_FAULT +} __attribute__ ((packed)) QMIDMS_SET_OPERATING_MODE_RESP_MSG, *PQMIDMS_SET_OPERATING_MODE_RESP_MSG; + +#if 0 +typedef struct _QMIDMS_ACTIVATE_AUTOMATIC_REQ_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // + UCHAR ActivateCodelen; + UCHAR ActivateCode; +} QMIDMS_ACTIVATE_AUTOMATIC_REQ_MSG, *PQMIDMS_ACTIVATE_AUTOMATIC_REQ_MSG; + +typedef struct _QMIDMS_ACTIVATE_AUTOMATIC_RESP_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} QMIDMS_ACTIVATE_AUTOMATIC_RESP_MSG, *PQMIDMS_ACTIVATE_AUTOMATIC_RESP_MSG; + + +typedef struct _SPC_MSG +{ + UCHAR SPC[6]; + USHORT SID; +} SPC_MSG, *PSPC_MSG; + 
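For orientation, this is how request structs like QMIDMS_SET_OPERATING_MODE_REQ_MSG (defined live a few structs above) are typically filled in before being framed into a QMUX SDU. A minimal sketch, not part of the patch: it assumes a cpu_to_le16() byte-order helper (the inverse of the le16_to_cpu() used by the dump code in QCQMUX.c), that the mandatory operating-mode TLV is type 0x01, and the usual QMI DMS mode values (0x00 = online, 0x01 = low power).

static void build_set_operating_mode(QMIDMS_SET_OPERATING_MODE_REQ_MSG *req, UCHAR mode)
{
    req->Type = cpu_to_le16(QMIDMS_SET_OPERATING_MODE_REQ);  /* 0x002E */
    /* Length counts everything after the 4-byte message header:
       here one TLV header (3 bytes) plus a 1-byte value. */
    req->Length = cpu_to_le16(sizeof(*req) - 4);
    req->TLVType = 0x01;               /* operating-mode TLV (assumed type) */
    req->TLVLength = cpu_to_le16(1);
    req->OperatingMode = mode;         /* e.g. 0x00 for online */
}

The same pattern (fixed-layout packed struct, lengths in little-endian) applies to the other *_REQ_MSG structs in this header.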
+typedef struct _MDN_MSG +{ + UCHAR MDNLEN; + UCHAR MDN; +} MDN_MSG, *PMDN_MSG; + +typedef struct _MIN_MSG +{ + UCHAR MINLEN; + UCHAR MIN; +} MIN_MSG, *PMIN_MSG; + +typedef struct _PRL_MSG +{ + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // + USHORT PRLLEN; + UCHAR PRL; +} PRL_MSG, *PPRL_MSG; + +typedef struct _MN_HA_KEY_MSG +{ + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // + UCHAR MN_HA_KEY_LEN; + UCHAR MN_HA_KEY; +} MN_HA_KEY_MSG, *PMN_HA_KEY_MSG; + +typedef struct _MN_AAA_KEY_MSG +{ + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // + UCHAR MN_AAA_KEY_LEN; + UCHAR MN_AAA_KEY; +} MN_AAA_KEY_MSG, *PMN_AAA_KEY_MSG; + +typedef struct _QMIDMS_ACTIVATE_MANUAL_REQ_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // + UCHAR Value; +} QMIDMS_ACTIVATE_MANUAL_REQ_MSG, *PQMIDMS_ACTIVATE_MANUAL_REQ_MSG; + +typedef struct _QMIDMS_ACTIVATE_MANUAL_RESP_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} QMIDMS_ACTIVATE_MANUAL_RESP_MSG, *PQMIDMS_ACTIVATE_MANUAL_RESP_MSG; +#endif + +typedef struct _QMIDMS_UIM_GET_STATE_REQ_MSG +{ + USHORT Type; + USHORT Length; +} __attribute__ ((packed)) QMIDMS_UIM_GET_STATE_REQ_MSG, *PQMIDMS_UIM_GET_STATE_REQ_MSG; + +typedef struct _QMIDMS_UIM_GET_STATE_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; + UCHAR TLV2Type; + USHORT TLV2Length; + UCHAR UIMState; +} __attribute__ ((packed)) QMIDMS_UIM_GET_STATE_RESP_MSG, *PQMIDMS_UIM_GET_STATE_RESP_MSG; + +typedef struct _QMIDMS_UIM_GET_PIN_STATUS_REQ_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; +} __attribute__ ((packed)) QMIDMS_UIM_GET_PIN_STATUS_REQ_MSG, *PQMIDMS_UIM_GET_PIN_STATUS_REQ_MSG; + +typedef struct _QMIDMS_UIM_PIN_STATUS +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR PINStatus; + UCHAR PINVerifyRetriesLeft; + UCHAR PINUnblockRetriesLeft; +} __attribute__ ((packed)) QMIDMS_UIM_PIN_STATUS, *PQMIDMS_UIM_PIN_STATUS; + +#define QMI_PIN_STATUS_NOT_INIT 0 +#define QMI_PIN_STATUS_NOT_VERIF 1 +#define QMI_PIN_STATUS_VERIFIED 2 +#define QMI_PIN_STATUS_DISABLED 3 +#define QMI_PIN_STATUS_BLOCKED 4 +#define QMI_PIN_STATUS_PERM_BLOCKED 5 +#define QMI_PIN_STATUS_UNBLOCKED 6 +#define QMI_PIN_STATUS_CHANGED 7 + + +typedef struct _QMIDMS_UIM_GET_PIN_STATUS_RESP_MSG +{ + USHORT Type; // QMUX type 0x0024 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + UCHAR PinStatus; +} __attribute__ ((packed)) QMIDMS_UIM_GET_PIN_STATUS_RESP_MSG, *PQMIDMS_UIM_GET_PIN_STATUS_RESP_MSG; + +#if 0 +typedef struct _QMIDMS_UIM_GET_CK_STATUS_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR Facility; +} QMIDMS_UIM_GET_CK_STATUS_REQ_MSG, *PQMIDMS_UIM_GET_CK_STATUS_REQ_MSG; + + +typedef struct _QMIDMS_UIM_CK_STATUS +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR FacilityStatus; + UCHAR FacilityVerifyRetriesLeft; + UCHAR FacilityUnblockRetriesLeft; +} QMIDMS_UIM_CK_STATUS, *PQMIDMS_UIM_CK_STATUS; + +typedef struct _QMIDMS_UIM_CK_OPERATION_STATUS +{ + UCHAR TLVType; + USHORT 
TLVLength;
+    UCHAR  OperationBlocking;
+} QMIDMS_UIM_CK_OPERATION_STATUS, *PQMIDMS_UIM_CK_OPERATION_STATUS;
+
+typedef struct _QMIDMS_UIM_GET_CK_STATUS_RESP_MSG
+{
+    USHORT Type;
+    USHORT Length;
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    USHORT QMUXResult;
+    USHORT QMUXError;
+    UCHAR  CkStatus;
+} QMIDMS_UIM_GET_CK_STATUS_RESP_MSG, *PQMIDMS_UIM_GET_CK_STATUS_RESP_MSG;
+#endif
+
+typedef struct _QMIDMS_UIM_VERIFY_PIN_REQ_MSG
+{
+    USHORT Type;                       // QMUX type 0x0024
+    USHORT Length;
+    UCHAR  TLVType;                    // 0x01 - PIN info
+    USHORT TLVLength;                  // var
+    UCHAR  PINID;
+    UCHAR  PINLen;
+    UCHAR  PINValue;
+} __attribute__ ((packed)) QMIDMS_UIM_VERIFY_PIN_REQ_MSG, *PQMIDMS_UIM_VERIFY_PIN_REQ_MSG;
+
+typedef struct _QMIDMS_UIM_VERIFY_PIN_RESP_MSG
+{
+    USHORT Type;                       // QMUX type 0x0024
+    USHORT Length;
+    UCHAR  TLVType;                    // 0x02 - result code
+    USHORT TLVLength;                  // 4
+    USHORT QMUXResult;                 // QMI_RESULT_SUCCESS
+                                       // QMI_RESULT_FAILURE
+    USHORT QMUXError;                  // QMI_ERR_INVALID_ARG
+                                       // QMI_ERR_NO_MEMORY
+                                       // QMI_ERR_INTERNAL
+                                       // QMI_ERR_FAULT
+    UCHAR  TLV2Type;
+    USHORT TLV2Length;
+    UCHAR  PINVerifyRetriesLeft;
+    UCHAR  PINUnblockRetriesLeft;
+} __attribute__ ((packed)) QMIDMS_UIM_VERIFY_PIN_RESP_MSG, *PQMIDMS_UIM_VERIFY_PIN_RESP_MSG;
+
+#if 0
+typedef struct _QMIDMS_UIM_SET_PIN_PROTECTION_REQ_MSG
+{
+    USHORT Type;                       // QMUX type 0x0024
+    USHORT Length;
+    UCHAR  TLVType;                    // 0x02 - result code
+    USHORT TLVLength;                  // 4
+    UCHAR  PINID;
+    UCHAR  ProtectionSetting;
+    UCHAR  PINLen;
+    UCHAR  PINValue;
+} QMIDMS_UIM_SET_PIN_PROTECTION_REQ_MSG, *PQMIDMS_UIM_SET_PIN_PROTECTION_REQ_MSG;
+
+typedef struct _QMIDMS_UIM_SET_PIN_PROTECTION_RESP_MSG
+{
+    USHORT Type;                       // QMUX type 0x0024
+    USHORT Length;
+    UCHAR  TLVType;                    // 0x02 - result code
+    USHORT TLVLength;                  // 4
+    USHORT QMUXResult;                 // QMI_RESULT_SUCCESS
+                                       // QMI_RESULT_FAILURE
+    USHORT QMUXError;                  // QMI_ERR_INVALID_ARG
+                                       // QMI_ERR_NO_MEMORY
+                                       // QMI_ERR_INTERNAL
+                                       // QMI_ERR_FAULT
+    UCHAR  TLV2Type;
+    USHORT TLV2Length;
+    UCHAR  PINVerifyRetriesLeft;
+    UCHAR  PINUnblockRetriesLeft;
+} QMIDMS_UIM_SET_PIN_PROTECTION_RESP_MSG, *PQMIDMS_UIM_SET_PIN_PROTECTION_RESP_MSG;
+
+typedef struct _QMIDMS_UIM_SET_CK_PROTECTION_REQ_MSG
+{
+    USHORT Type;
+    USHORT Length;
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    UCHAR  Facility;
+    UCHAR  FacilityState;
+    UCHAR  FacilityLen;
+    UCHAR  FacilityValue;
+} QMIDMS_UIM_SET_CK_PROTECTION_REQ_MSG, *PQMIDMS_UIM_SET_CK_PROTECTION_REQ_MSG;
+
+typedef struct _QMIDMS_UIM_SET_CK_PROTECTION_RESP_MSG
+{
+    USHORT Type;
+    USHORT Length;
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    USHORT QMUXResult;
+    USHORT QMUXError;
+    UCHAR  TLV2Type;
+    USHORT TLV2Length;
+    UCHAR  FacilityRetriesLeft;
+} QMIDMS_UIM_SET_CK_PROTECTION_RESP_MSG, *PQMIDMS_UIM_SET_CK_PROTECTION_RESP_MSG;
+
+
+typedef struct _UIM_PIN
+{
+    UCHAR PinLength;
+    UCHAR PinValue;
+} UIM_PIN, *PUIM_PIN;
+
+typedef struct _QMIDMS_UIM_CHANGE_PIN_REQ_MSG
+{
+    USHORT Type;                       // QMUX type 0x0024
+    USHORT Length;
+    UCHAR  TLVType;                    // 0x02 - result code
+    USHORT TLVLength;                  // 4
+    UCHAR  PINID;
+    UCHAR  PinDetails;
+} QMIDMS_UIM_CHANGE_PIN_REQ_MSG, *PQMIDMS_UIM_CHANGE_PIN_REQ_MSG;
+
+typedef struct QMIDMS_UIM_CHANGE_PIN_RESP_MSG
+{
+    USHORT Type;                       // QMUX type 0x0024
+    USHORT Length;
+    UCHAR  TLVType;                    // 0x02 - result code
+    USHORT TLVLength;                  // 4
+    USHORT QMUXResult;                 // QMI_RESULT_SUCCESS
+                                       // QMI_RESULT_FAILURE
+    USHORT QMUXError;                  // QMI_ERR_INVALID_ARG
+                                       // QMI_ERR_NO_MEMORY
+                                       // QMI_ERR_INTERNAL
+                                       // QMI_ERR_FAULT
+    UCHAR  TLV2Type;
+    USHORT TLV2Length;
+    UCHAR  PINVerifyRetriesLeft;
+    UCHAR  PINUnblockRetriesLeft;
+} QMIDMS_UIM_CHANGE_PIN_RESP_MSG, *PQMIDMS_UIM_CHANGE_PIN_RESP_MSG;
+
+typedef struct _UIM_PUK
+{
+    UCHAR PukLength;
+    UCHAR PukValue;
+} UIM_PUK, *PUIM_PUK;
+
+typedef struct _QMIDMS_UIM_UNBLOCK_PIN_REQ_MSG
+{
+    USHORT Type;                       // QMUX type 0x0024
+    USHORT Length;
+    UCHAR  TLVType;                    // 0x02 - result code
+    USHORT TLVLength;                  // 4
+    UCHAR  PINID;
+    UCHAR  PinDetails;
+} QMIDMS_UIM_UNBLOCK_PIN_REQ_MSG, *PQMIDMS_UIM_UNBLOCK_PIN_REQ_MSG;
+
+typedef struct QMIDMS_UIM_UNBLOCK_PIN_RESP_MSG
+{
+    USHORT Type;                       // QMUX type 0x0024
+    USHORT Length;
+    UCHAR  TLVType;                    // 0x02 - result code
+    USHORT TLVLength;                  // 4
+    USHORT QMUXResult;                 // QMI_RESULT_SUCCESS
+                                       // QMI_RESULT_FAILURE
+    USHORT QMUXError;                  // QMI_ERR_INVALID_ARG
+                                       // QMI_ERR_NO_MEMORY
+                                       // QMI_ERR_INTERNAL
+                                       // QMI_ERR_FAULT
+    UCHAR  TLV2Type;
+    USHORT TLV2Length;
+    UCHAR  PINVerifyRetriesLeft;
+    UCHAR  PINUnblockRetriesLeft;
+} QMIDMS_UIM_UNBLOCK_PIN_RESP_MSG, *PQMIDMS_UIM_UNBLOCK_PIN_RESP_MSG;
+
+typedef struct _QMIDMS_UIM_UNBLOCK_CK_REQ_MSG
+{
+    USHORT Type;
+    USHORT Length;
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    UCHAR  Facility;
+    UCHAR  FacilityUnblockLen;
+    UCHAR  FacilityUnblockValue;
+} QMIDMS_UIM_UNBLOCK_CK_REQ_MSG, *PQMIDMS_UIM_UNBLOCK_CK_REQ_MSG;
+
+typedef struct QMIDMS_UIM_UNBLOCK_CK_RESP_MSG
+{
+    USHORT Type;
+    USHORT Length;
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    USHORT QMUXResult;
+    USHORT QMUXError;
+    UCHAR  TLV2Type;
+    USHORT TLV2Length;
+    UCHAR  FacilityUnblockRetriesLeft;
+} QMIDMS_UIM_UNBLOCK_CK_RESP_MSG, *PQMIDMS_UIM_UNBLOCK_CK_RESP_MSG;
+
+typedef struct _QMIDMS_SET_EVENT_REPORT_REQ_MSG
+{
+    USHORT Type;
+    USHORT Length;
+} QMIDMS_SET_EVENT_REPORT_REQ_MSG, *PQMIDMS_SET_EVENT_REPORT_REQ_MSG;
+
+typedef struct _QMIDMS_SET_EVENT_REPORT_RESP_MSG
+{
+    USHORT Type;                       // QMUX type 0x0003
+    USHORT Length;
+    UCHAR  TLVType;                    // 0x02 - result code
+    USHORT TLVLength;                  // 4
+    USHORT QMUXResult;                 // QMI_RESULT_SUCCESS
+                                       // QMI_RESULT_FAILURE
+    USHORT QMUXError;                  // QMI_ERR_INVALID_ARG
+} QMIDMS_SET_EVENT_REPORT_RESP_MSG, *PQMIDMS_SET_EVENT_REPORT_RESP_MSG;
+
+typedef struct _PIN_STATUS
+{
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    UCHAR  ReportPinState;
+} PIN_STATUS, *PPIN_STATUS;
+
+typedef struct _POWER_STATUS
+{
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    UCHAR  PowerStatus;
+    UCHAR  BatteryLvl;
+} POWER_STATUS, *PPOWER_STATUS;
+
+typedef struct _ACTIVATION_STATE
+{
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    USHORT ActivationState;
+} ACTIVATION_STATE, *PACTIVATION_STATE;
+
+typedef struct _ACTIVATION_STATE_REQ
+{
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    UCHAR  ActivationState;
+} ACTIVATION_STATE_REQ, *PACTIVATION_STATE_REQ;
+
+typedef struct _OPERATING_MODE
+{
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    UCHAR  OperatingMode;
+} OPERATING_MODE, *POPERATING_MODE;
+
+typedef struct _UIM_STATE
+{
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    UCHAR  UIMState;
+} UIM_STATE, *PUIM_STATE;
+
+typedef struct _WIRELESS_DISABLE_STATE
+{
+    UCHAR  TLVType;
+    USHORT TLVLength;
+    UCHAR  WirelessDisableState;
+} WIRELESS_DISABLE_STATE, *PWIRELESS_DISABLE_STATE;
+
+typedef struct _QMIDMS_EVENT_REPORT_IND_MSG
+{
+    USHORT Type;
+    USHORT Length;
+} QMIDMS_EVENT_REPORT_IND_MSG, *PQMIDMS_EVENT_REPORT_IND_MSG;
+#endif
+
+// ============================ END OF DMS ===============================
+
+// ======================= QOS ==============================
+typedef struct _MPIOC_DEV_INFO MPIOC_DEV_INFO, *PMPIOC_DEV_INFO;
+
+#define QMI_QOS_SET_EVENT_REPORT_REQ    0x0001
+#define QMI_QOS_SET_EVENT_REPORT_RESP   0x0001
+#define QMI_QOS_SET_EVENT_REPORT_IND    0x0001
+#define QMI_QOS_BIND_DATA_PORT_REQ      0x002B
+#define QMI_QOS_BIND_DATA_PORT_RESP     0x002B
+#define QMI_QOS_INDICATION_REGISTER_REQ 0x002F +#define QMI_QOS_INDICATION_REGISTER_RESP 0x002F +#define QMI_QOS_GLOBAL_QOS_FLOW_IND 0x0031 +#define QMI_QOS_GET_QOS_INFO_REQ 0x0033 +#define QMI_QOS_GET_QOS_INFO_RESP 0x0033 + + +#if 1 +typedef struct _QMI_QOS_SET_EVENT_REPORT_REQ_MSG +{ + USHORT Type; // QMUX type 0x0001 + USHORT Length; + // UCHAR TLVType; // 0x01 - physical link state + // USHORT TLVLength; // 1 + // UCHAR PhyLinkStatusRpt; // 0-enable; 1-disable + UCHAR TLVType2; // 0x02 = global flow reporting + USHORT TLVLength2; // 1 + UCHAR GlobalFlowRpt; // 1-enable; 0-disable +} QMI_QOS_SET_EVENT_REPORT_REQ_MSG, *PQMI_QOS_SET_EVENT_REPORT_REQ_MSG; + +typedef struct _QMI_QOS_SET_EVENT_REPORT_RESP_MSG +{ + USHORT Type; // QMUX type 0x0010 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMUX_RESULT_SUCCESS + // QMUX_RESULT_FAILURE + USHORT QMUXError; // QMUX_ERR_INVALID_ARG + // QMUX_ERR_NO_MEMORY + // QMUX_ERR_INTERNAL + // QMUX_ERR_FAULT +} QMI_QOS_SET_EVENT_REPORT_RESP_MSG, *PQMI_QOS_SET_EVENT_REPORT_RESP_MSG; + +typedef struct _QMI_QOS_SET_EVENT_REPORT_IND_MSG +{ + USHORT Type; // QMUX type 0x0001 + USHORT Length; + UCHAR TLVs; +} QMI_QOS_SET_EVENT_REPORT_IND_MSG, *PQMI_QOS_SET_EVENT_REPORT_IND_MSG; + + +typedef struct _QMI_QOS_BIND_DATA_PORT_TLV_EP_ID +{ + UCHAR TLVType; //0x10 + USHORT TLVLength; + ULONG ep_type; + ULONG iface_id; +} __attribute__ ((packed)) QMI_QOS_BIND_DATA_PORT_TLV_EP_ID, *PQMI_QOS_BIND_DATA_PORT_TLV_EP_ID; + +typedef struct _QMI_QOS_BIND_DATA_PORT_TLV_MUX_ID +{ + UCHAR TLVType; //0x11 + USHORT TLVLength; + UCHAR mux_id; +} __attribute__ ((packed)) QMI_QOS_BIND_DATA_PORT_TLV_MUX_ID, *PQMI_QOS_BIND_DATA_PORT_TLV_MUX_ID; + +typedef struct _QMI_QOS_BIND_DATA_PORT_TLV_DATA_PORT +{ + UCHAR TLVType; //0x12 + USHORT TLVLength; + USHORT data_port; +} __attribute__ ((packed)) QMI_QOS_BIND_DATA_PORT_TLV_DATA_PORT, *PQMI_QOS_BIND_DATA_PORT_TLV_DATA_PORT; + +typedef struct _QMI_QOS_BIND_DATA_PORT_REQ_MSG +{ + USHORT Type; + USHORT Length; + QMI_QOS_BIND_DATA_PORT_TLV_EP_ID EpIdTlv; + QMI_QOS_BIND_DATA_PORT_TLV_MUX_ID MuxIdTlv; + //QMI_QOS_BIND_DATA_PORT_TLV_DATA_PORT DataPortTlv; +} __attribute__ ((packed)) QMI_QOS_BIND_DATA_PORT_REQ_MSG, *PQMI_QOS_BIND_DATA_PORT_REQ_MSG; + +typedef struct _QMI_QOS_BIND_DATA_PORT_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; //0x02 + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} __attribute__ ((packed)) QMI_QOS_BIND_DATA_PORT_RESP_MSG, *PQMI_QOS_BIND_DATA_PORT_RESP_MSG; + +typedef struct _QMI_QOS_INDICATION_REGISTER_TLV_REPORT_GLOBAL_QOS_FLOW +{ + UCHAR TLVType; //0x10 + USHORT TLVLength; + UCHAR report_global_qos_flows; +} __attribute__ ((packed)) QMI_QOS_INDICATION_REGISTER_TLV_REPORT_GLOBAL_QOS_FLOW, *PQMI_QOS_INDICATION_REGISTER_TLV_REPORT_GLOBAL_QOS_FLOW; + +typedef struct _QMI_QOS_INDICATION_REGISTER_TLV_SUPPRESS_REPORT_FLOW_CTL +{ + UCHAR TLVType; //0x11 + USHORT TLVLength; + UCHAR suppress_report_flow_control; +} __attribute__ ((packed)) QMI_QOS_INDICATION_REGISTER_TLV_SUPPRESS_REPORT_FLOW_CTL, *PQMI_QOS_INDICATION_REGISTER_TLV_SUPPRESS_REPORT_FLOW_CTL; + +typedef struct _QMI_QOS_INDICATION_REGISTER_TLV_SUPPRESS_NW_STATUS_IND +{ + UCHAR TLVType; //0x12 + USHORT TLVLength; + UCHAR suppress_network_status_ind; +} __attribute__ ((packed)) QMI_QOS_INDICATION_REGISTER_TLV_SUPPRESS_NW_STATUS_IND, *PQMI_QOS_INDICATION_REGISTER_TLV_SUPPRESS_NW_STATUS_IND; + +typedef struct _QMI_QOS_INDICATION_REGISTER_REQ_MSG +{ + USHORT Type; + USHORT Length; + 
QMI_QOS_INDICATION_REGISTER_TLV_REPORT_GLOBAL_QOS_FLOW ReportGlobalQosFlowTlv; + //QMI_QOS_INDICATION_REGISTER_TLV_SUPPRESS_REPORT_FLOW_CTL SuppressReportFlowCtlTlv; + //QMI_QOS_INDICATION_REGISTER_TLV_SUPPRESS_NW_STATUS_IND SuppressNWStatusIndTlv; +} __attribute__ ((packed)) QMI_QOS_INDICATION_REGISTER_REQ_MSG, *PQMI_QOS_INDICATION_REGISTER_REQ_MSG; + +typedef struct _QMI_QOS_INDICATION_REGISTER_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; //0x02 + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} __attribute__ ((packed)) QMI_QOS_INDICATION_REGISTER_RESP_MSG, *PQMI_QOS_INDICATION_REGISTER_RESP_MSG; + +typedef struct _QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_STATE +{ + UCHAR TLVType; //0x01 + USHORT TLVLength; + ULONG qos_id; + UCHAR new_flow; + ULONG state_change; +} __attribute__ ((packed)) QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_STATE, *PQMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_STATE; + +typedef struct _QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_GRANTED +{ + UCHAR TLVType; //0x10 0x11 + USHORT TLVLength; + ULONG64 flow_valid_params; + ULONG ip_flow_trf_cls; + ULONG64 data_rate_max; + ULONG64 guaranteed_rate; + ULONG peak_rate; + ULONG token_rate; + ULONG bucket_size; + ULONG ip_flow_latency; + ULONG ip_flow_jitter; + USHORT ip_flow_pkt_error_rate_multiplier; + USHORT ip_flow_pkt_error_rate_exponent; + ULONG ip_flow_min_policed_packet_size; + ULONG ip_flow_max_allowed_packet_size; + ULONG ip_flow_3gpp_residual_bit_error_rate; + ULONG ip_flow_3gpp_traffic_handling_priority; + USHORT ip_flow_3gpp2_profile_id; + UCHAR ip_flow_3gpp2_flow_priority; + UCHAR ip_flow_3gpp_im_cn_flag; + UCHAR ip_flow_3gpp_sig_ind; + ULONG ip_flow_lte_qci; +} __attribute__ ((packed)) QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_GRANTED, *PQMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_GRANTED; + +typedef struct _QMI_QOS_GLOBAL_QOS_FLOW_TLV_FILTER +{ + UCHAR TLVType; //0x12 0x13 + USHORT TLVLength; + UCHAR tx_rx_qos_filter_len; + UCHAR ip_version; + ULONG64 valid_params0; + ULONG ipv4_addr0; + ULONG subnet_mask0; + ULONG ipv4_addr1; + ULONG subnet_mask1; + UCHAR val4; + UCHAR mask4; + ULONG64 valid_params01; + UCHAR ipv6_address00; + UCHAR ipv6_address01; + UCHAR ipv6_address02; + UCHAR ipv6_address03; + UCHAR ipv6_address04; + UCHAR ipv6_address05; + UCHAR ipv6_address06; + UCHAR ipv6_address07; + UCHAR ipv6_address08; + UCHAR ipv6_address09; + UCHAR ipv6_address010; + UCHAR ipv6_address011; + UCHAR ipv6_address012; + UCHAR ipv6_address013; + UCHAR ipv6_address014; + ULONG ipv6_address015; + UCHAR prefix_len0; + UCHAR ipv6_address10; + UCHAR ipv6_address11; + UCHAR ipv6_address12; + UCHAR ipv6_address13; + UCHAR ipv6_address14; + UCHAR ipv6_address15; + UCHAR ipv6_address16; + UCHAR ipv6_address17; + UCHAR ipv6_address18; + UCHAR ipv6_address19; + UCHAR ipv6_address110; + UCHAR ipv6_address111; + UCHAR ipv6_address112; + UCHAR ipv6_address113; + UCHAR ipv6_address114; + ULONG ipv6_address115; + UCHAR prefix_len1; + UCHAR val6; + UCHAR mask6; + ULONG flow_label; + ULONG xport_protocol; + ULONG64 valid_params2; + USHORT port0; + USHORT range0; + USHORT port1; + USHORT range1; + ULONG64 valid_params3; + USHORT port2; + USHORT range2; + USHORT port3; + USHORT range3; + ULONG64 valid_params4; + UCHAR type; + UCHAR code; + ULONG64 valid_params5; + ULONG spi0; + ULONG64 valid_params6; + ULONG spi1; + USHORT filter_id; + USHORT filter_precedence; +} __attribute__ ((packed)) QMI_QOS_GLOBAL_QOS_FLOW_TLV_FILTER, *PQMI_QOS_GLOBAL_QOS_FLOW_TLV_FILTER; + +typedef struct _QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_TYPE +{ + UCHAR TLVType; //0x14 + USHORT TLVLength; + 
ULONG flow_type; +} __attribute__ ((packed)) QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_TYPE, *PQMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_TYPE; + +typedef struct _QMI_QOS_GLOBAL_QOS_FLOW_TLV_BEARER_ID +{ + UCHAR TLVType; //0x15 + USHORT TLVLength; + UCHAR bearer_id; +} __attribute__ ((packed)) QMI_QOS_GLOBAL_QOS_FLOW_TLV_BEARER_ID, *PQMI_QOS_GLOBAL_QOS_FLOW_TLV_BEARER_ID; + +typedef struct _QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_CTL_SEQ_NUM +{ + UCHAR TLVType; //0x16 + USHORT TLVLength; + USHORT fc_seq_num; +} __attribute__ ((packed)) QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_CTL_SEQ_NUM, *PQMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_CTL_SEQ_NUM; + +typedef struct _QMI_QOS_GLOBAL_QOS_FLOW_TLV_5G_QCI +{ + UCHAR TLVType; //0x17 0x18 + USHORT TLVLength; + ULONG tx_rx_5g_qci; +} __attribute__ ((packed)) QMI_QOS_GLOBAL_QOS_FLOW_TLV_5G_QCI, *PQMI_QOS_GLOBAL_QOS_FLOW_TLV_5G_QCI; + +typedef struct _QMI_QOS_GLOBAL_QOS_FLOW_TLV_AVG_WINDOW +{ + UCHAR TLVType; //0x19 0x1A + USHORT TLVLength; + USHORT tx_rx_avg_window; +} __attribute__ ((packed)) QMI_QOS_GLOBAL_QOS_FLOW_TLV_AVG_WINDOW, *PQMI_QOS_GLOBAL_QOS_FLOW_TLV_AVG_WINDOW; + +typedef struct _QMI_QOS_GLOBAL_QOS_FLOW_TLV_TX_FILTER_MATCH_ALL +{ + UCHAR TLVType; //0x1B + USHORT TLVLength; + UCHAR tx_filter_match_all_len; + USHORT filter_id; +} __attribute__ ((packed)) QMI_QOS_GLOBAL_QOS_FLOW_TLV_TX_FILTER_MATCH_ALL, *PQMI_QOS_GLOBAL_QOS_FLOW_TLV_TX_FILTER_MATCH_ALL; + +typedef struct _QMI_QOS_GLOBAL_QOS_FLOW_IND_MSG +{ + USHORT Type; + USHORT Length; + QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_STATE FlowStateTlv; + //QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_GRANTED TxFlowGrantedTlv; + //QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_GRANTED RxFlowGrantedTlv; + //QMI_QOS_GLOBAL_QOS_FLOW_TLV_FILTER TxFilterTlv; + //QMI_QOS_GLOBAL_QOS_FLOW_TLV_FILTER RxFilterTlv; + //QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_TYPE FlowTypeTlv; + //QMI_QOS_GLOBAL_QOS_FLOW_TLV_BEARER_ID BearerIdTlv; + //QMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_CTL_SEQ_NUM FlowCtlSeqNumTlv; + //QMI_QOS_GLOBAL_QOS_FLOW_TLV_5G_QCI Tx5GQciTlv; + //QMI_QOS_GLOBAL_QOS_FLOW_TLV_5G_QCI Rx5GQciTlv; + //QMI_QOS_GLOBAL_QOS_FLOW_TLV_AVG_WINDOW AvgWindowTlv; + //QMI_QOS_GLOBAL_QOS_FLOW_TLV_TX_FILTER_MATCH_ALL TxFilterMatchAllTlv; +} __attribute__ ((packed)) QMI_QOS_GLOBAL_QOS_FLOW_IND_MSG, *PQMI_QOS_GLOBAL_QOS_FLOW_IND_MSG; + +typedef struct _QMI_QOS_GET_QOS_INFO_TLV_QOS_ID +{ + UCHAR TLVType; //0x01 + USHORT TLVLength; + ULONG qos_id; +} __attribute__ ((packed)) QMI_QOS_GET_QOS_INFO_TLV_QOS_ID, *PQMI_QOS_GET_QOS_INFO_TLV_QOS_ID; + +typedef struct _QMI_QOS_GET_QOS_INFO_TLV_FLOW_STATUS +{ + UCHAR TLVType; //0x10 + USHORT TLVLength; + UCHAR flow_status; +} __attribute__ ((packed)) QMI_QOS_GET_QOS_INFO_TLV_FLOW_STATUS, *PQMI_QOS_GET_QOS_INFO_TLV_FLOW_STATUS; + +typedef struct _QMI_QOS_GET_QOS_INFO_TLV_GRANTED_FLOW +{ + UCHAR TLVType; //0x11 0x12 + USHORT TLVLength; + ULONG64 flow_valid_params; + ULONG ip_flow_trf_cls; + ULONG64 data_rate_max; + ULONG64 guaranteed_rate; + ULONG peak_rate; + ULONG token_rate; + ULONG bucket_size; + ULONG ip_flow_latency; + ULONG ip_flow_jitter; + USHORT ip_flow_pkt_error_rate_multiplier; + USHORT ip_flow_pkt_error_rate_exponent; + ULONG ip_flow_min_policed_packet_size; + ULONG ip_flow_max_allowed_packet_size; + ULONG ip_flow_3gpp_residual_bit_error_rate; + ULONG ip_flow_3gpp_traffic_handling_priority; + USHORT ip_flow_3gpp2_profile_id; + UCHAR ip_flow_3gpp2_flow_priority; + UCHAR ip_flow_3gpp_im_cn_flag; + UCHAR ip_flow_3gpp_sig_ind; + ULONG ip_flow_lte_qci; +} __attribute__ ((packed)) QMI_QOS_GET_QOS_INFO_TLV_GRANTED_FLOW, *PQMI_QOS_GET_QOS_INFO_TLV_GRANTED_FLOW; + 
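+/*
+ * Illustrative sketch (hypothetical helper, not vendor code): pulling the
+ * optional TX granted-flow TLV (type 0x11, per the comment on the struct
+ * above; 0x12 is its RX counterpart) out of a QMI_QOS_GET_QOS_INFO response
+ * with the qmi_find_tlv() walker sketched earlier in this header. Because
+ * the TLV struct is packed and begins with TLVType/TLVLength, it can be
+ * cast directly onto the TLV bytes; data_rate_max is assumed to be the
+ * granted maximum rate in bits per second.
+ */
+static inline int qmi_qos_tx_rate_max(UCHAR *msg, USHORT msg_len, ULONG64 *rate)
+{
+    UCHAR *tlv = qmi_find_tlv(msg, msg_len, 0x11);
+
+    if (tlv == (UCHAR *)0)
+        return -1;                   /* modem omitted the optional TLV */
+
+    *rate = ((PQMI_QOS_GET_QOS_INFO_TLV_GRANTED_FLOW)tlv)->data_rate_max;
+    return 0;
+}
+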
+typedef struct _QMI_QOS_GET_QOS_INFO_TLV_FILTER_SPECS +{ + UCHAR TLVType; //0x13 0x14 + USHORT TLVLength; + UCHAR tx_rx_qos_filter_len; + UCHAR ip_version; + ULONG64 valid_params0; + ULONG ipv4_addr0; + ULONG subnet_mask0; + ULONG ipv4_addr1; + ULONG subnet_mask1; + UCHAR val4; + UCHAR mask4; + ULONG64 valid_params01; + UCHAR ipv6_address00; + UCHAR ipv6_address01; + UCHAR ipv6_address02; + UCHAR ipv6_address03; + UCHAR ipv6_address04; + UCHAR ipv6_address05; + UCHAR ipv6_address06; + UCHAR ipv6_address07; + UCHAR ipv6_address08; + UCHAR ipv6_address09; + UCHAR ipv6_address010; + UCHAR ipv6_address011; + UCHAR ipv6_address012; + UCHAR ipv6_address013; + UCHAR ipv6_address014; + ULONG ipv6_address015; + UCHAR prefix_len0; + UCHAR ipv6_address10; + UCHAR ipv6_address11; + UCHAR ipv6_address12; + UCHAR ipv6_address13; + UCHAR ipv6_address14; + UCHAR ipv6_address15; + UCHAR ipv6_address16; + UCHAR ipv6_address17; + UCHAR ipv6_address18; + UCHAR ipv6_address19; + UCHAR ipv6_address110; + UCHAR ipv6_address111; + UCHAR ipv6_address112; + UCHAR ipv6_address113; + UCHAR ipv6_address114; + ULONG ipv6_address115; + UCHAR prefix_len1; + UCHAR val6; + UCHAR mask6; + ULONG flow_label; + ULONG xport_protocol; + ULONG64 valid_params2; + USHORT port0; + USHORT range0; + USHORT port1; + USHORT range1; + ULONG64 valid_params3; + USHORT port2; + USHORT range2; + USHORT port3; + USHORT range3; + ULONG64 valid_params4; + UCHAR type; + UCHAR code; + ULONG64 valid_params5; + ULONG spi0; + ULONG64 valid_params6; + ULONG spi1; + USHORT filter_id; + USHORT filter_precedence; +} __attribute__ ((packed)) QMI_QOS_GET_QOS_INFO_TLV_FILTER_SPECS, *PQMI_QOS_GET_QOS_INFO_TLV_FILTER_SPECS; + +typedef struct _QMI_QOS_GET_QOS_INFO_TLV_EXT_ERROR_INFO +{ + UCHAR TLVType; //0x15 + USHORT TLVLength; + USHORT ext_error_info; +} __attribute__ ((packed)) QMI_QOS_GET_QOS_INFO_TLV_EXT_ERROR_INFO, *PQMI_QOS_GET_QOS_INFO_TLV_EXT_ERROR_INFO; + +typedef struct _QMI_QOS_GET_QOS_INFO_TLV_5G_QCI +{ + UCHAR TLVType; //0x16 0x17 + USHORT TLVLength; + ULONG tx_rx_5g_qci; +} __attribute__ ((packed)) QMI_QOS_GET_QOS_INFO_TLV_5G_QCI, *PQMI_QOS_GET_QOS_INFO_TLV_5G_QCI; + +typedef struct _QMI_QOS_GET_QOS_INFO_TLV_AVG_WINDOW +{ + UCHAR TLVType; //0x18 0x19 + USHORT TLVLength; + USHORT tx_rx_averaging_window; +} __attribute__ ((packed)) QMI_QOS_GET_QOS_INFO_TLV_AVG_WINDOW, *PQMI_QOS_GET_QOS_INFO_TLV_AVG_WINDOW; + +typedef struct _QMI_QOS_GET_QOS_INFO_TLV_TX_FILTER_MATCH_ALL +{ + UCHAR TLVType; //0x1A + USHORT TLVLength; + UCHAR tx_filter_match_all_len; + USHORT filter_id; +} __attribute__ ((packed)) QMI_QOS_GET_QOS_INFO_TLV_TX_FILTER_MATCH_ALL, *PQMI_QOS_GET_QOS_INFO_TLV_TX_FILTER_MATCH_ALL; + +typedef struct _QMI_QOS_GET_QOS_INFO_REQ_MSG +{ + USHORT Type; + USHORT Length; + QMI_QOS_GET_QOS_INFO_TLV_QOS_ID QosIdTlv; +} __attribute__ ((packed)) QMI_QOS_GET_QOS_INFO_REQ_MSG, *PQMI_QOS_GET_QOS_INFO_REQ_MSG; + +typedef struct _QMI_QOS_GET_QOS_INFO_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; //0x02 + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; + //QMI_QOS_GET_QOS_INFO_TLV_FLOW_STATUS FlowStatusTlv; + //QMI_QOS_GET_QOS_INFO_TLV_GRANTED_FLOW TxGrantedFlowTlv; + //QMI_QOS_GET_QOS_INFO_TLV_GRANTED_FLOW RxGrantedFlowTlv; + //QMI_QOS_GET_QOS_INFO_TLV_FILTER_SPECS TxFilterSpecsTlv; + //QMI_QOS_GET_QOS_INFO_TLV_FILTER_SPECS RxFilterSpecsTlv; + //QMI_QOS_GET_QOS_INFO_TLV_EXT_ERROR_INFO ExtErrorInfoTlv; + //QMI_QOS_GET_QOS_INFO_TLV_5G_QCI Tx5GQciTlv; + //QMI_QOS_GET_QOS_INFO_TLV_5G_QCI Rx5GQciTlv; + 
//QMI_QOS_GET_QOS_INFO_TLV_AVG_WINDOW TxAvgWindowTlv; + //QMI_QOS_GET_QOS_INFO_TLV_AVG_WINDOW RxAvgWindowTlv; + //QMI_QOS_GET_QOS_INFO_TLV_TX_FILTER_MATCH_ALL TxFilterMatchAllTlv; +} __attribute__ ((packed)) QMI_QOS_GET_QOS_INFO_RESP_MSG, *PQMI_QOS_GET_QOS_INFO_RESP_MSG; + +#define QOS_IND_FLOW_STATE_ACTIVATED 0x00 +#define QOS_IND_FLOW_STATE_MODIFIED 0x01 +#define QOS_IND_FLOW_STATE_DELETED 0x02 +#define QOS_IND_FLOW_STATE_SUSPENDED 0x03 +#define QOS_IND_FLOW_STATE_ENABLED 0x04 +#define QOS_IND_FLOW_STATE_DISABLED 0x05 +#define QOS_IND_FLOW_STATE_INVALID 0x06 + +#define QOS_EVENT_RPT_IND_FLOW_ACTIVATED 0x01 +#define QOS_EVENT_RPT_IND_FLOW_MODIFIED 0x02 +#define QOS_EVENT_RPT_IND_FLOW_DELETED 0x03 +#define QOS_EVENT_RPT_IND_FLOW_SUSPENDED 0x04 +#define QOS_EVENT_RPT_IND_FLOW_ENABLED 0x05 +#define QOS_EVENT_RPT_IND_FLOW_DISABLED 0x06 + +#define QOS_EVENT_RPT_IND_TLV_PHY_LINK_STATE_TYPE 0x01 +#define QOS_EVENT_RPT_IND_TLV_GLOBAL_FL_RPT_STATE 0x10 +#define QOS_EVENT_RPT_IND_TLV_GLOBAL_FL_RPT_TYPE 0x10 +#define QOS_EVENT_RPT_IND_TLV_TX_FLOW_TYPE 0x11 +#define QOS_EVENT_RPT_IND_TLV_RX_FLOW_TYPE 0x12 +#define QOS_EVENT_RPT_IND_TLV_TX_FILTER_TYPE 0x13 +#define QOS_EVENT_RPT_IND_TLV_RX_FILTER_TYPE 0x14 +#define QOS_EVENT_RPT_IND_TLV_FLOW_SPEC 0x10 +#define QOS_EVENT_RPT_IND_TLV_FILTER_SPEC 0x10 + +typedef struct _QOS_EVENT_RPT_IND_TLV_PHY_LINK_STATE +{ + UCHAR TLVType; // 0x01 + USHORT TLVLength; // 1 + UCHAR PhyLinkState; // 0-dormant, 1-active +} QOS_EVENT_RPT_IND_TLV_PHY_LINK_STATE, *PQOS_EVENT_RPT_IND_TLV_PHY_LINK_STATE; + +typedef struct _QOS_EVENT_RPT_IND_TLV_GLOBAL_FL_RPT +{ + UCHAR TLVType; // 0x10 + USHORT TLVLength; // 6 + ULONG QosId; + UCHAR NewFlow; // 1: newly added flow; 0: existing flow + UCHAR StateChange; // 1: activated; 2: modified; 3: deleted; + // 4: suspended(delete); 5: enabled; 6: disabled +} QOS_EVENT_RPT_IND_TLV_GLOBAL_FL_RPT, *PQOS_EVENT_RPT_IND_TLV_GLOBAL_FL_RPT; + +// QOS Flow + +typedef struct _QOS_EVENT_RPT_IND_TLV_FLOW +{ + UCHAR TLVType; // 0x10-TX flow; 0x11-RX flow + USHORT TLVLength; // var + // embedded TLV's +} QOS_EVENT_RPT_IND_TLV_TX_FLOW, *PQOS_EVENT_RPT_IND_TLV_TX_FLOW; + +#define QOS_FLOW_TLV_IP_FLOW_IDX_TYPE 0x10 +#define QOS_FLOW_TLV_IP_FLOW_TRAFFIC_CLASS_TYPE 0x11 +#define QOS_FLOW_TLV_IP_FLOW_DATA_RATE_MIN_MAX_TYPE 0x12 +#define QOS_FLOW_TLV_IP_FLOW_DATA_RATE_TOKEN_BUCKET_TYPE 0x13 +#define QOS_FLOW_TLV_IP_FLOW_LATENCY_TYPE 0x14 +#define QOS_FLOW_TLV_IP_FLOW_JITTER_TYPE 0x15 +#define QOS_FLOW_TLV_IP_FLOW_PKT_ERR_RATE_TYPE 0x16 +#define QOS_FLOW_TLV_IP_FLOW_MIN_PKT_SIZE_TYPE 0x17 +#define QOS_FLOW_TLV_IP_FLOW_MAX_PKT_SIZE_TYPE 0x18 +#define QOS_FLOW_TLV_IP_FLOW_3GPP_BIT_ERR_RATE_TYPE 0x19 +#define QOS_FLOW_TLV_IP_FLOW_3GPP_TRAF_PRIORITY_TYPE 0x1A +#define QOS_FLOW_TLV_IP_FLOW_3GPP2_PROFILE_ID_TYPE 0x1B + +typedef struct _QOS_FLOW_TLV_IP_FLOW_IDX +{ + UCHAR TLVType; // 0x10 + USHORT TLVLength; // 1 + UCHAR IpFlowIndex; +} QOS_FLOW_TLV_IP_FLOW_IDX, *PQOS_FLOW_TLV_IP_FLOW_IDX; + +typedef struct _QOS_FLOW_TLV_IP_FLOW_TRAFFIC_CLASS +{ + UCHAR TLVType; // 0x11 + USHORT TLVLength; // 1 + UCHAR TrafficClass; +} QOS_FLOW_TLV_IP_FLOW_TRAFFIC_CLASS, *PQOS_FLOW_TLV_IP_FLOW_TRAFFIC_CLASS; + +typedef struct _QOS_FLOW_TLV_IP_FLOW_DATA_RATE_MIN_MAX +{ + UCHAR TLVType; // 0x12 + USHORT TLVLength; // 8 + ULONG DataRateMax; + ULONG GuaranteedRate; +} QOS_FLOW_TLV_IP_FLOW_DATA_RATE_MIN_MAX, *PQOS_FLOW_TLV_IP_FLOW_DATA_RATE_MIN_MAX; + +typedef struct _QOS_FLOW_TLV_IP_FLOW_DATA_RATE_TOKEN_BUCKET +{ + UCHAR TLVType; // 0x13 + USHORT TLVLength; // 12 + ULONG PeakRate; + 
ULONG TokenRate; + ULONG BucketSize; +} QOS_FLOW_TLV_IP_FLOW_DATA_RATE_TOKEN_BUCKET, *PQOS_FLOW_TLV_IP_FLOW_DATA_RATE_TOKEN_BUCKET; + +typedef struct _QOS_FLOW_TLV_IP_FLOW_LATENCY +{ + UCHAR TLVType; // 0x14 + USHORT TLVLength; // 4 + ULONG IpFlowLatency; +} QOS_FLOW_TLV_IP_FLOW_LATENCY, *PQOS_FLOW_TLV_IP_FLOW_LATENCY; + +typedef struct _QOS_FLOW_TLV_IP_FLOW_JITTER +{ + UCHAR TLVType; // 0x15 + USHORT TLVLength; // 4 + ULONG IpFlowJitter; +} QOS_FLOW_TLV_IP_FLOW_JITTER, *PQOS_FLOW_TLV_IP_FLOW_JITTER; + +typedef struct _QOS_FLOW_TLV_IP_FLOW_PKT_ERR_RATE +{ + UCHAR TLVType; // 0x16 + USHORT TLVLength; // 4 + USHORT ErrRateMultiplier; + USHORT ErrRateExponent; +} QOS_FLOW_TLV_IP_FLOW_PKT_ERR_RATE, *PQOS_FLOW_TLV_IP_FLOW_PKT_ERR_RATE; + +typedef struct _QOS_FLOW_TLV_IP_FLOW_MIN_PKT_SIZE +{ + UCHAR TLVType; // 0x17 + USHORT TLVLength; // 4 + ULONG MinPolicedPktSize; +} QOS_FLOW_TLV_IP_FLOW_MIN_PKT_SIZE, *PQOS_FLOW_TLV_IP_FLOW_MIN_PKT_SIZE; + +typedef struct _QOS_FLOW_TLV_IP_FLOW_MAX_PKT_SIZE +{ + UCHAR TLVType; // 0x18 + USHORT TLVLength; // 4 + ULONG MaxAllowedPktSize; +} QOS_FLOW_TLV_IP_FLOW_MAX_PKT_SIZE, *PQOS_FLOW_TLV_IP_FLOW_MAX_PKT_SIZE; + +typedef struct _QOS_FLOW_TLV_IP_FLOW_3GPP_BIT_ERR_RATE +{ + UCHAR TLVType; // 0x19 + USHORT TLVLength; // 1 + UCHAR ResidualBitErrorRate; +} QOS_FLOW_TLV_IP_FLOW_3GPP_BIT_ERR_RATE, *PQOS_FLOW_TLV_IP_FLOW_3GPP_BIT_ERR_RATE; + +typedef struct _QOS_FLOW_TLV_IP_FLOW_3GPP_TRAF_PRIORITY +{ + UCHAR TLVType; // 0x1A + USHORT TLVLength; // 1 + UCHAR TrafficHandlingPriority; +} QOS_FLOW_TLV_IP_FLOW_3GPP_TRAF_PRIORITY, *PQOS_FLOW_TLV_IP_FLOW_3GPP_TRAF_PRIORITY; + +typedef struct _QOS_FLOW_TLV_IP_FLOW_3GPP2_PROFILE_ID +{ + UCHAR TLVType; // 0x1B + USHORT TLVLength; // 2 + USHORT ProfileId; +} QOS_FLOW_TLV_IP_FLOW_3GPP2_PROFILE_ID, *PQOS_FLOW_TLV_IP_FLOW_3GPP2_PROFILE_ID; + +// QOS Filter + +#define QOS_FILTER_TLV_IP_FILTER_IDX_TYPE 0x10 +#define QOS_FILTER_TLV_IP_VERSION_TYPE 0x11 +#define QOS_FILTER_TLV_IPV4_SRC_ADDR_TYPE 0x12 +#define QOS_FILTER_TLV_IPV4_DEST_ADDR_TYPE 0x13 +#define QOS_FILTER_TLV_NEXT_HDR_PROTOCOL_TYPE 0x14 +#define QOS_FILTER_TLV_IPV4_TYPE_OF_SERVICE_TYPE 0x15 +#define QOS_FILTER_TLV_TCP_UDP_PORT_SRC_TCP_TYPE 0x1B +#define QOS_FILTER_TLV_TCP_UDP_PORT_DEST_TCP_TYPE 0x1C +#define QOS_FILTER_TLV_TCP_UDP_PORT_SRC_UDP_TYPE 0x1D +#define QOS_FILTER_TLV_TCP_UDP_PORT_DEST_UDP_TYPE 0x1E +#define QOS_FILTER_TLV_ICMP_FILTER_MSG_TYPE_TYPE 0x1F +#define QOS_FILTER_TLV_ICMP_FILTER_MSG_CODE_TYPE 0x20 +#define QOS_FILTER_TLV_TCP_UDP_PORT_SRC_TYPE 0x24 +#define QOS_FILTER_TLV_TCP_UDP_PORT_DEST_TYPE 0x25 + +typedef struct _QOS_EVENT_RPT_IND_TLV_FILTER +{ + UCHAR TLVType; // 0x12-TX filter; 0x13-RX filter + USHORT TLVLength; // var + // embedded TLV's +} QOS_EVENT_RPT_IND_TLV_RX_FILTER, *PQOS_EVENT_RPT_IND_TLV_RX_FILTER; + +typedef struct _QOS_FILTER_TLV_IP_FILTER_IDX +{ + UCHAR TLVType; // 0x10 + USHORT TLVLength; // 1 + UCHAR IpFilterIndex; +} QOS_FILTER_TLV_IP_FILTER_IDX, *PQOS_FILTER_TLV_IP_FILTER_IDX; + +typedef struct _QOS_FILTER_TLV_IP_VERSION +{ + UCHAR TLVType; // 0x11 + USHORT TLVLength; // 1 + UCHAR IpVersion; +} QOS_FILTER_TLV_IP_VERSION, *PQOS_FILTER_TLV_IP_VERSION; + +typedef struct _QOS_FILTER_TLV_IPV4_SRC_ADDR +{ + UCHAR TLVType; // 0x12 + USHORT TLVLength; // 8 + ULONG IpSrcAddr; + ULONG IpSrcSubnetMask; +} QOS_FILTER_TLV_IPV4_SRC_ADDR, *PQOS_FILTER_TLV_IPV4_SRC_ADDR; + +typedef struct _QOS_FILTER_TLV_IPV4_DEST_ADDR +{ + UCHAR TLVType; // 0x13 + USHORT TLVLength; // 8 + ULONG IpDestAddr; + ULONG IpDestSubnetMask; +} 
QOS_FILTER_TLV_IPV4_DEST_ADDR, *PQOS_FILTER_TLV_IPV4_DEST_ADDR; + +typedef struct _QOS_FILTER_TLV_NEXT_HDR_PROTOCOL +{ + UCHAR TLVType; // 0x14 + USHORT TLVLength; // 1 + UCHAR NextHdrProtocol; +} QOS_FILTER_TLV_NEXT_HDR_PROTOCOL, *PQOS_FILTER_TLV_NEXT_HDR_PROTOCOL; + +typedef struct _QOS_FILTER_TLV_IPV4_TYPE_OF_SERVICE +{ + UCHAR TLVType; // 0x15 + USHORT TLVLength; // 2 + UCHAR Ipv4TypeOfService; + UCHAR Ipv4TypeOfServiceMask; +} QOS_FILTER_TLV_IPV4_TYPE_OF_SERVICE, *PQOS_FILTER_TLV_IPV4_TYPE_OF_SERVICE; + +typedef struct _QOS_FILTER_TLV_TCP_UDP_PORT +{ + UCHAR TLVType; // source port: 0x1B-TCP; 0x1D-UDP + // dest port: 0x1C-TCP; 0x1E-UDP + USHORT TLVLength; // 4 + USHORT FilterPort; + USHORT FilterPortRange; +} QOS_FILTER_TLV_TCP_UDP_PORT, *PQOS_FILTER_TLV_TCP_UDP_PORT; + +typedef struct _QOS_FILTER_TLV_ICMP_FILTER_MSG_TYPE +{ + UCHAR TLVType; // 0x1F + USHORT TLVLength; // 1 + UCHAR IcmpFilterMsgType; +} QOS_FILTER_TLV_ICMP_FILTER_MSG_TYPE, *PQOS_FILTER_TLV_ICMP_FILTER_MSG_TYPE; + +typedef struct _QOS_FILTER_TLV_ICMP_FILTER_MSG_CODE +{ + UCHAR TLVType; // 0x20 + USHORT TLVLength; // 1 + UCHAR IcmpFilterMsgCode; +} QOS_FILTER_TLV_ICMP_FILTER_MSG_CODE, *PQOS_FILTER_TLV_ICMP_FILTER_MSG_CODE; + +#define QOS_FILTER_PRECEDENCE_INVALID 256 +#define QOS_FILTER_TLV_PRECEDENCE_TYPE 0x22 +#define QOS_FILTER_TLV_ID_TYPE 0x23 + +typedef struct _QOS_FILTER_TLV_PRECEDENCE +{ + UCHAR TLVType; // 0x22 + USHORT TLVLength; // 2 + USHORT Precedence; // precedence of the filter +} QOS_FILTER_TLV_PRECEDENCE, *PQOS_FILTER_TLV_PRECEDENCE; + +typedef struct _QOS_FILTER_TLV_ID +{ + UCHAR TLVType; // 0x23 + USHORT TLVLength; // 2 + USHORT FilterId; // filter ID +} QOS_FILTER_TLV_ID, *PQOS_FILTER_TLV_ID; + +#ifdef QCQOS_IPV6 + +#define QOS_FILTER_TLV_IPV6_SRC_ADDR_TYPE 0x16 +#define QOS_FILTER_TLV_IPV6_DEST_ADDR_TYPE 0x17 +#define QOS_FILTER_TLV_IPV6_NEXT_HDR_PROTOCOL_TYPE 0x14 // same as IPV4 +#define QOS_FILTER_TLV_IPV6_TRAFFIC_CLASS_TYPE 0x19 +#define QOS_FILTER_TLV_IPV6_FLOW_LABEL_TYPE 0x1A + +typedef struct _QOS_FILTER_TLV_IPV6_SRC_ADDR +{ + UCHAR TLVType; // 0x16 + USHORT TLVLength; // 17 + UCHAR IpSrcAddr[16]; + UCHAR IpSrcAddrPrefixLen; // [0..128] +} QOS_FILTER_TLV_IPV6_SRC_ADDR, *PQOS_FILTER_TLV_IPV6_SRC_ADDR; + +typedef struct _QOS_FILTER_TLV_IPV6_DEST_ADDR +{ + UCHAR TLVType; // 0x17 + USHORT TLVLength; // 17 + UCHAR IpDestAddr[16]; + UCHAR IpDestAddrPrefixLen; // [0..128] +} QOS_FILTER_TLV_IPV6_DEST_ADDR, *PQOS_FILTER_TLV_IPV6_DEST_ADDR; + +#define QOS_FILTER_IPV6_NEXT_HDR_PROTOCOL_TCP 0x06 +#define QOS_FILTER_IPV6_NEXT_HDR_PROTOCOL_UDP 0x11 + +typedef struct _QOS_FILTER_TLV_IPV6_TRAFFIC_CLASS +{ + UCHAR TLVType; // 0x19 + USHORT TLVLength; // 2 + UCHAR TrafficClass; + UCHAR TrafficClassMask; // compare the first 6 bits only +} QOS_FILTER_TLV_IPV6_TRAFFIC_CLASS, *PQOS_FILTER_TLV_IPV6_TRAFFIC_CLASS; + +typedef struct _QOS_FILTER_TLV_IPV6_FLOW_LABEL +{ + UCHAR TLVType; // 0x1A + USHORT TLVLength; // 4 + ULONG FlowLabel; +} QOS_FILTER_TLV_IPV6_FLOW_LABEL, *PQOS_FILTER_TLV_IPV6_FLOW_LABEL; + +#endif // QCQOS_IPV6 +#endif + +// ======================= WMS ============================== +#define QMIWMS_SET_EVENT_REPORT_REQ 0x0001 +#define QMIWMS_SET_EVENT_REPORT_RESP 0x0001 +#define QMIWMS_EVENT_REPORT_IND 0x0001 +#define QMIWMS_RAW_SEND_REQ 0x0020 +#define QMIWMS_RAW_SEND_RESP 0x0020 +#define QMIWMS_RAW_WRITE_REQ 0x0021 +#define QMIWMS_RAW_WRITE_RESP 0x0021 +#define QMIWMS_RAW_READ_REQ 0x0022 +#define QMIWMS_RAW_READ_RESP 0x0022 +#define QMIWMS_MODIFY_TAG_REQ 0x0023 +#define QMIWMS_MODIFY_TAG_RESP 0x0023 
+#define QMIWMS_DELETE_REQ 0x0024 +#define QMIWMS_DELETE_RESP 0x0024 +#define QMIWMS_GET_MESSAGE_PROTOCOL_REQ 0x0030 +#define QMIWMS_GET_MESSAGE_PROTOCOL_RESP 0x0030 +#define QMIWMS_LIST_MESSAGES_REQ 0x0031 +#define QMIWMS_LIST_MESSAGES_RESP 0x0031 +#define QMIWMS_GET_SMSC_ADDRESS_REQ 0x0034 +#define QMIWMS_GET_SMSC_ADDRESS_RESP 0x0034 +#define QMIWMS_SET_SMSC_ADDRESS_REQ 0x0035 +#define QMIWMS_SET_SMSC_ADDRESS_RESP 0x0035 +#define QMIWMS_GET_STORE_MAX_SIZE_REQ 0x0036 +#define QMIWMS_GET_STORE_MAX_SIZE_RESP 0x0036 + + +#define WMS_MESSAGE_PROTOCOL_CDMA 0x00 +#define WMS_MESSAGE_PROTOCOL_WCDMA 0x01 + +#if 0 +typedef struct _QMIWMS_GET_MESSAGE_PROTOCOL_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; +} QMIWMS_GET_MESSAGE_PROTOCOL_REQ_MSG, *PQMIWMS_GET_MESSAGE_PROTOCOL_REQ_MSG; + +typedef struct _QMIWMS_GET_MESSAGE_PROTOCOL_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; + UCHAR TLV2Type; + USHORT TLV2Length; + UCHAR MessageProtocol; +} QMIWMS_GET_MESSAGE_PROTOCOL_RESP_MSG, *PQMIWMS_GET_MESSAGE_PROTOCOL_RESP_MSG; + +typedef struct _QMIWMS_GET_STORE_MAX_SIZE_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR StorageType; +} QMIWMS_GET_STORE_MAX_SIZE_REQ_MSG, *PQMIWMS_GET_STORE_MAX_SIZE_REQ_MSG; + +typedef struct _QMIWMS_GET_STORE_MAX_SIZE_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; + UCHAR TLV2Type; + USHORT TLV2Length; + ULONG MemStoreMaxSize; +} QMIWMS_GET_STORE_MAX_SIZE_RESP_MSG, *PQMIWMS_GET_STORE_MAX_SIZE_RESP_MSG; + +typedef struct _REQUEST_TAG +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR TagType; +} REQUEST_TAG, *PREQUEST_TAG; + +typedef struct _QMIWMS_LIST_MESSAGES_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR StorageType; +} QMIWMS_LIST_MESSAGES_REQ_MSG, *PQMIWMS_LIST_MESSAGES_REQ_MSG; + +typedef struct _QMIWMS_MESSAGE +{ + ULONG MessageIndex; + UCHAR TagType; +} QMIWMS_MESSAGE, *PQMIWMS_MESSAGE; + +typedef struct _QMIWMS_LIST_MESSAGES_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; + UCHAR TLV2Type; + USHORT TLV2Length; + ULONG NumMessages; +} QMIWMS_LIST_MESSAGES_RESP_MSG, *PQMIWMS_LIST_MESSAGES_RESP_MSG; + +typedef struct _QMIWMS_RAW_READ_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR StorageType; + ULONG MemoryIndex; +} QMIWMS_RAW_READ_REQ_MSG, *PQMIWMS_RAW_READ_REQ_MSG; + +typedef struct _QMIWMS_RAW_READ_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; + UCHAR TLV2Type; + USHORT TLV2Length; + UCHAR TagType; + UCHAR Format; + USHORT MessageLength; + UCHAR Message; +} QMIWMS_RAW_READ_RESP_MSG, *PQMIWMS_RAW_READ_RESP_MSG; + +typedef struct _QMIWMS_MODIFY_TAG_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR StorageType; + ULONG MemoryIndex; + UCHAR TagType; +} QMIWMS_MODIFY_TAG_REQ_MSG, *PQMIWMS_MODIFY_TAG_REQ_MSG; + +typedef struct _QMIWMS_MODIFY_TAG_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} QMIWMS_MODIFY_TAG_RESP_MSG, *PQMIWMS_MODIFY_TAG_RESP_MSG; + +typedef struct _QMIWMS_RAW_SEND_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR SmsFormat; + USHORT SmsLength; + UCHAR SmsMessage; +} 
QMIWMS_RAW_SEND_REQ_MSG, *PQMIWMS_RAW_SEND_REQ_MSG; + +typedef struct _RAW_SEND_CAUSE_CODE +{ + UCHAR TLVType; + USHORT TLVLength; + USHORT CauseCode; +} RAW_SEND_CAUSE_CODE, *PRAW_SEND_CAUSE_CODE; + + +typedef struct _QMIWMS_RAW_SEND_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} QMIWMS_RAW_SEND_RESP_MSG, *PQMIWMS_RAW_SEND_RESP_MSG; + + +typedef struct _WMS_DELETE_MESSAGE_INDEX +{ + UCHAR TLVType; + USHORT TLVLength; + ULONG MemoryIndex; +} WMS_DELETE_MESSAGE_INDEX, *PWMS_DELETE_MESSAGE_INDEX; + +typedef struct _WMS_DELETE_MESSAGE_TAG +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR MessageTag; +} WMS_DELETE_MESSAGE_TAG, *PWMS_DELETE_MESSAGE_TAG; + +typedef struct _QMIWMS_DELETE_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR StorageType; +} QMIWMS_DELETE_REQ_MSG, *PQMIWMS_DELETE_REQ_MSG; + +typedef struct _QMIWMS_DELETE_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} QMIWMS_DELETE_RESP_MSG, *PQMIWMS_DELETE_RESP_MSG; + + +typedef struct _QMIWMS_GET_SMSC_ADDRESS_REQ_MSG +{ + USHORT Type; + USHORT Length; +} QMIWMS_GET_SMSC_ADDRESS_REQ_MSG, *PQMIWMS_GET_SMSC_ADDRESS_REQ_MSG; + +typedef struct _QMIWMS_SMSC_ADDRESS +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR SMSCAddressType[3]; + UCHAR SMSCAddressLength; + UCHAR SMSCAddressDigits; +} QMIWMS_SMSC_ADDRESS, *PQMIWMS_SMSC_ADDRESS; + + +typedef struct _QMIWMS_GET_SMSC_ADDRESS_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; + UCHAR SMSCAddress; +} QMIWMS_GET_SMSC_ADDRESS_RESP_MSG, *PQMIWMS_GET_SMSC_ADDRESS_RESP_MSG; + +typedef struct _QMIWMS_SET_SMSC_ADDRESS_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR SMSCAddress; +} QMIWMS_SET_SMSC_ADDRESS_REQ_MSG, *PQMIWMS_SET_SMSC_ADDRESS_REQ_MSG; + +typedef struct _QMIWMS_SET_SMSC_ADDRESS_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} QMIWMS_SET_SMSC_ADDRESS_RESP_MSG, *PQMIWMS_SET_SMSC_ADDRESS_RESP_MSG; + +typedef struct _QMIWMS_SET_EVENT_REPORT_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR ReportNewMessage; +} QMIWMS_SET_EVENT_REPORT_REQ_MSG, *PQMIWMS_SET_EVENT_REPORT_REQ_MSG; + +typedef struct _QMIWMS_SET_EVENT_REPORT_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} QMIWMS_SET_EVENT_REPORT_RESP_MSG, *PQMIWMS_SET_EVENT_REPORT_RESP_MSG; + +typedef struct _QMIWMS_EVENT_REPORT_IND_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR StorageType; + ULONG StorageIndex; +} QMIWMS_EVENT_REPORT_IND_MSG, *PQMIWMS_EVENT_REPORT_IND_MSG; +#endif + +// ======================= End of WMS ============================== + + +// ======================= NAS ============================== +#define QMINAS_SET_EVENT_REPORT_REQ 0x0002 +#define QMINAS_SET_EVENT_REPORT_RESP 0x0002 +#define QMINAS_EVENT_REPORT_IND 0x0002 +#define QMINAS_GET_SIGNAL_STRENGTH_REQ 0x0020 +#define QMINAS_GET_SIGNAL_STRENGTH_RESP 0x0020 +#define QMINAS_PERFORM_NETWORK_SCAN_REQ 0x0021 +#define QMINAS_PERFORM_NETWORK_SCAN_RESP 0x0021 +#define QMINAS_INITIATE_NW_REGISTER_REQ 0x0022 +#define QMINAS_INITIATE_NW_REGISTER_RESP 0x0022 +#define QMINAS_INITIATE_ATTACH_REQ 0x0023 +#define QMINAS_INITIATE_ATTACH_RESP 0x0023 +#define 
QMINAS_GET_SERVING_SYSTEM_REQ 0x0024 +#define QMINAS_GET_SERVING_SYSTEM_RESP 0x0024 +#define QMINAS_SERVING_SYSTEM_IND 0x0024 +#define QMINAS_GET_HOME_NETWORK_REQ 0x0025 +#define QMINAS_GET_HOME_NETWORK_RESP 0x0025 +#define QMINAS_GET_PREFERRED_NETWORK_REQ 0x0026 +#define QMINAS_GET_PREFERRED_NETWORK_RESP 0x0026 +#define QMINAS_SET_PREFERRED_NETWORK_REQ 0x0027 +#define QMINAS_SET_PREFERRED_NETWORK_RESP 0x0027 +#define QMINAS_GET_FORBIDDEN_NETWORK_REQ 0x0028 +#define QMINAS_GET_FORBIDDEN_NETWORK_RESP 0x0028 +#define QMINAS_SET_FORBIDDEN_NETWORK_REQ 0x0029 +#define QMINAS_SET_FORBIDDEN_NETWORK_RESP 0x0029 +#define QMINAS_SET_TECHNOLOGY_PREF_REQ 0x002A +#define QMINAS_SET_TECHNOLOGY_PREF_RESP 0x002A +#define QMINAS_GET_RF_BAND_INFO_REQ 0x0031 +#define QMINAS_GET_RF_BAND_INFO_RESP 0x0031 +#define QMINAS_GET_CELL_LOCATION_INFO_REQ 0x0043 +#define QMINAS_GET_CELL_LOCATION_INFO_RESP 0x0043 +#define QMINAS_GET_PLMN_NAME_REQ 0x0044 +#define QMINAS_GET_PLMN_NAME_RESP 0x0044 +#define QUECTEL_PACKET_TRANSFER_START_IND 0X100 +#define QUECTEL_PACKET_TRANSFER_END_IND 0X101 +#define QMINAS_GET_SYS_INFO_REQ 0x004D +#define QMINAS_GET_SYS_INFO_RESP 0x004D +#define QMINAS_SYS_INFO_IND 0x004E +#define QMINAS_GET_SIG_INFO_REQ 0x004F +#define QMINAS_GET_SIG_INFO_RESP 0x004F + +typedef struct _QMINAS_GET_HOME_NETWORK_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; +} __attribute__ ((packed)) QMINAS_GET_HOME_NETWORK_REQ_MSG, *PQMINAS_GET_HOME_NETWORK_REQ_MSG; + +typedef struct _HOME_NETWORK_SYSTEMID +{ + UCHAR TLVType; + USHORT TLVLength; + USHORT SystemID; + USHORT NetworkID; +} __attribute__ ((packed)) HOME_NETWORK_SYSTEMID, *PHOME_NETWORK_SYSTEMID; + +typedef struct _HOME_NETWORK +{ + UCHAR TLVType; + USHORT TLVLength; + USHORT MobileCountryCode; + USHORT MobileNetworkCode; + UCHAR NetworkDesclen; + UCHAR NetworkDesc; +} __attribute__ ((packed)) HOME_NETWORK, *PHOME_NETWORK; + +#if 0 +typedef struct _HOME_NETWORK_EXT +{ + UCHAR TLVType; + USHORT TLVLength; + USHORT MobileCountryCode; + USHORT MobileNetworkCode; + UCHAR NetworkDescDisp; + UCHAR NetworkDescEncoding; + UCHAR NetworkDesclen; + UCHAR NetworkDesc; +} HOME_NETWORK_EXT, *PHOME_NETWORK_EXT; + +typedef struct _QMINAS_GET_HOME_NETWORK_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} QMINAS_GET_HOME_NETWORK_RESP_MSG, *PQMINAS_GET_HOME_NETWORK_RESP_MSG; + +typedef struct _QMINAS_GET_PREFERRED_NETWORK_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; +} QMINAS_GET_PREFERRED_NETWORK_REQ_MSG, *PQMINAS_GET_PREFERRED_NETWORK_REQ_MSG; + + +typedef struct _PREFERRED_NETWORK +{ + USHORT MobileCountryCode; + USHORT MobileNetworkCode; + USHORT RadioAccess; +} PREFERRED_NETWORK, *PPREFERRED_NETWORK; + +typedef struct _QMINAS_GET_PREFERRED_NETWORK_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + UCHAR TLV2Type; // 0x01 - required parameter + USHORT TLV2Length; // length of the mfr string + USHORT NumPreferredNetwork; +} QMINAS_GET_PREFERRED_NETWORK_RESP_MSG, *PQMINAS_GET_PREFERRED_NETWORK_RESP_MSG; + +typedef struct _QMINAS_GET_FORBIDDEN_NETWORK_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; +} QMINAS_GET_FORBIDDEN_NETWORK_REQ_MSG, *PQMINAS_GET_FORBIDDEN_NETWORK_REQ_MSG; + +typedef struct 
_FORBIDDEN_NETWORK +{ + USHORT MobileCountryCode; + USHORT MobileNetworkCode; +} FORBIDDEN_NETWORK, *PFORBIDDEN_NETWORK; + +typedef struct _QMINAS_GET_FORBIDDEN_NETWORK_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + UCHAR TLV2Type; // 0x01 - required parameter + USHORT TLV2Length; // length of the mfr string + USHORT NumForbiddenNetwork; +} QMINAS_GET_FORBIDDEN_NETWORK_RESP_MSG, *PQMINAS_GET_FORBIDDEN_NETWORK_RESP_MSG; + +typedef struct _QMINAS_GET_SERVING_SYSTEM_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; +} QMINAS_GET_SERVING_SYSTEM_REQ_MSG, *PQMINAS_GET_SERVING_SYSTEM_REQ_MSG; + +typedef struct _QMINAS_ROAMING_INDICATOR_MSG +{ + UCHAR TLVType; // 0x01 - required parameter + USHORT TLVLength; // length of the mfr string + UCHAR RoamingIndicator; +} QMINAS_ROAMING_INDICATOR_MSG, *PQMINAS_ROAMING_INDICATOR_MSG; +#endif + +typedef struct _QMINAS_DATA_CAP +{ + UCHAR TLVType; // 0x01 - required parameter + USHORT TLVLength; // length of the mfr string + UCHAR DataCapListLen; + UCHAR DataCap; +} __attribute__ ((packed)) QMINAS_DATA_CAP, *PQMINAS_DATA_CAP; + +typedef struct _QMINAS_CURRENT_PLMN_MSG +{ + UCHAR TLVType; // 0x01 - required parameter + USHORT TLVLength; // length of the mfr string + USHORT MobileCountryCode; + USHORT MobileNetworkCode; + UCHAR NetworkDesclen; + UCHAR NetworkDesc; +} __attribute__ ((packed)) QMINAS_CURRENT_PLMN_MSG, *PQMINAS_CURRENT_PLMN_MSG; + +typedef struct _QMINAS_GET_SERVING_SYSTEM_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} __attribute__ ((packed)) QMINAS_GET_SERVING_SYSTEM_RESP_MSG, *PQMINAS_GET_SERVING_SYSTEM_RESP_MSG; + +typedef struct _SERVING_SYSTEM +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR RegistrationState; + UCHAR CSAttachedState; + UCHAR PSAttachedState; + UCHAR RegistredNetwork; + UCHAR InUseRadioIF; + UCHAR RadioIF; +} __attribute__ ((packed)) SERVING_SYSTEM, *PSERVING_SYSTEM; + +typedef struct _QMINAS_GET_SYS_INFO_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} __attribute__ ((packed)) QMINAS_GET_SYS_INFO_RESP_MSG, *PQMINAS_GET_SYS_INFO_RESP_MSG; + +typedef struct _QMINAS_SYS_INFO_IND_MSG +{ + USHORT Type; + USHORT Length; +} __attribute__ ((packed)) QMINAS_SYS_INFO_IND_MSG, *PQMINAS_SYS_INFO_IND_MSG; + +typedef struct _SERVICE_STATUS_INFO +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR SrvStatus; + UCHAR true_srv_status; + UCHAR IsPrefDataPath; +} __attribute__ ((packed)) SERVICE_STATUS_INFO, *PSERVICE_STATUS_INFO; + +typedef struct _CDMA_SYSTEM_INFO +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR SrvDomainValid; + UCHAR SrvDomain; + UCHAR SrvCapabilityValid; + UCHAR SrvCapability; + UCHAR RoamStatusValid; + UCHAR RoamStatus; + UCHAR IsSysForbiddenValid; + UCHAR IsSysForbidden; + UCHAR IsSysPrlMatchValid; + UCHAR IsSysPrlMatch; + UCHAR PRevInUseValid; + UCHAR PRevInUse; + UCHAR BSPRevValid; + UCHAR BSPRev; + UCHAR CCSSupportedValid; + UCHAR CCSSupported; + UCHAR CDMASysIdValid; + USHORT SID; + USHORT NID; + UCHAR BSInfoValid; + USHORT BaseID; + ULONG BaseLAT; + ULONG BaseLONG; + UCHAR PacketZoneValid; + USHORT PacketZone; + UCHAR NetworkIdValid; + UCHAR MCC[3]; + UCHAR MNC[3]; +} __attribute__ ((packed)) 
CDMA_SYSTEM_INFO, *PCDMA_SYSTEM_INFO; + +typedef struct _HDR_SYSTEM_INFO +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR SrvDomainValid; + UCHAR SrvDomain; + UCHAR SrvCapabilityValid; + UCHAR SrvCapability; + UCHAR RoamStatusValid; + UCHAR RoamStatus; + UCHAR IsSysForbiddenValid; + UCHAR IsSysForbidden; + UCHAR IsSysPrlMatchValid; + UCHAR IsSysPrlMatch; + UCHAR HdrPersonalityValid; + UCHAR HdrPersonality; + UCHAR HdrActiveProtValid; + UCHAR HdrActiveProt; + UCHAR is856SysIdValid; + UCHAR is856SysId[16]; +} __attribute__ ((packed)) HDR_SYSTEM_INFO, *PHDR_SYSTEM_INFO; + +typedef struct _GSM_SYSTEM_INFO +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR SrvDomainValid; + UCHAR SrvDomain; + UCHAR SrvCapabilityValid; + UCHAR SrvCapability; + UCHAR RoamStatusValid; + UCHAR RoamStatus; + UCHAR IsSysForbiddenValid; + UCHAR IsSysForbidden; + UCHAR LacValid; + USHORT Lac; + UCHAR CellIdValid; + ULONG CellId; + UCHAR RegRejectInfoValid; + UCHAR RejectSrvDomain; + UCHAR RejCause; + UCHAR NetworkIdValid; + UCHAR MCC[3]; + UCHAR MNC[3]; + UCHAR EgprsSuppValid; + UCHAR EgprsSupp; + UCHAR DtmSuppValid; + UCHAR DtmSupp; +} __attribute__ ((packed)) GSM_SYSTEM_INFO, *PGSM_SYSTEM_INFO; + +typedef struct _WCDMA_SYSTEM_INFO +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR SrvDomainValid; + UCHAR SrvDomain; + UCHAR SrvCapabilityValid; + UCHAR SrvCapability; + UCHAR RoamStatusValid; + UCHAR RoamStatus; + UCHAR IsSysForbiddenValid; + UCHAR IsSysForbidden; + UCHAR LacValid; + USHORT Lac; + UCHAR CellIdValid; + ULONG CellId; + UCHAR RegRejectInfoValid; + UCHAR RejectSrvDomain; + UCHAR RejCause; + UCHAR NetworkIdValid; + UCHAR MCC[3]; + UCHAR MNC[3]; + UCHAR HsCallStatusValid; + UCHAR HsCallStatus; + UCHAR HsIndValid; + UCHAR HsInd; + UCHAR PscValid; + UCHAR Psc; +} __attribute__ ((packed)) WCDMA_SYSTEM_INFO, *PWCDMA_SYSTEM_INFO; + +typedef struct _LTE_SYSTEM_INFO +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR SrvDomainValid; + UCHAR SrvDomain; + UCHAR SrvCapabilityValid; + UCHAR SrvCapability; + UCHAR RoamStatusValid; + UCHAR RoamStatus; + UCHAR IsSysForbiddenValid; + UCHAR IsSysForbidden; + UCHAR LacValid; + USHORT Lac; + UCHAR CellIdValid; + ULONG CellId; + UCHAR RegRejectInfoValid; + UCHAR RejectSrvDomain; + UCHAR RejCause; + UCHAR NetworkIdValid; + UCHAR MCC[3]; + UCHAR MNC[3]; + UCHAR TacValid; + USHORT Tac; +} __attribute__ ((packed)) LTE_SYSTEM_INFO, *PLTE_SYSTEM_INFO; + +typedef struct _TDSCDMA_SYSTEM_INFO +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR SrvDomainValid; + UCHAR SrvDomain; + UCHAR SrvCapabilityValid; + UCHAR SrvCapability; + UCHAR RoamStatusValid; + UCHAR RoamStatus; + UCHAR IsSysForbiddenValid; + UCHAR IsSysForbidden; + UCHAR LacValid; + USHORT Lac; + UCHAR CellIdValid; + ULONG CellId; + UCHAR RegRejectInfoValid; + UCHAR RejectSrvDomain; + UCHAR RejCause; + UCHAR NetworkIdValid; + UCHAR MCC[3]; + UCHAR MNC[3]; + UCHAR HsCallStatusValid; + UCHAR HsCallStatus; + UCHAR HsIndValid; + UCHAR HsInd; + UCHAR CellParameterIdValid; + USHORT CellParameterId; + UCHAR CellBroadcastCapValid; + ULONG CellBroadcastCap; + UCHAR CsBarStatusValid; + ULONG CsBarStatus; + UCHAR PsBarStatusValid; + ULONG PsBarStatus; + UCHAR CipherDomainValid; + UCHAR CipherDomain; +} __attribute__ ((packed)) TDSCDMA_SYSTEM_INFO, *PTDSCDMA_SYSTEM_INFO; + +typedef enum { + NAS_SYS_SRV_STATUS_NO_SRV_V01 = 0, + NAS_SYS_SRV_STATUS_LIMITED_V01 = 1, + NAS_SYS_SRV_STATUS_SRV_V01 = 2, + NAS_SYS_SRV_STATUS_LIMITED_REGIONAL_V01 = 3, + NAS_SYS_SRV_STATUS_PWR_SAVE_V01 = 4, +}nas_service_status_enum_type_v01; + +typedef enum { + 
SYS_SRV_DOMAIN_NO_SRV_V01 = 0, + SYS_SRV_DOMAIN_CS_ONLY_V01 = 1, + SYS_SRV_DOMAIN_PS_ONLY_V01 = 2, + SYS_SRV_DOMAIN_CS_PS_V01 = 3, + SYS_SRV_DOMAIN_CAMPED_V01 = 4, +}nas_service_domain_enum_type_v01; + +typedef enum { + QMI_NAS_RADIO_INTERFACE_UNKNOWN = -1, + QMI_NAS_RADIO_INTERFACE_NONE = 0x00, + QMI_NAS_RADIO_INTERFACE_CDMA_1X = 0x01, + QMI_NAS_RADIO_INTERFACE_CDMA_1XEVDO = 0x02, + QMI_NAS_RADIO_INTERFACE_AMPS = 0x03, + QMI_NAS_RADIO_INTERFACE_GSM = 0x04, + QMI_NAS_RADIO_INTERFACE_UMTS = 0x05, + QMI_NAS_RADIO_INTERFACE_LTE = 0x08, + QMI_NAS_RADIO_INTERFACE_TD_SCDMA = 0x09, + QMI_NAS_RADIO_INTERFACE_5GNR = 0x0C, +} QMI_NAS_RADIO_INTERFACE_E; + +typedef enum { + QMI_NAS_ACTIVE_BAND_BC_0 = 0, + QMI_NAS_ACTIVE_BAND_BC_1 = 1, + QMI_NAS_ACTIVE_BAND_BC_2 = 2, + QMI_NAS_ACTIVE_BAND_BC_3 = 3, + QMI_NAS_ACTIVE_BAND_BC_4 = 4, + QMI_NAS_ACTIVE_BAND_BC_5 = 5, + QMI_NAS_ACTIVE_BAND_BC_6 = 6, + QMI_NAS_ACTIVE_BAND_BC_7 = 7, + QMI_NAS_ACTIVE_BAND_BC_8 = 8, + QMI_NAS_ACTIVE_BAND_BC_9 = 9, + QMI_NAS_ACTIVE_BAND_BC_10 = 10, + QMI_NAS_ACTIVE_BAND_BC_11 = 11, + QMI_NAS_ACTIVE_BAND_BC_12 = 12, + QMI_NAS_ACTIVE_BAND_BC_13 = 13, + QMI_NAS_ACTIVE_BAND_BC_14 = 14, + QMI_NAS_ACTIVE_BAND_BC_15 = 15, + QMI_NAS_ACTIVE_BAND_BC_16 = 16, + QMI_NAS_ACTIVE_BAND_BC_17 = 17, + QMI_NAS_ACTIVE_BAND_BC_18 = 18, + QMI_NAS_ACTIVE_BAND_BC_19 = 19, + QMI_NAS_ACTIVE_BAND_GSM_450 = 40, + QMI_NAS_ACTIVE_BAND_GSM_480 = 41, + QMI_NAS_ACTIVE_BAND_GSM_750 = 42, + QMI_NAS_ACTIVE_BAND_GSM_850 = 43, + QMI_NAS_ACTIVE_BAND_GSM_900_EXTENDED = 44, + QMI_NAS_ACTIVE_BAND_GSM_900_PRIMARY = 45, + QMI_NAS_ACTIVE_BAND_GSM_900_RAILWAYS = 46, + QMI_NAS_ACTIVE_BAND_GSM_DCS_1800 = 47, + QMI_NAS_ACTIVE_BAND_GSM_PCS_1900 = 48, + QMI_NAS_ACTIVE_BAND_WCDMA_2100 = 80, + QMI_NAS_ACTIVE_BAND_WCDMA_PCS_1900 = 81, + QMI_NAS_ACTIVE_BAND_WCDMA_DCS_1800 = 82, + QMI_NAS_ACTIVE_BAND_WCDMA_1700_US = 83, + QMI_NAS_ACTIVE_BAND_WCDMA_850 = 84, + QMI_NAS_ACTIVE_BAND_WCDMA_800 = 85, + QMI_NAS_ACTIVE_BAND_WCDMA_2600 = 86, + QMI_NAS_ACTIVE_BAND_WCDMA_900 = 87, + QMI_NAS_ACTIVE_BAND_WCDMA_1700_JAPAN = 88, + QMI_NAS_ACTIVE_BAND_WCDMA_1500_JAPAN = 90, + QMI_NAS_ACTIVE_BAND_WCDMA_850_JAPAN = 91, + QMI_NAS_ACTIVE_BAND_EUTRAN_1 = 120, + QMI_NAS_ACTIVE_BAND_EUTRAN_2 = 121, + QMI_NAS_ACTIVE_BAND_EUTRAN_3 = 122, + QMI_NAS_ACTIVE_BAND_EUTRAN_4 = 123, + QMI_NAS_ACTIVE_BAND_EUTRAN_5 = 124, + QMI_NAS_ACTIVE_BAND_EUTRAN_6 = 125, + QMI_NAS_ACTIVE_BAND_EUTRAN_7 = 126, + QMI_NAS_ACTIVE_BAND_EUTRAN_8 = 127, + QMI_NAS_ACTIVE_BAND_EUTRAN_9 = 128, + QMI_NAS_ACTIVE_BAND_EUTRAN_10 = 129, + QMI_NAS_ACTIVE_BAND_EUTRAN_11 = 130, + QMI_NAS_ACTIVE_BAND_EUTRAN_12 = 131, + QMI_NAS_ACTIVE_BAND_EUTRAN_13 = 132, + QMI_NAS_ACTIVE_BAND_EUTRAN_14 = 133, + QMI_NAS_ACTIVE_BAND_EUTRAN_17 = 134, + QMI_NAS_ACTIVE_BAND_EUTRAN_18 = 143, + QMI_NAS_ACTIVE_BAND_EUTRAN_19 = 144, + QMI_NAS_ACTIVE_BAND_EUTRAN_20 = 145, + QMI_NAS_ACTIVE_BAND_EUTRAN_21 = 146, + QMI_NAS_ACTIVE_BAND_EUTRAN_23 = 152, + QMI_NAS_ACTIVE_BAND_EUTRAN_24 = 147, + QMI_NAS_ACTIVE_BAND_EUTRAN_25 = 148, + QMI_NAS_ACTIVE_BAND_EUTRAN_26 = 153, + QMI_NAS_ACTIVE_BAND_EUTRAN_27 = 164, + QMI_NAS_ACTIVE_BAND_EUTRAN_28 = 158, + QMI_NAS_ACTIVE_BAND_EUTRAN_29 = 159, + QMI_NAS_ACTIVE_BAND_EUTRAN_30 = 160, + QMI_NAS_ACTIVE_BAND_EUTRAN_31 = 165, + QMI_NAS_ACTIVE_BAND_EUTRAN_32 = 154, + QMI_NAS_ACTIVE_BAND_EUTRAN_33 = 135, + QMI_NAS_ACTIVE_BAND_EUTRAN_34 = 136, + QMI_NAS_ACTIVE_BAND_EUTRAN_35 = 137, + QMI_NAS_ACTIVE_BAND_EUTRAN_36 = 138, + QMI_NAS_ACTIVE_BAND_EUTRAN_37 = 139, + QMI_NAS_ACTIVE_BAND_EUTRAN_38 = 140, + QMI_NAS_ACTIVE_BAND_EUTRAN_39 = 141, + 
QMI_NAS_ACTIVE_BAND_EUTRAN_40 = 142, + QMI_NAS_ACTIVE_BAND_EUTRAN_41 = 149, + QMI_NAS_ACTIVE_BAND_EUTRAN_42 = 150, + QMI_NAS_ACTIVE_BAND_EUTRAN_43 = 151, + QMI_NAS_ACTIVE_BAND_EUTRAN_46 = 163, + QMI_NAS_ACTIVE_BAND_EUTRAN_47 = 166, + QMI_NAS_ACTIVE_BAND_EUTRAN_48 = 167, + QMI_NAS_ACTIVE_BAND_EUTRAN_66 = 161, + QMI_NAS_ACTIVE_BAND_EUTRAN_71 = 168, + QMI_NAS_ACTIVE_BAND_EUTRAN_125 = 155, + QMI_NAS_ACTIVE_BAND_EUTRAN_126 = 156, + QMI_NAS_ACTIVE_BAND_EUTRAN_127 = 157, + QMI_NAS_ACTIVE_BAND_EUTRAN_250 = 162, + QMI_NAS_ACTIVE_BAND_TDSCDMA_A = 200, + QMI_NAS_ACTIVE_BAND_TDSCDMA_B = 201, + QMI_NAS_ACTIVE_BAND_TDSCDMA_C = 202, + QMI_NAS_ACTIVE_BAND_TDSCDMA_D = 203, + QMI_NAS_ACTIVE_BAND_TDSCDMA_E = 204, + QMI_NAS_ACTIVE_BAND_TDSCDMA_F = 205, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_1 = 250, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_2 = 251, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_3 = 252, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_5 = 253, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_7 = 254, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_8 = 255, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_20 = 256, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_28 = 257, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_38 = 258, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_41 = 259, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_50 = 260, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_51 = 261, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_66 = 262, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_70 = 263, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_71 = 264, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_74 = 265, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_75 = 266, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_76 = 267, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_77 = 268, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_78 = 269, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_79 = 270, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_80 = 271, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_81 = 272, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_82 = 273, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_83 = 274, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_84 = 275, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_85 = 276, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_257= 277, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_258= 278, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_259= 279, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_260= 280, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_261= 281, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_12 = 282, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_25 = 283, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_34 = 284, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_39 = 285, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_40 = 286, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_65 = 287, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_86 = 288, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_48 = 289, + QMI_NAS_ACTIVE_BAND_NR5G_BAND_14 = 290 +} QMI_NAS_ACTIVE_BAND_E; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + + uint8_t srv_domain_valid; + uint8_t srv_domain; + uint8_t srv_capability_valid; + uint8_t srv_capability; + uint8_t roam_status_valid; + uint8_t roam_status; + uint8_t is_sys_forbidden_valid; + uint8_t is_sys_forbidden; + + uint8_t lac_valid; + uint16_t lac; + uint8_t cell_id_valid; + uint32_t cell_id; + uint8_t reg_reject_info_valid; + uint8_t reject_srv_domain; + uint8_t rej_cause; + uint8_t network_id_valid; + UCHAR MCC[3]; + UCHAR MNC[3]; + + uint8_t tac_valid; + uint16_t tac; +} __attribute__ ((packed)) NR5G_SYSTEM_INFO, *PNR5G_SYSTEM_INFO; + +#if 0 +typedef struct _QMINAS_SERVING_SYSTEM_IND_MSG +{ + USHORT Type; + USHORT Length; +} QMINAS_SERVING_SYSTEM_IND_MSG, *PQMINAS_SERVING_SYSTEM_IND_MSG; + +typedef struct _QMINAS_SET_PREFERRED_NETWORK_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT NumPreferredNetwork; + USHORT MobileCountryCode; + USHORT MobileNetworkCode; + USHORT RadioAccess; +} 
QMINAS_SET_PREFERRED_NETWORK_REQ_MSG, *PQMINAS_SET_PREFERRED_NETWORK_REQ_MSG; + +typedef struct _QMINAS_SET_PREFERRED_NETWORK_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} QMINAS_SET_PREFERRED_NETWORK_RESP_MSG, *PQMINAS_SET_PREFERRED_NETWORK_RESP_MSG; + +typedef struct _QMINAS_SET_FORBIDDEN_NETWORK_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT NumForbiddenNetwork; + USHORT MobileCountryCode; + USHORT MobileNetworkCode; +} QMINAS_SET_FORBIDDEN_NETWORK_REQ_MSG, *PQMINAS_SET_FORBIDDEN_NETWORK_REQ_MSG; + +typedef struct _QMINAS_SET_FORBIDDEN_NETWORK_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} QMINAS_SET_FORBIDDEN_NETWORK_RESP_MSG, *PQMINAS_SET_FORBIDDEN_NETWORK_RESP_MSG; + +typedef struct _QMINAS_PERFORM_NETWORK_SCAN_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; +} QMINAS_PERFORM_NETWORK_SCAN_REQ_MSG, *PQMINAS_PERFORM_NETWORK_SCAN_REQ_MSG; + +typedef struct _VISIBLE_NETWORK +{ + USHORT MobileCountryCode; + USHORT MobileNetworkCode; + UCHAR NetworkStatus; + UCHAR NetworkDesclen; +} VISIBLE_NETWORK, *PVISIBLE_NETWORK; + +typedef struct _QMINAS_PERFORM_NETWORK_SCAN_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} QMINAS_PERFORM_NETWORK_SCAN_RESP_MSG, *PQMINAS_PERFORM_NETWORK_SCAN_RESP_MSG; + +typedef struct _QMINAS_PERFORM_NETWORK_SCAN_NETWORK_INFO +{ + UCHAR TLVType; // 0x010 - required parameter + USHORT TLVLength; // length + USHORT NumNetworkInstances; +} QMINAS_PERFORM_NETWORK_SCAN_NETWORK_INFO, *PQMINAS_PERFORM_NETWORK_SCAN_NETWORK_INFO; + +typedef struct _QMINAS_PERFORM_NETWORK_SCAN_RAT_INFO +{ + UCHAR TLVType; // 0x011 - required parameter + USHORT TLVLength; // length + USHORT NumInst; +} QMINAS_PERFORM_NETWORK_SCAN_RAT_INFO, *PQMINAS_PERFORM_NETWORK_SCAN_RAT_INFO; + +typedef struct _QMINAS_PERFORM_NETWORK_SCAN_RAT +{ + USHORT MCC; + USHORT MNC; + UCHAR RAT; +} QMINAS_PERFORM_NETWORK_SCAN_RAT, *PQMINAS_PERFORM_NETWORK_SCAN_RAT; + + +typedef struct _QMINAS_MANUAL_NW_REGISTER +{ + UCHAR TLV2Type; // 0x02 - result code + USHORT TLV2Length; // 4 + USHORT MobileCountryCode; + USHORT MobileNetworkCode; + UCHAR RadioAccess; +} QMINAS_MANUAL_NW_REGISTER, *PQMINAS_MANUAL_NW_REGISTER; + +typedef struct _QMINAS_INITIATE_NW_REGISTER_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + UCHAR RegisterAction; +} QMINAS_INITIATE_NW_REGISTER_REQ_MSG, *PQMINAS_INITIATE_NW_REGISTER_REQ_MSG; + +typedef struct _QMINAS_INITIATE_NW_REGISTER_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // 
QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} QMINAS_INITIATE_NW_REGISTER_RESP_MSG, *PQMINAS_INITIATE_NW_REGISTER_RESP_MSG; + +typedef struct _QMINAS_SET_TECHNOLOGY_PREF_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT TechPref; + UCHAR Duration; +} QMINAS_SET_TECHNOLOGY_PREF_REQ_MSG, *PQMINAS_SET_TECHNOLOGY_PREF_REQ_MSG; + +typedef struct _QMINAS_SET_TECHNOLOGY_PREF_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} QMINAS_SET_TECHNOLOGY_PREF_RESP_MSG, *PQMINAS_SET_TECHNOLOGY_PREF_RESP_MSG; + +typedef struct _QMINAS_GET_SIGNAL_STRENGTH_REQ_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; +} QMINAS_GET_SIGNAL_STRENGTH_REQ_MSG, *PQMINAS_GET_SIGNAL_STRENGTH_REQ_MSG; + +typedef struct _QMINAS_SIGNAL_STRENGTH +{ + CHAR SigStrength; + UCHAR RadioIf; +} QMINAS_SIGNAL_STRENGTH, *PQMINAS_SIGNAL_STRENGTH; + +typedef struct _QMINAS_SIGNAL_STRENGTH_LIST +{ + UCHAR TLV3Type; + USHORT TLV3Length; + USHORT NumInstance; +} QMINAS_SIGNAL_STRENGTH_LIST, *PQMINAS_SIGNAL_STRENGTH_LIST; + + +typedef struct _QMINAS_GET_SIGNAL_STRENGTH_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + UCHAR TLV2Type; + USHORT TLV2Length; + CHAR SignalStrength; + UCHAR RadioIf; +} QMINAS_GET_SIGNAL_STRENGTH_RESP_MSG, *PQMINAS_GET_SIGNAL_STRENGTH_RESP_MSG; + + +typedef struct _QMINAS_SET_EVENT_REPORT_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR ReportSigStrength; + UCHAR NumTresholds; + CHAR TresholdList[2]; +} QMINAS_SET_EVENT_REPORT_REQ_MSG, *PQMINAS_SET_EVENT_REPORT_REQ_MSG; + +typedef struct _QMINAS_SET_EVENT_REPORT_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} QMINAS_SET_EVENT_REPORT_RESP_MSG, *PQMINAS_SET_EVENT_REPORT_RESP_MSG; + +typedef struct _QMINAS_SIGNAL_STRENGTH_TLV +{ + UCHAR TLVType; + USHORT TLVLength; + CHAR SigStrength; + UCHAR RadioIf; +} QMINAS_SIGNAL_STRENGTH_TLV, *PQMINAS_SIGNAL_STRENGTH_TLV; + +typedef struct _QMINAS_REJECT_CAUSE_TLV +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR ServiceDomain; + USHORT RejectCause; +} QMINAS_REJECT_CAUSE_TLV, *PQMINAS_REJECT_CAUSE_TLV; + +typedef struct _QMINAS_EVENT_REPORT_IND_MSG +{ + USHORT Type; + USHORT Length; +} QMINAS_EVENT_REPORT_IND_MSG, *PQMINAS_EVENT_REPORT_IND_MSG; + +typedef struct _QMINAS_GET_RF_BAND_INFO_REQ_MSG +{ + USHORT Type; + USHORT Length; +} QMINAS_GET_RF_BAND_INFO_REQ_MSG, *PQMINAS_GET_RF_BAND_INFO_REQ_MSG; + +typedef struct _QMINASRF_BAND_INFO +{ + UCHAR RadioIf; + USHORT ActiveBand; + USHORT ActiveChannel; +} QMINASRF_BAND_INFO, *PQMINASRF_BAND_INFO; + +typedef struct _QMINAS_GET_RF_BAND_INFO_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 
4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT + UCHAR TLV2Type; + USHORT TLV2Length; + UCHAR NumInstances; +} QMINAS_GET_RF_BAND_INFO_RESP_MSG, *PQMINAS_GET_RF_BAND_INFO_RESP_MSG; + + +typedef struct _QMINAS_GET_PLMN_NAME_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT MCC; + USHORT MNC; +} QMINAS_GET_PLMN_NAME_REQ_MSG, *PQMINAS_GET_PLMN_NAME_REQ_MSG; + +typedef struct _QMINAS_GET_PLMN_NAME_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} QMINAS_GET_PLMN_NAME_RESP_MSG, *PQMINAS_GET_PLMN_NAME_RESP_MSG; + +typedef struct _QMINAS_GET_PLMN_NAME_SPN +{ + UCHAR TLVType; + USHORT TLVLength; + UCHAR SPN_Enc; + UCHAR SPN_Len; +} QMINAS_GET_PLMN_NAME_SPN, *PQMINAS_GET_PLMN_NAME_SPN; + +typedef struct _QMINAS_GET_PLMN_NAME_PLMN +{ + UCHAR PLMN_Enc; + UCHAR PLMN_Ci; + UCHAR PLMN_SpareBits; + UCHAR PLMN_Len; +} QMINAS_GET_PLMN_NAME_PLMN, *PQMINAS_GET_PLMN_NAME_PLMN; + +typedef struct _QMINAS_INITIATE_ATTACH_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR PsAttachAction; +} QMINAS_INITIATE_ATTACH_REQ_MSG, *PQMINAS_INITIATE_ATTACH_REQ_MSG; + +typedef struct _QMINAS_INITIATE_ATTACH_RESP_MSG +{ + USHORT Type; // QMUX type 0x0003 + USHORT Length; + UCHAR TLVType; // 0x02 - result code + USHORT TLVLength; // 4 + USHORT QMUXResult; // QMI_RESULT_SUCCESS + // QMI_RESULT_FAILURE + USHORT QMUXError; // QMI_ERR_INVALID_ARG + // QMI_ERR_NO_MEMORY + // QMI_ERR_INTERNAL + // QMI_ERR_FAULT +} QMINAS_INITIATE_ATTACH_RESP_MSG, *PQMINAS_INITIATE_ATTACH_RESP_MSG; +#endif +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + CHAR rssi; + SHORT ecio; +} __attribute__ ((packed)) QMINAS_SIG_INFO_CDMA_TLV_MSG, *PQMINAS_SIG_INFO_CDMA_TLV_MSG; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + CHAR rssi; + SHORT ecio; + CHAR sinr; + INT io; +} __attribute__ ((packed)) QMINAS_SIG_INFO_HDR_TLV_MSG, *PQMINAS_SIG_INFO_HDR_TLV_MSG; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + CHAR rssi; +} __attribute__ ((packed)) QMINAS_SIG_INFO_GSM_TLV_MSG, *PQMINAS_SIG_INFO_GSM_TLV_MSG; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + CHAR rssi; + SHORT ecio; +} __attribute__ ((packed)) QMINAS_SIG_INFO_WCDMA_TLV_MSG, *PQMINAS_SIG_INFO_WCDMA_TLV_MSG; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + CHAR rssi; + CHAR rsrq; + SHORT rsrp; + SHORT snr; +} __attribute__ ((packed)) QMINAS_SIG_INFO_LTE_TLV_MSG, *PQMINAS_SIG_INFO_LTE_TLV_MSG; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + CHAR rscp; +} __attribute__ ((packed)) QMINAS_SIG_INFO_TDSCDMA_TLV_MSG, *PQMINAS_SIG_INFO_TDSCDMA_TLV_MSG; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + SHORT rsrp; + SHORT snr; +} __attribute__ ((packed)) QMINAS_SIG_INFO_5G_NSA_TLV_MSG, *PQMINAS_SIG_INFO_5G_NSA_TLV_MSG; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + SHORT nr5g_rsrq; +} __attribute__ ((packed)) QMINAS_SIG_INFO_5G_SA_TLV_MSG, *PQMINAS_SIG_INFO_5G_SA_TLV_MSG; + +typedef struct { + uint8 radio_if; + uint16 active_band; + uint16 active_channel; +} __attribute__ ((packed)) NasGetRfBandInfo; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + + uint8 num_instances; + NasGetRfBandInfo 
bands_array[0]; +} __attribute__ ((packed)) NasGetRfBandInfoList; + +typedef struct { + uint8 radio_if; + uint16 dedicated_band; +} __attribute__ ((packed)) NasGetRfBandInfoDedicated; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + + uint8 num_instances; + NasGetRfBandInfoDedicated bands_array[0]; +} __attribute__ ((packed)) NasGetRfBandInfoDedicatedList; + +typedef struct { + uint8 radio_if; + uint16 active_band; + uint32 active_channel; +} __attribute__ ((packed)) NasGetRfBandInfoExtended; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + + uint8 num_instances; + NasGetRfBandInfoExtended bands_array[0]; +} __attribute__ ((packed)) NasGetRfBandInfoExtendedList; + +typedef struct { + uint8 radio_if; + uint32 bandwidth; +} __attribute__ ((packed)) NasGetRfBandInfoBandWidth; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + + uint8 num_instances; + NasGetRfBandInfoBandWidth bands_array[0]; +} __attribute__ ((packed)) NasGetRfBandInfoBandWidthList; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + + uint8 plmn[3]; + uint8 tac[3]; + uint64 global_cell_id; + uint16 physical_cell_id; + int16 rsrq; + int16 rsrp; + int16 snr; +} __attribute__ ((packed)) NasGetCellLocationNr5gServingCell; + +typedef struct { + uint16 physical_cell_id; + int16 rsrq; + int16 rsrp; + int16 rssi; + int16 cell_selection_rx_level; +} __attribute__ ((packed)) NasGetCellLocationLteInfoCell; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + + uint8 ue_in_idle; + uint8 plmn[3]; + uint16 tracking_area_code; + uint32 global_cell_id; + uint16 absolute_rf_channel_number; + uint16 serving_cell_id; + uint8 cell_reselection_priority; + uint8 s_non_intra_search_threshold; + uint8 serving_cell_low_threshold; + uint8 s_intra_search_threshold; + uint8 cells_len; + NasGetCellLocationLteInfoCell cells_array[0]; +} __attribute__ ((packed)) NasGetCellLocationLteInfoIntrafrequency; + +typedef struct _QmiMessageNasGetCellLocationInfoOutputInterfrequencyLteInfoFrequencyElement { + uint16 eutra_absolute_rf_channel_number; + uint8 cell_selection_rx_level_low_threshold; + uint8 cell_selection_rx_level_high_threshold; + uint8 cell_reselection_priority; + uint8 cells_len; + NasGetCellLocationLteInfoCell cells_array[0]; +} __attribute__ ((packed)) NasGetCellLocationLteInfoInterfrequencyFrequencyElement; + +typedef struct { + UCHAR TLVType; + USHORT TLVLength; + + uint8 ue_in_idle; + uint8 freqs_len; + NasGetCellLocationLteInfoInterfrequencyFrequencyElement freqs[0]; +} __attribute__ ((packed)) NasGetCellLocationLteInfoInterfrequency; + +// ======================= End of NAS ============================== + +// ======================= UIM ============================== +#define QMIUIM_READ_TRANSPARENT_REQ 0x0020 +#define QMIUIM_READ_TRANSPARENT_RESP 0x0020 +#define QMIUIM_READ_TRANSPARENT_IND 0x0020 +#define QMIUIM_READ_RECORD_REQ 0x0021 +#define QMIUIM_READ_RECORD_RESP 0x0021 +#define QMIUIM_READ_RECORD_IND 0x0021 +#define QMIUIM_WRITE_TRANSPARENT_REQ 0x0022 +#define QMIUIM_WRITE_TRANSPARENT_RESP 0x0022 +#define QMIUIM_WRITE_TRANSPARENT_IND 0x0022 +#define QMIUIM_WRITE_RECORD_REQ 0x0023 +#define QMIUIM_WRITE_RECORD_RESP 0x0023 +#define QMIUIM_WRITE_RECORD_IND 0x0023 +#define QMIUIM_SET_PIN_PROTECTION_REQ 0x0025 +#define QMIUIM_SET_PIN_PROTECTION_RESP 0x0025 +#define QMIUIM_SET_PIN_PROTECTION_IND 0x0025 +#define QMIUIM_VERIFY_PIN_REQ 0x0026 +#define QMIUIM_VERIFY_PIN_RESP 0x0026 +#define QMIUIM_VERIFY_PIN_IND 0x0026 +#define QMIUIM_UNBLOCK_PIN_REQ 0x0027 +#define QMIUIM_UNBLOCK_PIN_RESP 0x0027 +#define 
QMIUIM_UNBLOCK_PIN_IND 0x0027 +#define QMIUIM_CHANGE_PIN_REQ 0x0028 +#define QMIUIM_CHANGE_PIN_RESP 0x0028 +#define QMIUIM_CHANGE_PIN_IND 0x0028 +#define QMIUIM_DEPERSONALIZATION_REQ 0x0029 +#define QMIUIM_DEPERSONALIZATION_RESP 0x0029 +#define QMIUIM_EVENT_REG_REQ 0x002E +#define QMIUIM_EVENT_REG_RESP 0x002E +#define QMIUIM_GET_CARD_STATUS_REQ 0x002F +#define QMIUIM_GET_CARD_STATUS_RESP 0x002F +#define QMIUIM_STATUS_CHANGE_IND 0x0032 +#define QMIUIM_POWER_DOWN 0x0030 +#define QMIUIM_POWER_UP 0x0031 + + +typedef struct _QMIUIM_GET_CARD_STATUS_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} __attribute__ ((packed)) QMIUIM_GET_CARD_STATUS_RESP_MSG, *PQMIUIM_GET_CARD_STATUS_RESP_MSG; + +#define UIM_CARD_STATE_ABSENT 0x00 +#define UIM_CARD_STATE_PRESENT 0x01 +#define UIM_CARD_STATE_ERROR 0x02 + +typedef struct _QMIUIM_CARD_STATUS +{ + UCHAR TLVType; + USHORT TLVLength; + USHORT IndexGWPri; + USHORT Index1XPri; + USHORT IndexGWSec; + USHORT Index1XSec; + UCHAR NumSlot; + UCHAR CardState; + UCHAR UPINState; + UCHAR UPINRetries; + UCHAR UPUKRetries; + UCHAR ErrorCode; + UCHAR NumApp; + UCHAR AppType; + UCHAR AppState; + UCHAR PersoState; + UCHAR PersoFeature; + UCHAR PersoRetries; + UCHAR PersoUnblockRetries; + UCHAR AIDLength; +} __attribute__ ((packed)) QMIUIM_CARD_STATUS, *PQMIUIM_CARD_STATUS; + +typedef struct _QMIUIM_PIN_STATE +{ + UCHAR UnivPIN; + UCHAR PIN1State; + UCHAR PIN1Retries; + UCHAR PUK1Retries; + UCHAR PIN2State; + UCHAR PIN2Retries; + UCHAR PUK2Retries; +} __attribute__ ((packed)) QMIUIM_PIN_STATE, *PQMIUIM_PIN_STATE; + +typedef struct _QMIUIM_VERIFY_PIN_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR Session_Type; + UCHAR Aid_Len; + UCHAR TLV2Type; + USHORT TLV2Length; + UCHAR PINID; + UCHAR PINLen; + UCHAR PINValue; +} __attribute__ ((packed)) QMIUIM_VERIFY_PIN_REQ_MSG, *PQMIUIM_VERIFY_PIN_REQ_MSG; + +typedef struct _QMIUIM_VERIFY_PIN_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; + UCHAR TLV2Type; + USHORT TLV2Length; + UCHAR PINVerifyRetriesLeft; + UCHAR PINUnblockRetriesLeft; +} __attribute__ ((packed)) QMIUIM_VERIFY_PIN_RESP_MSG, *PQMIUIM_VERIFY_PIN_RESP_MSG; + +typedef struct _QMIUIM_READ_TRANSPARENT_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR Session_Type; + UCHAR Aid_Len; + UCHAR TLV2Type; + USHORT TLV2Length; + USHORT file_id; + UCHAR path_len; + UCHAR path[]; +} __attribute__ ((packed)) QMIUIM_READ_TRANSPARENT_REQ_MSG, *PQMIUIM_READ_TRANSPARENT_REQ_MSG; + +typedef struct _READ_TRANSPARENT_TLV +{ + UCHAR TLVType; + USHORT TLVLength; + USHORT Offset; + USHORT Length; +} __attribute__ ((packed)) READ_TRANSPARENT_TLV, *PREAD_TRANSPARENT_TLV; + +typedef struct _QMIUIM_CONTENT +{ + UCHAR TLVType; + USHORT TLVLength; + USHORT content_len; + UCHAR content[]; +} __attribute__ ((packed)) QMIUIM_CONTENT, *PQMIUIM_CONTENT; + +typedef struct _QMIUIM_READ_TRANSPARENT_RESP_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + USHORT QMUXResult; + USHORT QMUXError; +} __attribute__ ((packed)) QMIUIM_READ_TRANSPARENT_RESP_MSG, *PQMIUIM_READ_TRANSPARENT_RESP_MSG; + +typedef struct _QMIUIM_SET_CARD_SLOT_REQ_MSG +{ + USHORT Type; + USHORT Length; + UCHAR TLVType; + USHORT TLVLength; + UCHAR slot; +} __attribute__ ((packed)) QMIUIM_SET_CARD_SLOT_REQ_MSG, *PQMIUIM_SET_CARD_SLOT_REQ_MSG; + +// ======================= COEX 
============================== +#define QMI_COEX_GET_WWAN_STATE_REQ 0x22 +#define QMI_COEX_GET_WWAN_STATE_RESP 0x22 + +typedef struct { + + uint32_t freq; + /**< Band center frequency in MHz. */ + + uint32_t bandwidth; + /**< Bandwidth in MHz. */ +}coex_band_type_v01; /* Type */ + +typedef struct _QMI_COEX_GET_WWAN_STATE_RESP_MSG_LTE_BAND +{ + UCHAR TLVType; + USHORT TLVLength; + coex_band_type_v01 ul_band; + coex_band_type_v01 dl_band; +} __attribute__ ((packed)) QMI_COEX_GET_WWAN_STATE_RESP_MSG_LTE_BAND, *PQMI_COEX_GET_WWAN_STATE_RESP_MSG_LTE_BAND; + + +typedef struct _QMUX_MSG +{ + QCQMUX_HDR QMUXHdr; + union + { + // Message Header + QCQMUX_MSG_HDR QMUXMsgHdr; + QCQMUX_MSG_HDR_RESP QMUXMsgHdrResp; + + // QMIWDS Message +#if 0 + QMIWDS_GET_PKT_SRVC_STATUS_REQ_MSG PacketServiceStatusReq; + QMIWDS_GET_PKT_SRVC_STATUS_RESP_MSG PacketServiceStatusRsp; + QMIWDS_GET_PKT_SRVC_STATUS_IND_MSG PacketServiceStatusInd; + QMIWDS_EVENT_REPORT_IND_MSG EventReportInd; + QMIWDS_GET_CURRENT_CHANNEL_RATE_REQ_MSG GetCurrChannelRateReq; + QMIWDS_GET_CURRENT_CHANNEL_RATE_RESP_MSG GetCurrChannelRateRsp; + QMIWDS_GET_PKT_STATISTICS_REQ_MSG GetPktStatsReq; + QMIWDS_GET_PKT_STATISTICS_RESP_MSG GetPktStatsRsp; + QMIWDS_SET_EVENT_REPORT_REQ_MSG EventReportReq; + QMIWDS_SET_EVENT_REPORT_RESP_MSG EventReportRsp; +#endif + //#ifdef QC_IP_MODE + QMIWDS_GET_RUNTIME_SETTINGS_REQ_MSG GetRuntimeSettingsReq; + QMIWDS_GET_RUNTIME_SETTINGS_RESP_MSG GetRuntimeSettingsRsp; + //#endif // QC_IP_MODE + QMIWDS_SET_CLIENT_IP_FAMILY_PREF_REQ_MSG SetClientIpFamilyPrefReq; + QMIWDS_SET_CLIENT_IP_FAMILY_PREF_RESP_MSG SetClientIpFamilyPrefResp; + QMIWDS_SET_AUTO_CONNECT_REQ_MSG SetAutoConnectReq; +#if 0 + QMIWDS_GET_MIP_MODE_REQ_MSG GetMipModeReq; + QMIWDS_GET_MIP_MODE_RESP_MSG GetMipModeResp; +#endif + QMIWDS_START_NETWORK_INTERFACE_REQ_MSG StartNwInterfaceReq; + QMIWDS_START_NETWORK_INTERFACE_RESP_MSG StartNwInterfaceResp; + QMIWDS_STOP_NETWORK_INTERFACE_REQ_MSG StopNwInterfaceReq; + QMIWDS_STOP_NETWORK_INTERFACE_RESP_MSG StopNwInterfaceResp; + QMIWDS_GET_DEFAULT_SETTINGS_REQ_MSG GetDefaultSettingsReq; + QMIWDS_GET_DEFAULT_SETTINGS_RESP_MSG GetDefaultSettingsResp; + QMIWDS_MODIFY_PROFILE_SETTINGS_REQ_MSG ModifyProfileSettingsReq; + QMIWDS_MODIFY_PROFILE_SETTINGS_RESP_MSG ModifyProfileSettingsResp; + QMIWDS_GET_PROFILE_SETTINGS_REQ_MSG GetProfileSettingsReq; + QMIWDS_CREATE_PROFILE_SETTINGS_REQ_MSG CreatetProfileSettingsReq; + QMIWDS_GET_PROFILE_LIST_REQ_MSG GetProfileListReq; + QMIWDS_GET_PROFILE_LIST_RESP_MSG GetProfileListResp; +#if 0 + QMIWDS_GET_DATA_BEARER_REQ_MSG GetDataBearerReq; + QMIWDS_GET_DATA_BEARER_RESP_MSG GetDataBearerResp; + QMIWDS_DUN_CALL_INFO_REQ_MSG DunCallInfoReq; + QMIWDS_DUN_CALL_INFO_RESP_MSG DunCallInfoResp; +#endif + QMIWDS_BIND_MUX_DATA_PORT_REQ_MSG BindMuxDataPortReq; + + // QMIDMS Messages +#if 0 + QMIDMS_GET_DEVICE_MFR_REQ_MSG GetDeviceMfrReq; + QMIDMS_GET_DEVICE_MFR_RESP_MSG GetDeviceMfrRsp; + QMIDMS_GET_DEVICE_MODEL_ID_REQ_MSG GetDeviceModeIdReq; + QMIDMS_GET_DEVICE_MODEL_ID_RESP_MSG GetDeviceModeIdRsp; + QMIDMS_GET_DEVICE_REV_ID_REQ_MSG GetDeviceRevIdReq; + QMIDMS_GET_DEVICE_REV_ID_RESP_MSG GetDeviceRevIdRsp; + QMIDMS_GET_MSISDN_REQ_MSG GetMsisdnReq; + QMIDMS_GET_MSISDN_RESP_MSG GetMsisdnRsp; + QMIDMS_GET_DEVICE_SERIAL_NUMBERS_REQ_MSG GetDeviceSerialNumReq; + QMIDMS_GET_DEVICE_SERIAL_NUMBERS_RESP_MSG GetDeviceSerialNumRsp; + QMIDMS_GET_DEVICE_CAP_REQ_MSG GetDeviceCapReq; + QMIDMS_GET_DEVICE_CAP_RESP_MSG GetDeviceCapResp; + QMIDMS_GET_BAND_CAP_REQ_MSG GetBandCapReq; + QMIDMS_GET_BAND_CAP_RESP_MSG 
GetBandCapRsp; + QMIDMS_GET_ACTIVATED_STATUS_REQ_MSG GetActivatedStatusReq; + QMIDMS_GET_ACTIVATED_STATUS_RESP_MSG GetActivatedStatusResp; + QMIDMS_GET_OPERATING_MODE_REQ_MSG GetOperatingModeReq; + QMIDMS_GET_OPERATING_MODE_RESP_MSG GetOperatingModeResp; +#endif + QMIDMS_SET_OPERATING_MODE_REQ_MSG SetOperatingModeReq; + QMIDMS_SET_OPERATING_MODE_RESP_MSG SetOperatingModeResp; +#if 0 + QMIDMS_UIM_GET_ICCID_REQ_MSG GetICCIDReq; + QMIDMS_UIM_GET_ICCID_RESP_MSG GetICCIDResp; + QMIDMS_ACTIVATE_AUTOMATIC_REQ_MSG ActivateAutomaticReq; + QMIDMS_ACTIVATE_AUTOMATIC_RESP_MSG ActivateAutomaticResp; + QMIDMS_ACTIVATE_MANUAL_REQ_MSG ActivateManualReq; + QMIDMS_ACTIVATE_MANUAL_RESP_MSG ActivateManualResp; +#endif + QMIDMS_UIM_GET_PIN_STATUS_REQ_MSG UIMGetPinStatusReq; + QMIDMS_UIM_GET_PIN_STATUS_RESP_MSG UIMGetPinStatusResp; + QMIDMS_UIM_VERIFY_PIN_REQ_MSG UIMVerifyPinReq; + QMIDMS_UIM_VERIFY_PIN_RESP_MSG UIMVerifyPinResp; +#if 0 + QMIDMS_UIM_SET_PIN_PROTECTION_REQ_MSG UIMSetPinProtectionReq; + QMIDMS_UIM_SET_PIN_PROTECTION_RESP_MSG UIMSetPinProtectionResp; + QMIDMS_UIM_CHANGE_PIN_REQ_MSG UIMChangePinReq; + QMIDMS_UIM_CHANGE_PIN_RESP_MSG UIMChangePinResp; + QMIDMS_UIM_UNBLOCK_PIN_REQ_MSG UIMUnblockPinReq; + QMIDMS_UIM_UNBLOCK_PIN_RESP_MSG UIMUnblockPinResp; + QMIDMS_SET_EVENT_REPORT_REQ_MSG DmsSetEventReportReq; + QMIDMS_SET_EVENT_REPORT_RESP_MSG DmsSetEventReportResp; + QMIDMS_EVENT_REPORT_IND_MSG DmsEventReportInd; +#endif + QMIDMS_UIM_GET_STATE_REQ_MSG UIMGetStateReq; + QMIDMS_UIM_GET_STATE_RESP_MSG UIMGetStateResp; + QMIDMS_UIM_GET_IMSI_REQ_MSG UIMGetIMSIReq; + QMIDMS_UIM_GET_IMSI_RESP_MSG UIMGetIMSIResp; +#if 0 + QMIDMS_UIM_GET_CK_STATUS_REQ_MSG UIMGetCkStatusReq; + QMIDMS_UIM_GET_CK_STATUS_RESP_MSG UIMGetCkStatusResp; + QMIDMS_UIM_SET_CK_PROTECTION_REQ_MSG UIMSetCkProtectionReq; + QMIDMS_UIM_SET_CK_PROTECTION_RESP_MSG UIMSetCkProtectionResp; + QMIDMS_UIM_UNBLOCK_CK_REQ_MSG UIMUnblockCkReq; + QMIDMS_UIM_UNBLOCK_CK_RESP_MSG UIMUnblockCkResp; +#endif + + // QMIQOS Messages +#if 1 + QMI_QOS_SET_EVENT_REPORT_REQ_MSG QosSetEventReportReq; + QMI_QOS_SET_EVENT_REPORT_RESP_MSG QosSetEventReportRsp; + QMI_QOS_SET_EVENT_REPORT_IND_MSG QosSetEventReportInd; + QMI_QOS_BIND_DATA_PORT_REQ_MSG QosBindDataPortReq; + QMI_QOS_BIND_DATA_PORT_RESP_MSG QosBindDataPortRsp; + QMI_QOS_INDICATION_REGISTER_REQ_MSG QosIndRegReq; + QMI_QOS_INDICATION_REGISTER_RESP_MSG QosIndRegRsp; + QMI_QOS_GLOBAL_QOS_FLOW_IND_MSG QosGlobalQosFlowInd; + QMI_QOS_GET_QOS_INFO_REQ_MSG QosGetQosInfoReq; + QMI_QOS_GET_QOS_INFO_RESP_MSG QosGetQosInfoRsp; +#endif + + // QMIWMS Messages +#if 0 + QMIWMS_GET_MESSAGE_PROTOCOL_REQ_MSG GetMessageProtocolReq; + QMIWMS_GET_MESSAGE_PROTOCOL_RESP_MSG GetMessageProtocolResp; + QMIWMS_GET_SMSC_ADDRESS_REQ_MSG GetSMSCAddressReq; + QMIWMS_GET_SMSC_ADDRESS_RESP_MSG GetSMSCAddressResp; + QMIWMS_SET_SMSC_ADDRESS_REQ_MSG SetSMSCAddressReq; + QMIWMS_SET_SMSC_ADDRESS_RESP_MSG SetSMSCAddressResp; + QMIWMS_GET_STORE_MAX_SIZE_REQ_MSG GetStoreMaxSizeReq; + QMIWMS_GET_STORE_MAX_SIZE_RESP_MSG GetStoreMaxSizeResp; + QMIWMS_LIST_MESSAGES_REQ_MSG ListMessagesReq; + QMIWMS_LIST_MESSAGES_RESP_MSG ListMessagesResp; + QMIWMS_RAW_READ_REQ_MSG RawReadMessagesReq; + QMIWMS_RAW_READ_RESP_MSG RawReadMessagesResp; + QMIWMS_SET_EVENT_REPORT_REQ_MSG WmsSetEventReportReq; + QMIWMS_SET_EVENT_REPORT_RESP_MSG WmsSetEventReportResp; + QMIWMS_EVENT_REPORT_IND_MSG WmsEventReportInd; + QMIWMS_DELETE_REQ_MSG WmsDeleteReq; + QMIWMS_DELETE_RESP_MSG WmsDeleteResp; + QMIWMS_RAW_SEND_REQ_MSG RawSendMessagesReq; + QMIWMS_RAW_SEND_RESP_MSG 
RawSendMessagesResp; + QMIWMS_MODIFY_TAG_REQ_MSG WmsModifyTagReq; + QMIWMS_MODIFY_TAG_RESP_MSG WmsModifyTagResp; +#endif + + // QMINAS Messages +#if 0 + QMINAS_GET_HOME_NETWORK_REQ_MSG GetHomeNetworkReq; + QMINAS_GET_HOME_NETWORK_RESP_MSG GetHomeNetworkResp; + QMINAS_GET_PREFERRED_NETWORK_REQ_MSG GetPreferredNetworkReq; + QMINAS_GET_PREFERRED_NETWORK_RESP_MSG GetPreferredNetworkResp; + QMINAS_GET_FORBIDDEN_NETWORK_REQ_MSG GetForbiddenNetworkReq; + QMINAS_GET_FORBIDDEN_NETWORK_RESP_MSG GetForbiddenNetworkResp; + QMINAS_GET_SERVING_SYSTEM_REQ_MSG GetServingSystemReq; +#endif + QMINAS_GET_SERVING_SYSTEM_RESP_MSG GetServingSystemResp; + QMINAS_GET_SYS_INFO_RESP_MSG GetSysInfoResp; + QMINAS_SYS_INFO_IND_MSG NasSysInfoInd; +#if 0 + QMINAS_SERVING_SYSTEM_IND_MSG NasServingSystemInd; + QMINAS_SET_PREFERRED_NETWORK_REQ_MSG SetPreferredNetworkReq; + QMINAS_SET_PREFERRED_NETWORK_RESP_MSG SetPreferredNetworkResp; + QMINAS_SET_FORBIDDEN_NETWORK_REQ_MSG SetForbiddenNetworkReq; + QMINAS_SET_FORBIDDEN_NETWORK_RESP_MSG SetForbiddenNetworkResp; + QMINAS_PERFORM_NETWORK_SCAN_REQ_MSG PerformNetworkScanReq; + QMINAS_PERFORM_NETWORK_SCAN_RESP_MSG PerformNetworkScanResp; + QMINAS_INITIATE_NW_REGISTER_REQ_MSG InitiateNwRegisterReq; + QMINAS_INITIATE_NW_REGISTER_RESP_MSG InitiateNwRegisterResp; + QMINAS_SET_TECHNOLOGY_PREF_REQ_MSG SetTechnologyPrefReq; + QMINAS_SET_TECHNOLOGY_PREF_RESP_MSG SetTechnologyPrefResp; + QMINAS_GET_SIGNAL_STRENGTH_REQ_MSG GetSignalStrengthReq; + QMINAS_GET_SIGNAL_STRENGTH_RESP_MSG GetSignalStrengthResp; + QMINAS_SET_EVENT_REPORT_REQ_MSG SetEventReportReq; + QMINAS_SET_EVENT_REPORT_RESP_MSG SetEventReportResp; + QMINAS_EVENT_REPORT_IND_MSG NasEventReportInd; + QMINAS_GET_RF_BAND_INFO_REQ_MSG GetRFBandInfoReq; + QMINAS_GET_RF_BAND_INFO_RESP_MSG GetRFBandInfoResp; + QMINAS_INITIATE_ATTACH_REQ_MSG InitiateAttachReq; + QMINAS_INITIATE_ATTACH_RESP_MSG InitiateAttachResp; + QMINAS_GET_PLMN_NAME_REQ_MSG GetPLMNNameReq; + QMINAS_GET_PLMN_NAME_RESP_MSG GetPLMNNameResp; +#endif + + // QMIUIM Messages + QMIUIM_GET_CARD_STATUS_RESP_MSG UIMGetCardStatus; + QMIUIM_VERIFY_PIN_REQ_MSG UIMUIMVerifyPinReq; + QMIUIM_VERIFY_PIN_RESP_MSG UIMUIMVerifyPinResp; +#if 0 + QMIUIM_SET_PIN_PROTECTION_REQ_MSG UIMUIMSetPinProtectionReq; + QMIUIM_SET_PIN_PROTECTION_RESP_MSG UIMUIMSetPinProtectionResp; + QMIUIM_CHANGE_PIN_REQ_MSG UIMUIMChangePinReq; + QMIUIM_CHANGE_PIN_RESP_MSG UIMUIMChangePinResp; + QMIUIM_UNBLOCK_PIN_REQ_MSG UIMUIMUnblockPinReq; + QMIUIM_UNBLOCK_PIN_RESP_MSG UIMUIMUnblockPinResp; +#endif + QMIUIM_READ_TRANSPARENT_REQ_MSG UIMUIMReadTransparentReq; + QMIUIM_READ_TRANSPARENT_RESP_MSG UIMUIMReadTransparentResp; + QMIUIM_SET_CARD_SLOT_REQ_MSG UIMSetCardSlotReq; + + QMIWDS_ADMIN_SET_DATA_FORMAT_REQ_MSG SetDataFormatReq; + QMI_WDA_SET_LOOPBACK_CONFIG_REQ_MSG SetLoopBackReq; + QMI_WDA_SET_LOOPBACK_CONFIG_IND_MSG SetLoopBackInd; + }; +} __attribute__ ((packed)) QMUX_MSG, *PQMUX_MSG; + +typedef struct _QCQMIMSG { + QCQMI_HDR QMIHdr; + union { + QMICTL_MSG CTLMsg; + QMUX_MSG MUXMsg; + }; +} __attribute__ ((packed)) QCQMIMSG, *PQCQMIMSG; + +#pragma pack(pop) + +#endif // QCQMUX_H + diff --git a/wwan/app/quectel_cm_5G/src/QMIThread.c b/wwan/app/quectel_cm_5G/src/QMIThread.c new file mode 100644 index 0000000..8f7e866 --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/QMIThread.c @@ -0,0 +1,3037 @@ +/****************************************************************************** + @file QMIThread.c + @brief QMI WWAN connectivity manager. 
+ + DESCRIPTION + Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. + + --------------------------------------------------------------------------- + Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved. + Quectel Wireless Solution Proprietary and Confidential. + --------------------------------------------------------------------------- +******************************************************************************/ +#include "QMIThread.h" +#include + +#ifndef MIN +#define MIN(a, b) ((a) < (b)? (a): (b)) +#endif + +#define qmi_rsp_check_and_return() do { \ + if (err < 0 || pResponse == NULL) { \ + dbg_time("%s err = %d", __func__, err); \ + return err; \ + } \ + pMUXMsg = &pResponse->MUXMsg; \ + if (le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXResult) || le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXError)) { \ + USHORT QMUXError = le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXError); \ + dbg_time("%s QMUXResult = 0x%x, QMUXError = 0x%x", __func__, \ + le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXResult), QMUXError); \ + free(pResponse); \ + return QMUXError; \ + } \ +} while(0) + +#define qmi_rsp_check() do { \ + if (err < 0 || pResponse == NULL) { \ + dbg_time("%s err = %d", __func__, err); \ + return err; \ + } \ + pMUXMsg = &pResponse->MUXMsg; \ + if (le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXResult) || le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXError)) { \ + USHORT QMUXError = le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXError); \ + dbg_time("%s QMUXResult = 0x%x, QMUXError = 0x%x", __func__, \ + le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXResult), QMUXError); \ + } \ +} while(0) + +static uint32_t WdsConnectionIPv4Handle = 0; +static uint32_t WdsConnectionIPv6Handle = 0; +static int s_is_cdma = 0; +static int s_5g_type = WWAN_DATA_CLASS_NONE; +static int s_hdr_personality = 0; // 0x01-HRPD, 0x02-eHRPD +static char *qstrcpy(char *to, const char *from) { //no __strcpy_chk + char *save = to; + for (; (*to = *from) != '\0'; ++from, ++to); + return(save); +} + +static void uchar2char(char *dst_ptr, size_t dst_len, const UCHAR *src_ptr, size_t src_len) { + size_t copy = MIN(dst_len-1, src_len); + + if (copy) + memcpy(dst_ptr, src_ptr, copy); + dst_ptr[copy] = 0; +} + +static int s_9x07 = 1; + +typedef USHORT (*CUSTOMQMUX)(PQMUX_MSG pMUXMsg, void *arg); + +// To retrieve the ith (Index) TLV +PQMI_TLV_HDR GetTLV (PQCQMUX_MSG_HDR pQMUXMsgHdr, int TLVType) { + int TLVFind = 0; + USHORT Length = le16_to_cpu(pQMUXMsgHdr->Length); + PQMI_TLV_HDR pTLVHdr = (PQMI_TLV_HDR)(pQMUXMsgHdr + 1); + + while (Length >= sizeof(QMI_TLV_HDR)) { + TLVFind++; + if (TLVType > 0x1000) { + if ((TLVFind + 0x1000) == TLVType) + return pTLVHdr; + } else if (pTLVHdr->TLVType == TLVType) { + return pTLVHdr; + } + + Length -= (le16_to_cpu((pTLVHdr->TLVLength)) + sizeof(QMI_TLV_HDR)); + pTLVHdr = (PQMI_TLV_HDR)(((UCHAR *)pTLVHdr) + le16_to_cpu(pTLVHdr->TLVLength) + sizeof(QMI_TLV_HDR)); + } + + return NULL; +} + +static USHORT GetQMUXTransactionId(void) { + static int TransactionId = 0; + if (++TransactionId > 0xFFFF) + TransactionId = 1; + return TransactionId; +} + +static PQCQMIMSG ComposeQMUXMsg(UCHAR QMIType, USHORT Type, CUSTOMQMUX customQmuxMsgFunction, void *arg) { + UCHAR QMIBuf[WDM_DEFAULT_BUFSIZE]; + PQCQMIMSG pRequest = (PQCQMIMSG)QMIBuf; + int Length; + + memset(QMIBuf, 0x00, sizeof(QMIBuf)); + pRequest->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI; + pRequest->QMIHdr.CtlFlags = 0x00; + pRequest->QMIHdr.QMIType = QMIType; + + 
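/* For reference (an illustrative note; field widths assumed from the packed QCQMI/QCQMUX typedefs): the request assembled here is framed as IFType(1) | Length(2) CtlFlags(1) QMIType(1) ClientId(1) (QCQMI_HDR), then CtlFlags(1) TransactionId(2) (QCQMUX_HDR), then Type(2) Length(2) (QCQMUX_MSG_HDR), then the TLVs (Type(1) Length(2) Value) that GetTLV() above walks. QMIHdr.Length is set below to the frame total minus 1, i.e. the leading IFType byte is not counted, which is why "+ 1" reappears wherever a full buffer length is derived from it. */ +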
pRequest->MUXMsg.QMUXHdr.CtlFlags = QMUX_CTL_FLAG_SINGLE_MSG | QMUX_CTL_FLAG_TYPE_CMD; + pRequest->MUXMsg.QMUXHdr.TransactionId = cpu_to_le16(GetQMUXTransactionId()); + pRequest->MUXMsg.QMUXMsgHdr.Type = cpu_to_le16(Type); + if (customQmuxMsgFunction) + pRequest->MUXMsg.QMUXMsgHdr.Length = cpu_to_le16(customQmuxMsgFunction(&pRequest->MUXMsg, arg) - sizeof(QCQMUX_MSG_HDR)); + else + pRequest->MUXMsg.QMUXMsgHdr.Length = cpu_to_le16(0x0000); + + pRequest->QMIHdr.Length = cpu_to_le16(le16_to_cpu(pRequest->MUXMsg.QMUXMsgHdr.Length) + sizeof(QCQMUX_MSG_HDR) + sizeof(QCQMUX_HDR) + + sizeof(QCQMI_HDR) - 1); + Length = le16_to_cpu(pRequest->QMIHdr.Length) + 1; + + pRequest = (PQCQMIMSG)malloc(Length); + if (pRequest == NULL) { + dbg_time("%s fail to malloc", __func__); + } else { + memcpy(pRequest, QMIBuf, Length); + } + + return pRequest; +} + +#if 0 +static USHORT NasSetEventReportReq(PQMUX_MSG pMUXMsg, void *arg) { + pMUXMsg->SetEventReportReq.TLVType = 0x10; + pMUXMsg->SetEventReportReq.TLVLength = 0x04; + pMUXMsg->SetEventReportReq.ReportSigStrength = 0x00; + pMUXMsg->SetEventReportReq.NumTresholds = 2; + pMUXMsg->SetEventReportReq.TresholdList[0] = -113; + pMUXMsg->SetEventReportReq.TresholdList[1] = -50; + return sizeof(QMINAS_SET_EVENT_REPORT_REQ_MSG); +} + +static USHORT WdsSetEventReportReq(PQMUX_MSG pMUXMsg, void *arg) { + pMUXMsg->EventReportReq.TLVType = 0x10; // 0x10 -- current channel rate indicator + pMUXMsg->EventReportReq.TLVLength = 0x0001; // 1 + pMUXMsg->EventReportReq.Mode = 0x00; // 0-do not report; 1-report when rate changes + + pMUXMsg->EventReportReq.TLV2Type = 0x11; // 0x11 + pMUXMsg->EventReportReq.TLV2Length = 0x0005; // 5 + pMUXMsg->EventReportReq.StatsPeriod = 0x00; // seconds between reports; 0-do not report + pMUXMsg->EventReportReq.StatsMask = 0x000000ff; // + + pMUXMsg->EventReportReq.TLV3Type = 0x12; // 0x12 -- current data bearer indicator + pMUXMsg->EventReportReq.TLV3Length = 0x0001; // 1 + pMUXMsg->EventReportReq.Mode3 = 0x01; // 0-do not report; 1-report when changes + + pMUXMsg->EventReportReq.TLV4Type = 0x13; // 0x13 -- dormancy status indicator + pMUXMsg->EventReportReq.TLV4Length = 0x0001; // 1 + pMUXMsg->EventReportReq.DormancyStatus = 0x00; // 0-do not report; 1-report when changes + return sizeof(QMIWDS_SET_EVENT_REPORT_REQ_MSG); +} + +static USHORT DmsSetEventReportReq(PQMUX_MSG pMUXMsg) { + PPIN_STATUS pPinState = (PPIN_STATUS)(&pMUXMsg->DmsSetEventReportReq + 1); + PUIM_STATE pUimState = (PUIM_STATE)(pPinState + 1); + // Pin State + pPinState->TLVType = 0x12; + pPinState->TLVLength = 0x01; + pPinState->ReportPinState = 0x01; + // UIM State + pUimState->TLVType = 0x15; + pUimState->TLVLength = 0x01; + pUimState->UIMState = 0x01; + return sizeof(QMIDMS_SET_EVENT_REPORT_REQ_MSG) + sizeof(PIN_STATUS) + sizeof(UIM_STATE); +} +#endif + +static USHORT WdsStartNwInterfaceReq(PQMUX_MSG pMUXMsg, void *arg) { + PQMIWDS_TECHNOLOGY_PREFERECE pTechPref; + PQMIWDS_AUTH_PREFERENCE pAuthPref; + PQMIWDS_USERNAME pUserName; + PQMIWDS_PASSWD pPasswd; + PQMIWDS_APNNAME pApnName; + PQMIWDS_IP_FAMILY_TLV pIpFamily; + USHORT TLVLength = 0; + UCHAR *pTLV; + PROFILE_T *profile = (PROFILE_T *)arg; + const char *profile_user = profile->user; + const char *profile_password = profile->password; + int profile_auth = profile->auth; + + if (s_is_cdma && (profile_user == NULL || profile_user[0] == '\0') && (profile_password == NULL || profile_password[0] == '\0')) { + profile_user = "ctnet@mycdma.cn"; + profile_password = "vnet.mobi"; + profile_auth = 2; //chap + } + + pTLV = 
(UCHAR *)(&pMUXMsg->StartNwInterfaceReq + 1); + pMUXMsg->StartNwInterfaceReq.Length = 0; + + // Set technology Preferece + pTechPref = (PQMIWDS_TECHNOLOGY_PREFERECE)(pTLV + TLVLength); + pTechPref->TLVType = 0x30; + pTechPref->TLVLength = cpu_to_le16(0x01); + if (s_is_cdma == 0) + pTechPref->TechPreference = 0x01; + else + pTechPref->TechPreference = 0x02; + TLVLength +=(le16_to_cpu(pTechPref->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + + // Set APN Name + if (profile->apn && !s_is_cdma) { //cdma no apn + pApnName = (PQMIWDS_APNNAME)(pTLV + TLVLength); + pApnName->TLVType = 0x14; + pApnName->TLVLength = cpu_to_le16(strlen(profile->apn)); + qstrcpy((char *)&pApnName->ApnName, profile->apn); + TLVLength +=(le16_to_cpu(pApnName->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + } + + // Set User Name + if (profile_user) { + pUserName = (PQMIWDS_USERNAME)(pTLV + TLVLength); + pUserName->TLVType = 0x17; + pUserName->TLVLength = cpu_to_le16(strlen(profile_user)); + qstrcpy((char *)&pUserName->UserName, profile_user); + TLVLength += (le16_to_cpu(pUserName->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + } + + // Set Password + if (profile_password) { + pPasswd = (PQMIWDS_PASSWD)(pTLV + TLVLength); + pPasswd->TLVType = 0x18; + pPasswd->TLVLength = cpu_to_le16(strlen(profile_password)); + qstrcpy((char *)&pPasswd->Passwd, profile_password); + TLVLength += (le16_to_cpu(pPasswd->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + } + + // Set Auth Protocol + if (profile_user && profile_password) { + pAuthPref = (PQMIWDS_AUTH_PREFERENCE)(pTLV + TLVLength); + pAuthPref->TLVType = 0x16; + pAuthPref->TLVLength = cpu_to_le16(0x01); + pAuthPref->AuthPreference = profile_auth; // 0 ~ None, 1 ~ Pap, 2 ~ Chap, 3 ~ MsChapV2 + TLVLength += (le16_to_cpu(pAuthPref->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + } + + // Add IP Family Preference + pIpFamily = (PQMIWDS_IP_FAMILY_TLV)(pTLV + TLVLength); + pIpFamily->TLVType = 0x19; + pIpFamily->TLVLength = cpu_to_le16(0x01); + pIpFamily->IpFamily = profile->curIpFamily; + TLVLength += (le16_to_cpu(pIpFamily->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + + //Set Profile Index + if (profile->profile_index && !s_is_cdma) { //cdma only support one pdp, so no need to set profile index + PQMIWDS_PROFILE_IDENTIFIER pProfileIndex = (PQMIWDS_PROFILE_IDENTIFIER)(pTLV + TLVLength); + pProfileIndex->TLVLength = cpu_to_le16(0x01); + pProfileIndex->TLVType = 0x31; + pProfileIndex->ProfileIndex = profile->profile_index; + if (s_is_cdma && s_hdr_personality == 0x02) { + pProfileIndex->TLVType = 0x32; //profile_index_3gpp2 + pProfileIndex->ProfileIndex = 101; + } + TLVLength += (le16_to_cpu(pProfileIndex->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + } + + return sizeof(QMIWDS_START_NETWORK_INTERFACE_REQ_MSG) + TLVLength; +} + +static USHORT WdsStopNwInterfaceReq(PQMUX_MSG pMUXMsg, void *arg) { + pMUXMsg->StopNwInterfaceReq.TLVType = 0x01; + pMUXMsg->StopNwInterfaceReq.TLVLength = cpu_to_le16(0x04); + if (*((int *)arg) == IpFamilyV4) + pMUXMsg->StopNwInterfaceReq.Handle = cpu_to_le32(WdsConnectionIPv4Handle); + else + pMUXMsg->StopNwInterfaceReq.Handle = cpu_to_le32(WdsConnectionIPv6Handle); + return sizeof(QMIWDS_STOP_NETWORK_INTERFACE_REQ_MSG); +} + +static USHORT WdsSetClientIPFamilyPref(PQMUX_MSG pMUXMsg, void *arg) { + pMUXMsg->SetClientIpFamilyPrefReq.TLVType = 0x01; + pMUXMsg->SetClientIpFamilyPrefReq.TLVLength = cpu_to_le16(0x01); + pMUXMsg->SetClientIpFamilyPrefReq.IpPreference = *((UCHAR *)arg); + return sizeof(QMIWDS_SET_CLIENT_IP_FAMILY_PREF_REQ_MSG); +} + +static USHORT WdsSetAutoConnect(PQMUX_MSG pMUXMsg, 
void *arg) { + pMUXMsg->SetAutoConnectReq.TLVType = 0x01; + pMUXMsg->SetAutoConnectReq.TLVLength = cpu_to_le16(0x01); + pMUXMsg->SetAutoConnectReq.autoconnect_setting = *((UCHAR *)arg); + return sizeof(QMIWDS_SET_AUTO_CONNECT_REQ_MSG); +} + +enum peripheral_ep_type { + DATA_EP_TYPE_RESERVED = 0x0, + DATA_EP_TYPE_HSIC = 0x1, + DATA_EP_TYPE_HSUSB = 0x2, + DATA_EP_TYPE_PCIE = 0x3, + DATA_EP_TYPE_EMBEDDED = 0x4, + DATA_EP_TYPE_BAM_DMUX = 0x5, +}; + +static USHORT WdsSetQMUXBindMuxDataPort(PQMUX_MSG pMUXMsg, void *arg) { + QMAP_SETTING *qmap_settings = (QMAP_SETTING *)arg; + + pMUXMsg->BindMuxDataPortReq.TLVType = 0x10; + pMUXMsg->BindMuxDataPortReq.TLVLength = cpu_to_le16(0x08); + pMUXMsg->BindMuxDataPortReq.ep_type = cpu_to_le32(qmap_settings->ep_type); + pMUXMsg->BindMuxDataPortReq.iface_id = cpu_to_le32(qmap_settings->iface_id); + pMUXMsg->BindMuxDataPortReq.TLV2Type = 0x11; + pMUXMsg->BindMuxDataPortReq.TLV2Length = cpu_to_le16(0x01); + pMUXMsg->BindMuxDataPortReq.MuxId = qmap_settings->MuxId; + pMUXMsg->BindMuxDataPortReq.TLV3Type = 0x13; + pMUXMsg->BindMuxDataPortReq.TLV3Length = cpu_to_le16(0x04); + pMUXMsg->BindMuxDataPortReq.client_type = cpu_to_le32(1); //WDS_CLIENT_TYPE_TETHERED + + return sizeof(QMIWDS_BIND_MUX_DATA_PORT_REQ_MSG); +} + +static int qmap_version = 0x05; +static USHORT WdaSetDataFormat(PQMUX_MSG pMUXMsg, void *arg) { + QMAP_SETTING *qmap_settings = (QMAP_SETTING *)arg; + + if (qmap_settings->rx_urb_size == 0) { + PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV_QOS pWdsAdminQosTlv; + PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV linkProto; + PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV dlTlp; + + pWdsAdminQosTlv = (PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV_QOS)(&pMUXMsg->QMUXMsgHdr + 1); + pWdsAdminQosTlv->TLVType = 0x10; + pWdsAdminQosTlv->TLVLength = cpu_to_le16(0x0001); + pWdsAdminQosTlv->QOSSetting = 0; /* no-QOS header */ + + linkProto = (PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV)(pWdsAdminQosTlv + 1); + linkProto->TLVType = 0x11; + linkProto->TLVLength = cpu_to_le16(4); + linkProto->Value = cpu_to_le32(0x01); /* Set Ethernet mode */ + + dlTlp = (PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV)(linkProto + 1); + dlTlp->TLVType = 0x13; + dlTlp->TLVLength = cpu_to_le16(4); + dlTlp->Value = cpu_to_le32(0x00); + + if (sizeof(*linkProto) != 7) + dbg_time("%s sizeof(*linkProto) = %zu, is not 7!", __func__, sizeof(*linkProto)); + + return sizeof(QCQMUX_MSG_HDR) + sizeof(*pWdsAdminQosTlv) + sizeof(*linkProto) + sizeof(*dlTlp); + } + else { + //Indicates whether the Quality of Service (QOS) data format is used by the client. + pMUXMsg->SetDataFormatReq.QosDataFormatTlv.TLVType = 0x10; + pMUXMsg->SetDataFormatReq.QosDataFormatTlv.TLVLength = cpu_to_le16(0x0001); + pMUXMsg->SetDataFormatReq.QosDataFormatTlv.QOSSetting = 0; /* no-QOS header */ + + //Underlying Link Layer Protocol + pMUXMsg->SetDataFormatReq.UnderlyingLinkLayerProtocolTlv.TLVType = 0x11; + pMUXMsg->SetDataFormatReq.UnderlyingLinkLayerProtocolTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->SetDataFormatReq.UnderlyingLinkLayerProtocolTlv.Value = cpu_to_le32(0x02); /* Set IP mode */ + + //Uplink (UL) data aggregation protocol to be used for uplink data transfer. 
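+ // (Sizing sketch for the data-format TLVs below, illustrative numbers only: + // TLVs 0x12/0x13 carry qmap_version on UL/DL; with rx_urb_size = 32768, the + // 32KB noted for SDX24/SDX55 in requestSetEthMode(), TLV 0x15 advertises + // 32768/512 = 64 datagrams per downlink aggregate and TLV 0x16 advertises + // 32768 bytes; requestSetEthMode() reads back the values the modem actually + // accepted from the response TLVs 0x16, 0x17, 0x18 and 0x1A via GetTLV().)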
+ pMUXMsg->SetDataFormatReq.UplinkDataAggregationProtocolTlv.TLVType = 0x12; + pMUXMsg->SetDataFormatReq.UplinkDataAggregationProtocolTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->SetDataFormatReq.UplinkDataAggregationProtocolTlv.Value = cpu_to_le32(qmap_version); //UL QMAP is enabled + + //Downlink (DL) data aggregation protocol to be used for downlink data transfer + pMUXMsg->SetDataFormatReq.DownlinkDataAggregationProtocolTlv.TLVType = 0x13; + pMUXMsg->SetDataFormatReq.DownlinkDataAggregationProtocolTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->SetDataFormatReq.DownlinkDataAggregationProtocolTlv.Value = cpu_to_le32(qmap_version); //DL QMAP is enabled + + //Maximum number of datagrams in a single aggregated packet on downlink + pMUXMsg->SetDataFormatReq.DownlinkDataAggregationMaxDatagramsTlv.TLVType = 0x15; + pMUXMsg->SetDataFormatReq.DownlinkDataAggregationMaxDatagramsTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->SetDataFormatReq.DownlinkDataAggregationMaxDatagramsTlv.Value = cpu_to_le32(qmap_settings->rx_urb_size/512); + + //Maximum size in bytes of a single aggregated packet allowed on downlink + pMUXMsg->SetDataFormatReq.DownlinkDataAggregationMaxSizeTlv.TLVType = 0x16; + pMUXMsg->SetDataFormatReq.DownlinkDataAggregationMaxSizeTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->SetDataFormatReq.DownlinkDataAggregationMaxSizeTlv.Value = cpu_to_le32(qmap_settings->rx_urb_size); + + //Peripheral End Point ID + pMUXMsg->SetDataFormatReq.epTlv.TLVType = 0x17; + pMUXMsg->SetDataFormatReq.epTlv.TLVLength = cpu_to_le16(8); + pMUXMsg->SetDataFormatReq.epTlv.ep_type = cpu_to_le32(qmap_settings->ep_type); + pMUXMsg->SetDataFormatReq.epTlv.iface_id = cpu_to_le32(qmap_settings->iface_id); + +#ifdef QUECTEL_UL_DATA_AGG + if (!qmap_settings->ul_data_aggregation_max_datagrams) { + return ((size_t)&((QMIWDS_ADMIN_SET_DATA_FORMAT_REQ_MSG *)0)->DlMinimumPassingTlv); + } + + //Minimum padding bytes between aggregated downlink packets + pMUXMsg->SetDataFormatReq.DlMinimumPassingTlv.TLVType = 0x19; + pMUXMsg->SetDataFormatReq.DlMinimumPassingTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->SetDataFormatReq.DlMinimumPassingTlv.Value = cpu_to_le32(qmap_settings->dl_minimum_padding); + + //Maximum number of datagrams in a single aggregated packet on uplink + pMUXMsg->SetDataFormatReq.UplinkDataAggregationMaxDatagramsTlv.TLVType = 0x1B; + pMUXMsg->SetDataFormatReq.UplinkDataAggregationMaxDatagramsTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->SetDataFormatReq.UplinkDataAggregationMaxDatagramsTlv.Value = cpu_to_le32(qmap_settings->ul_data_aggregation_max_datagrams); + + //Maximum size in bytes of a single aggregated packet allowed on uplink + pMUXMsg->SetDataFormatReq.UplinkDataAggregationMaxSizeTlv.TLVType = 0x1C; + pMUXMsg->SetDataFormatReq.UplinkDataAggregationMaxSizeTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->SetDataFormatReq.UplinkDataAggregationMaxSizeTlv.Value = cpu_to_le32(qmap_settings->ul_data_aggregation_max_size); +#endif + + return sizeof(QMIWDS_ADMIN_SET_DATA_FORMAT_REQ_MSG); + } +} + +#ifdef CONFIG_SIM +static USHORT DmsUIMVerifyPinReqSend(PQMUX_MSG pMUXMsg, void *arg) { + pMUXMsg->UIMVerifyPinReq.TLVType = 0x01; + pMUXMsg->UIMVerifyPinReq.PINID = 0x01; //Pin1, not Puk + pMUXMsg->UIMVerifyPinReq.PINLen = strlen((const char *)arg); + qstrcpy((char *)&pMUXMsg->UIMVerifyPinReq.PINValue, ((const char *)arg)); + pMUXMsg->UIMVerifyPinReq.TLVLength = cpu_to_le16(2 + strlen((const char *)arg)); + return sizeof(QMIDMS_UIM_VERIFY_PIN_REQ_MSG) + (strlen((const char *)arg) - 1); +} + +static USHORT 
UimVerifyPinReqSend(PQMUX_MSG pMUXMsg, void *arg) +{ + pMUXMsg->UIMUIMVerifyPinReq.TLVType = 0x01; + pMUXMsg->UIMUIMVerifyPinReq.TLVLength = cpu_to_le16(0x02); + pMUXMsg->UIMUIMVerifyPinReq.Session_Type = 0x00; + pMUXMsg->UIMUIMVerifyPinReq.Aid_Len = 0x00; + pMUXMsg->UIMUIMVerifyPinReq.TLV2Type = 0x02; + pMUXMsg->UIMUIMVerifyPinReq.TLV2Length = cpu_to_le16(2 + strlen((const char *)arg)); + pMUXMsg->UIMUIMVerifyPinReq.PINID = 0x01; //Pin1, not Puk + pMUXMsg->UIMUIMVerifyPinReq.PINLen= strlen((const char *)arg); + qstrcpy((char *)&pMUXMsg->UIMUIMVerifyPinReq.PINValue, ((const char *)arg)); + return sizeof(QMIUIM_VERIFY_PIN_REQ_MSG) + (strlen((const char *)arg) - 1); +} + +#ifdef CONFIG_IMSI_ICCID +static USHORT UimReadTransparentIMSIReqSend(PQMUX_MSG pMUXMsg, void *arg) { + PREAD_TRANSPARENT_TLV pReadTransparent; + + pMUXMsg->UIMUIMReadTransparentReq.TLVType = 0x01; + pMUXMsg->UIMUIMReadTransparentReq.TLVLength = cpu_to_le16(0x02); + if (!strcmp((char *)arg, "EF_ICCID")) { + pMUXMsg->UIMUIMReadTransparentReq.Session_Type = 0x06; + pMUXMsg->UIMUIMReadTransparentReq.Aid_Len = 0x00; + + pMUXMsg->UIMUIMReadTransparentReq.TLV2Type = 0x02; + pMUXMsg->UIMUIMReadTransparentReq.file_id = cpu_to_le16(0x2FE2); + pMUXMsg->UIMUIMReadTransparentReq.path_len = 0x02; + pMUXMsg->UIMUIMReadTransparentReq.path[0] = 0x00; + pMUXMsg->UIMUIMReadTransparentReq.path[1] = 0x3F; + } + else if(!strcmp((char *)arg, "EF_IMSI")) { + pMUXMsg->UIMUIMReadTransparentReq.Session_Type = 0x00; + pMUXMsg->UIMUIMReadTransparentReq.Aid_Len = 0x00; + + pMUXMsg->UIMUIMReadTransparentReq.TLV2Type = 0x02; + pMUXMsg->UIMUIMReadTransparentReq.file_id = cpu_to_le16(0x6F07); + pMUXMsg->UIMUIMReadTransparentReq.path_len = 0x04; + pMUXMsg->UIMUIMReadTransparentReq.path[0] = 0x00; + pMUXMsg->UIMUIMReadTransparentReq.path[1] = 0x3F; + pMUXMsg->UIMUIMReadTransparentReq.path[2] = 0xFF; + pMUXMsg->UIMUIMReadTransparentReq.path[3] = 0x7F; + } + + pMUXMsg->UIMUIMReadTransparentReq.TLV2Length = cpu_to_le16(3 + pMUXMsg->UIMUIMReadTransparentReq.path_len); + + pReadTransparent = (PREAD_TRANSPARENT_TLV)(&pMUXMsg->UIMUIMReadTransparentReq.path[pMUXMsg->UIMUIMReadTransparentReq.path_len]); + pReadTransparent->TLVType = 0x03; + pReadTransparent->TLVLength = cpu_to_le16(0x04); + pReadTransparent->Offset = cpu_to_le16(0x00); + pReadTransparent->Length = cpu_to_le16(0x00); + + return (sizeof(QMIUIM_READ_TRANSPARENT_REQ_MSG) + pMUXMsg->UIMUIMReadTransparentReq.path_len + sizeof(READ_TRANSPARENT_TLV)); +} +#endif +#endif + +#ifdef CONFIG_APN + +static USHORT WdsGetProfileListReqSend(PQMUX_MSG pMUXMsg, void *arg) { + (void)(arg); + pMUXMsg->GetProfileListReq.Length = cpu_to_le16(sizeof(QMIWDS_GET_PROFILE_LIST_REQ_MSG) - 4); + return sizeof(QMIWDS_GET_PROFILE_LIST_REQ_MSG); +} + +static USHORT WdsCreateProfileSettingsReqSend(PQMUX_MSG pMUXMsg, void *arg) { + PROFILE_T *profile = (PROFILE_T *)arg; + pMUXMsg->CreatetProfileSettingsReq.Length = cpu_to_le16(sizeof(QMIWDS_CREATE_PROFILE_SETTINGS_REQ_MSG) - 4); + pMUXMsg->CreatetProfileSettingsReq.TLVType = 0x01; + pMUXMsg->CreatetProfileSettingsReq.TLVLength = cpu_to_le16(0x01); + pMUXMsg->CreatetProfileSettingsReq.ProfileType = 0x00; // 0 ~ 3GPP, 1 ~ 3GPP2 + pMUXMsg->CreatetProfileSettingsReq.TLV2Type = 0x25; + pMUXMsg->CreatetProfileSettingsReq.TLV2Length = cpu_to_le16(0x01); + pMUXMsg->CreatetProfileSettingsReq.pdp_context = profile->pdp; // 0 ~ 3GPP, 1 ~ 3GPP2 + return sizeof(QMIWDS_CREATE_PROFILE_SETTINGS_REQ_MSG); +} + +static USHORT WdsGetProfileSettingsReqSend(PQMUX_MSG pMUXMsg, void *arg) { + PROFILE_T 
*profile = (PROFILE_T *)arg; + pMUXMsg->GetProfileSettingsReq.Length = cpu_to_le16(sizeof(QMIWDS_GET_PROFILE_SETTINGS_REQ_MSG) - 4); + pMUXMsg->GetProfileSettingsReq.TLVType = 0x01; + pMUXMsg->GetProfileSettingsReq.TLVLength = cpu_to_le16(0x02); + pMUXMsg->GetProfileSettingsReq.ProfileType = 0x00; // 0 ~ 3GPP, 1 ~ 3GPP2 + pMUXMsg->GetProfileSettingsReq.ProfileIndex = profile->profile_index; + return sizeof(QMIWDS_GET_PROFILE_SETTINGS_REQ_MSG); +} + +static USHORT WdsModifyProfileSettingsReq(PQMUX_MSG pMUXMsg, void *arg) { + USHORT TLVLength = 0; + UCHAR *pTLV; + PROFILE_T *profile = (PROFILE_T *)arg; + PQMIWDS_PDPTYPE pPdpType; + + pMUXMsg->ModifyProfileSettingsReq.Length = cpu_to_le16(sizeof(QMIWDS_MODIFY_PROFILE_SETTINGS_REQ_MSG) - 4); + pMUXMsg->ModifyProfileSettingsReq.TLVType = 0x01; + pMUXMsg->ModifyProfileSettingsReq.TLVLength = cpu_to_le16(0x02); + pMUXMsg->ModifyProfileSettingsReq.ProfileType = 0x00; // 0 ~ 3GPP, 1 ~ 3GPP2 + pMUXMsg->ModifyProfileSettingsReq.ProfileIndex = profile->profile_index; + + pTLV = (UCHAR *)(&pMUXMsg->ModifyProfileSettingsReq + 1); + + pPdpType = (PQMIWDS_PDPTYPE)(pTLV + TLVLength); + pPdpType->TLVType = 0x11; + pPdpType->TLVLength = cpu_to_le16(0x01); + pPdpType->PdpType = profile->iptype; + TLVLength +=(le16_to_cpu(pPdpType->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + + // Set APN Name + if (profile->apn) { + PQMIWDS_APNNAME pApnName = (PQMIWDS_APNNAME)(pTLV + TLVLength); + pApnName->TLVType = 0x14; + pApnName->TLVLength = cpu_to_le16(strlen(profile->apn)); + qstrcpy((char *)&pApnName->ApnName, profile->apn); + TLVLength +=(le16_to_cpu(pApnName->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + } + + // Set User Name + if (profile->user) { + PQMIWDS_USERNAME pUserName = (PQMIWDS_USERNAME)(pTLV + TLVLength); + pUserName->TLVType = 0x1B; + pUserName->TLVLength = cpu_to_le16(strlen(profile->user)); + qstrcpy((char *)&pUserName->UserName, profile->user); + TLVLength += (le16_to_cpu(pUserName->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + } + + // Set Password + if (profile->password) { + PQMIWDS_PASSWD pPasswd = (PQMIWDS_PASSWD)(pTLV + TLVLength); + pPasswd->TLVType = 0x1C; + pPasswd->TLVLength = cpu_to_le16(strlen(profile->password)); + qstrcpy((char *)&pPasswd->Passwd, profile->password); + TLVLength +=(le16_to_cpu(pPasswd->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + } + + // Set Auth Protocol + if (profile->user && profile->password) { + PQMIWDS_AUTH_PREFERENCE pAuthPref = (PQMIWDS_AUTH_PREFERENCE)(pTLV + TLVLength); + pAuthPref->TLVType = 0x1D; + pAuthPref->TLVLength = cpu_to_le16(0x01); + pAuthPref->AuthPreference = profile->auth; // 0 ~ None, 1 ~ Pap, 2 ~ Chap, 3 ~ MsChapV2 + TLVLength += (le16_to_cpu(pAuthPref->TLVLength) + sizeof(QCQMICTL_TLV_HDR)); + } + + return sizeof(QMIWDS_MODIFY_PROFILE_SETTINGS_REQ_MSG) + TLVLength; +} +#endif + +static USHORT WdsGetRuntimeSettingReq(PQMUX_MSG pMUXMsg, void *arg) +{ + (void)arg; + pMUXMsg->GetRuntimeSettingsReq.TLVType = 0x10; + pMUXMsg->GetRuntimeSettingsReq.TLVLength = cpu_to_le16(0x04); + // the following mask also applies to IPV6 + pMUXMsg->GetRuntimeSettingsReq.Mask = cpu_to_le32(QMIWDS_GET_RUNTIME_SETTINGS_MASK_IPV4DNS_ADDR | + QMIWDS_GET_RUNTIME_SETTINGS_MASK_IPV4_ADDR | + QMIWDS_GET_RUNTIME_SETTINGS_MASK_MTU | + QMIWDS_GET_RUNTIME_SETTINGS_MASK_IPV4GATEWAY_ADDR | + QMIWDS_GET_RUNTIME_SETTINGS_MASK_PCSCF_SV_ADDR | + QMIWDS_GET_RUNTIME_SETTINGS_MASK_PCSCF_DOM_NAME); + + return sizeof(QMIWDS_GET_RUNTIME_SETTINGS_REQ_MSG); +} + +static PQCQMIMSG s_pRequest; +static PQCQMIMSG s_pResponse; + +static int is_response(const 
PQCQMIMSG pRequest, const PQCQMIMSG pResponse) { + if ((pRequest->QMIHdr.QMIType == pResponse->QMIHdr.QMIType) + && (pRequest->QMIHdr.ClientId == pResponse->QMIHdr.ClientId)) { + USHORT requestTID, responseTID; + if (pRequest->QMIHdr.QMIType == QMUX_TYPE_CTL) { + requestTID = pRequest->CTLMsg.QMICTLMsgHdr.TransactionId; + responseTID = pResponse->CTLMsg.QMICTLMsgHdr.TransactionId; + } else { + requestTID = le16_to_cpu(pRequest->MUXMsg.QMUXHdr.TransactionId); + responseTID = le16_to_cpu(pResponse->MUXMsg.QMUXHdr.TransactionId); + } + return (requestTID == responseTID); + } + return 0; +} + +int (*qmidev_send)(PQCQMIMSG pRequest); + +int QmiThreadSendQMITimeout(PQCQMIMSG pRequest, PQCQMIMSG *ppResponse, unsigned msecs, const char *funcname) { + int ret; + + if (!pRequest) + return -EINVAL; + + pthread_mutex_lock(&cm_command_mutex); + + if (ppResponse) + *ppResponse = NULL; + + dump_qmi(pRequest, le16_to_cpu(pRequest->QMIHdr.Length) + 1); + + s_pRequest = pRequest; + s_pResponse = NULL; + + ret = qmidev_send(pRequest); + + if (ret == 0) { + ret = pthread_cond_timeout_np(&cm_command_cond, &cm_command_mutex, msecs); + if (!ret) { + if (s_pResponse && ppResponse) { + *ppResponse = s_pResponse; + } else { + if (s_pResponse) { + free(s_pResponse); + s_pResponse = NULL; + } + } + } else { + dbg_time("%s message timeout", funcname); + } + } + + pthread_mutex_unlock(&cm_command_mutex); + + return ret; +} + +void QmiThreadRecvQMI(PQCQMIMSG pResponse) { + pthread_mutex_lock(&cm_command_mutex); + if (pResponse == NULL) { + if (s_pRequest) { + free(s_pRequest); + s_pRequest = NULL; + s_pResponse = NULL; + pthread_cond_signal(&cm_command_cond); + } + pthread_mutex_unlock(&cm_command_mutex); + return; + } + dump_qmi(pResponse, le16_to_cpu(pResponse->QMIHdr.Length) + 1); + if (s_pRequest && is_response(s_pRequest, pResponse)) { + free(s_pRequest); + s_pRequest = NULL; + s_pResponse = malloc(le16_to_cpu(pResponse->QMIHdr.Length) + 1); + if (s_pResponse != NULL) { + memcpy(s_pResponse, pResponse, le16_to_cpu(pResponse->QMIHdr.Length) + 1); + } + pthread_cond_signal(&cm_command_cond); + } else if ((pResponse->QMIHdr.QMIType == QMUX_TYPE_CTL) + && (le16_to_cpu(pResponse->CTLMsg.QMICTLMsgHdrRsp.QMICTLType) == QMICTL_REVOKE_CLIENT_ID_IND)) { + qmidevice_send_event_to_main(MODEM_REPORT_RESET_EVENT); + } else if ((pResponse->QMIHdr.QMIType == QMUX_TYPE_NAS) + && (le16_to_cpu(pResponse->MUXMsg.QMUXMsgHdrResp.Type) == QMINAS_SERVING_SYSTEM_IND)) { + qmidevice_send_event_to_main(RIL_UNSOL_RESPONSE_VOICE_NETWORK_STATE_CHANGED); + } else if ((pResponse->QMIHdr.QMIType == QMUX_TYPE_WDS) + && (le16_to_cpu(pResponse->MUXMsg.QMUXMsgHdrResp.Type) == QMIWDS_GET_PKT_SRVC_STATUS_IND)) { + qmidevice_send_event_to_main(RIL_UNSOL_DATA_CALL_LIST_CHANGED); + } else if ((pResponse->QMIHdr.QMIType == QMUX_TYPE_NAS) + && (le16_to_cpu(pResponse->MUXMsg.QMUXMsgHdrResp.Type) == QMINAS_SYS_INFO_IND)) { + qmidevice_send_event_to_main(RIL_UNSOL_RESPONSE_VOICE_NETWORK_STATE_CHANGED); + } else if ((pResponse->QMIHdr.QMIType == QMUX_TYPE_WDS_ADMIN) + && (le16_to_cpu(pResponse->MUXMsg.QMUXMsgHdrResp.Type) == QMI_WDA_SET_LOOPBACK_CONFIG_IND)) { + qmidevice_send_event_to_main_ext(RIL_UNSOL_LOOPBACK_CONFIG_IND, + &pResponse->MUXMsg.SetLoopBackInd, sizeof(pResponse->MUXMsg.SetLoopBackInd)); + } +#ifdef CONFIG_REG_QOS_IND + else if ((pResponse->QMIHdr.QMIType == QMUX_TYPE_QOS) + && (le16_to_cpu(pResponse->MUXMsg.QMUXMsgHdrResp.Type) == QMI_QOS_GLOBAL_QOS_FLOW_IND)) { + UINT qos_id = 0; + UCHAR new_flow = ql_get_global_qos_flow_ind_qos_id(pResponse, 
&qos_id); + if (qos_id != 0 && new_flow == 1) + qmidevice_send_event_to_main_ext(RIL_UNSOL_GLOBAL_QOS_FLOW_IND_QOS_ID, &qos_id, sizeof(qos_id)); +#ifdef CONFIG_GET_QOS_DATA_RATE + if (new_flow) { + ULONG64 max_data_rate[2] = {0}; + if (ql_get_global_qos_flow_ind_data_rate(pResponse, (void *)max_data_rate) == 0){} + } +#endif + } +#endif + else { + if (debug_qmi) + dbg_time("nobody cares about this qmi msg!!"); + } + pthread_mutex_unlock(&cm_command_mutex); +} + +#ifdef CONFIG_COEX_WWAN_STATE +static int requestGetCoexWWANState(void) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + PQMI_COEX_GET_WWAN_STATE_RESP_MSG_LTE_BAND pLteBand; + static QMI_COEX_GET_WWAN_STATE_RESP_MSG_LTE_BAND oldLteBand = {-1, -1}; + int err; + + pRequest = ComposeQMUXMsg(QMUX_TYPE_COEX, QMI_COEX_GET_WWAN_STATE_REQ, NULL, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + + if (err < 0 || pResponse == NULL) { + dbg_time("%s err = %d", __func__, err); + return err; + } + + pMUXMsg = &pResponse->MUXMsg; + if (le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXResult) || le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXError)) { + dbg_time("%s QMUXResult = 0x%x, QMUXError = 0x%x", __func__, le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXResult), le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXError)); + err = le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXError); + free(pResponse); + return err; + } + pLteBand = (PQMI_COEX_GET_WWAN_STATE_RESP_MSG_LTE_BAND)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x10); + + if (pLteBand && memcmp(pLteBand, &oldLteBand, sizeof(oldLteBand))) { + oldLteBand = *pLteBand; + dbg_time("%s ul_freq %d ul_bandwidth %d", __func__, le32_to_cpu(pLteBand->ul_band.freq), le32_to_cpu(pLteBand->ul_band.bandwidth)); + dbg_time("%s dl_freq %d dl_bandwidth %d", __func__, le32_to_cpu(pLteBand->dl_band.freq), le32_to_cpu(pLteBand->dl_band.bandwidth)); + } + free(pResponse); + return 0; +} +#endif + +static int requestSetEthMode(PROFILE_T *profile) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse = NULL; + PQMUX_MSG pMUXMsg; + int err; + PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV linkProto; + UCHAR IpPreference; + UCHAR autoconnect_setting = 0; + QMAP_SETTING qmap_settings = {0}; + + qmap_settings.size = sizeof(qmap_settings); + + if (profile->qmap_mode) { + profile->rawIP = 1; + s_9x07 = profile->rawIP; + + qmap_settings.MuxId = profile->muxid; + + if (profile->hardware_interface == HARDWARE_PCIE) { //SDX20_PCIE + qmap_settings.rx_urb_size = profile->qmap_size; //SDX24&SDX55 support 32KB + qmap_settings.ep_type = DATA_EP_TYPE_PCIE; + qmap_settings.iface_id = 0x04; + } + else { // for MDM9x07&MDM9x40&SDX20 USB + qmap_settings.rx_urb_size = profile->qmap_size; //SDX24&SDX55 support 32KB + qmap_settings.ep_type = DATA_EP_TYPE_HSUSB; + qmap_settings.iface_id = 0x04; + } + + qmap_settings.ul_data_aggregation_max_datagrams = 11; //by test result, 11 gives the best TPUT + qmap_settings.ul_data_aggregation_max_size = 8*1024; + qmap_settings.dl_minimum_padding = 0; //no effect when registered to a real network + if (profile->qmap_version != 0x09) + profile->qmap_version = 0x05; + + qmap_version = profile->qmap_version; + if (profile->rmnet_info.size) { + qmap_settings.rx_urb_size = profile->rmnet_info.rx_urb_size; + qmap_settings.ep_type = profile->rmnet_info.ep_type; + qmap_settings.iface_id = profile->rmnet_info.iface_id; + qmap_settings.dl_minimum_padding = profile->rmnet_info.dl_minimum_padding; + qmap_version = profile->rmnet_info.qmap_version; + } + + if (!profile->wda_client) { + if (qmidev_is_gobinet(profile->qmichannel)) { + //when QMAP enabled, set 
data format in GobiNet driver + } + else if (profile->proxy[0]) { + /* the first running 'quectel-cm' has already allocated the WDA client and set the data format, + so we can skip setting the data format here. */ + } + goto skip_WdaSetDataFormat; + } + } + + pRequest = ComposeQMUXMsg(QMUX_TYPE_WDS_ADMIN, QMIWDS_ADMIN_SET_DATA_FORMAT_REQ, WdaSetDataFormat, (void *)&qmap_settings); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + linkProto = (PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x11); + if (linkProto != NULL) { + profile->rawIP = (le32_to_cpu(linkProto->Value) == 2); + s_9x07 = profile->rawIP; //MDM9x07 only supports RAW IP, not Ethernet + } + + linkProto = (PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x16); + if (linkProto != NULL && profile->qmap_mode) { + qmap_settings.rx_urb_size = le32_to_cpu(linkProto->Value); + dbg_time("qmap_settings.rx_urb_size = %u", qmap_settings.rx_urb_size); //must be the same as rx_urb_size defined in the GobiNet & qmi_wwan drivers + } + +#ifdef QUECTEL_UL_DATA_AGG + if (qmap_settings.ul_data_aggregation_max_datagrams) + { + linkProto = (PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x17); + if (linkProto != NULL) { + qmap_settings.ul_data_aggregation_max_datagrams = MIN(qmap_settings.ul_data_aggregation_max_datagrams, le32_to_cpu(linkProto->Value)); + dbg_time("qmap_settings.ul_data_aggregation_max_datagrams = %u", qmap_settings.ul_data_aggregation_max_datagrams); + } + + linkProto = (PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x18); + if (linkProto != NULL) { + qmap_settings.ul_data_aggregation_max_size = MIN(qmap_settings.ul_data_aggregation_max_size, le32_to_cpu(linkProto->Value)); + dbg_time("qmap_settings.ul_data_aggregation_max_size = %u", qmap_settings.ul_data_aggregation_max_size); + } + + linkProto = (PQMIWDS_ADMIN_SET_DATA_FORMAT_TLV)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x1A); + if (linkProto != NULL) { + qmap_settings.dl_minimum_padding = le32_to_cpu(linkProto->Value); + dbg_time("qmap_settings.dl_minimum_padding = %u", qmap_settings.dl_minimum_padding); + } + + if (qmap_settings.ul_data_aggregation_max_datagrams > 1) { + ql_set_driver_qmap_setting(profile, &qmap_settings); + } + } +#endif + + free(pResponse); + +skip_WdaSetDataFormat: + if (profile->enable_ipv4) { + if (profile->qmapnet_adapter[0]) { + // bind wds mux data port + pRequest = ComposeQMUXMsg(QMUX_TYPE_WDS, QMIWDS_BIND_MUX_DATA_PORT_REQ, WdsSetQMUXBindMuxDataPort, (void *)&qmap_settings); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + if (pResponse) free(pResponse); + } + + // set ipv4 + IpPreference = IpFamilyV4; + pRequest = ComposeQMUXMsg(QMUX_TYPE_WDS, QMIWDS_SET_CLIENT_IP_FAMILY_PREF_REQ, WdsSetClientIPFamilyPref, (void *)&IpPreference); + err = QmiThreadSendQMI(pRequest, &pResponse); + if (pResponse) free(pResponse); + } + + if (profile->enable_ipv6) { + if (profile->qmapnet_adapter[0]) { + // bind wds ipv6 mux data port + pRequest = ComposeQMUXMsg(QMUX_TYPE_WDS_IPV6, QMIWDS_BIND_MUX_DATA_PORT_REQ, WdsSetQMUXBindMuxDataPort, (void *)&qmap_settings); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + if (pResponse) free(pResponse); + } + + // set ipv6 + IpPreference = IpFamilyV6; + pRequest = ComposeQMUXMsg(QMUX_TYPE_WDS_IPV6, QMIWDS_SET_CLIENT_IP_FAMILY_PREF_REQ, WdsSetClientIPFamilyPref, (void *)&IpPreference); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + if 
(pResponse) free(pResponse); + } + + pRequest = ComposeQMUXMsg(QMUX_TYPE_WDS, QMIWDS_SET_AUTO_CONNECT_REQ , WdsSetAutoConnect, (void *)&autoconnect_setting); + QmiThreadSendQMI(pRequest, &pResponse); + if (pResponse) free(pResponse); + + return 0; +} + +#ifdef CONFIG_SIM +static int requestGetPINStatus(SIM_Status *pSIMStatus) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + PQMIDMS_UIM_PIN_STATUS pPin1Status = NULL; + //PQMIDMS_UIM_PIN_STATUS pPin2Status = NULL; + + if (s_9x07) + pRequest = ComposeQMUXMsg(QMUX_TYPE_UIM, QMIUIM_GET_CARD_STATUS_REQ, NULL, NULL); + else + pRequest = ComposeQMUXMsg(QMUX_TYPE_DMS, QMIDMS_UIM_GET_PIN_STATUS_REQ, NULL, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + pPin1Status = (PQMIDMS_UIM_PIN_STATUS)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x11); + //pPin2Status = (PQMIDMS_UIM_PIN_STATUS)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x12); + + if (pPin1Status != NULL) { + if (pPin1Status->PINStatus == QMI_PIN_STATUS_NOT_VERIF) { + *pSIMStatus = SIM_PIN; + } else if (pPin1Status->PINStatus == QMI_PIN_STATUS_BLOCKED) { + *pSIMStatus = SIM_PUK; + } else if (pPin1Status->PINStatus == QMI_PIN_STATUS_PERM_BLOCKED) { + *pSIMStatus = SIM_BAD; + } + } + + free(pResponse); + return 0; +} + +static int requestGetSIMStatus(SIM_Status *pSIMStatus) { //RIL_REQUEST_GET_SIM_STATUS + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + const char * SIM_Status_String[] = { + "SIM_ABSENT", + "SIM_NOT_READY", + "SIM_READY", /* SIM_READY means the radio state is RADIO_STATE_SIM_READY */ + "SIM_PIN", + "SIM_PUK", + "SIM_NETWORK_PERSONALIZATION" + }; + + if (s_9x07) + pRequest = ComposeQMUXMsg(QMUX_TYPE_UIM, QMIUIM_GET_CARD_STATUS_REQ, NULL, NULL); + else + pRequest = ComposeQMUXMsg(QMUX_TYPE_DMS, QMIDMS_UIM_GET_STATE_REQ, NULL, NULL); + + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + *pSIMStatus = SIM_ABSENT; + if (s_9x07) + { + PQMIUIM_CARD_STATUS pCardStatus = NULL; + PQMIUIM_PIN_STATE pPINState = NULL; + UCHAR CardState = 0x01; + UCHAR PIN1State = QMI_PIN_STATUS_NOT_VERIF; + //UCHAR PIN1Retries; + //UCHAR PUK1Retries; + //UCHAR PIN2State; + //UCHAR PIN2Retries; + //UCHAR PUK2Retries; + + pCardStatus = (PQMIUIM_CARD_STATUS)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x10); + if (pCardStatus != NULL) + { + pPINState = (PQMIUIM_PIN_STATE)((PUCHAR)pCardStatus + sizeof(QMIUIM_CARD_STATUS) + pCardStatus->AIDLength); + CardState = pCardStatus->CardState; + if (CardState == UIM_CARD_STATE_PRESENT) { + if (pPINState->UnivPIN == 1) + { + PIN1State = pCardStatus->UPINState; + //PIN1Retries = pCardStatus->UPINRetries; + //PUK1Retries = pCardStatus->UPUKRetries; + } + else + { + PIN1State = pPINState->PIN1State; + //PIN1Retries = pPINState->PIN1Retries; + //PUK1Retries = pPINState->PUK1Retries; + } + //PIN2State = pPINState->PIN2State; + //PIN2Retries = pPINState->PIN2Retries; + //PUK2Retries = pPINState->PUK2Retries; + } + } + + *pSIMStatus = SIM_ABSENT; + if ((CardState == 0x01) && ((PIN1State == QMI_PIN_STATUS_VERIFIED)|| (PIN1State == QMI_PIN_STATUS_DISABLED))) + { + *pSIMStatus = SIM_READY; + } + else if (CardState == 0x01) + { + if (PIN1State == QMI_PIN_STATUS_NOT_VERIF) + { + *pSIMStatus = SIM_PIN; + } + if ( PIN1State == QMI_PIN_STATUS_BLOCKED) + { + *pSIMStatus = SIM_PUK; + } + else if (PIN1State == QMI_PIN_STATUS_PERM_BLOCKED) + { + *pSIMStatus = SIM_BAD; + } + else if (PIN1State == QMI_PIN_STATUS_NOT_INIT || PIN1State == QMI_PIN_STATUS_VERIFIED || PIN1State == 
QMI_PIN_STATUS_DISABLED) + { + *pSIMStatus = SIM_READY; + } + } + else if (CardState == 0x00 || CardState == 0x02) + { + } + else + { + } + } + else + { + //UIM state. Values: + // 0x00 UIM initialization completed + // 0x01 UIM is locked or the UIM failed + // 0x02 UIM is not present + // 0x03 Reserved + // 0xFF UIM state is currently + //unavailable + if (pResponse->MUXMsg.UIMGetStateResp.UIMState == 0x00) { + *pSIMStatus = SIM_READY; + } else if (pResponse->MUXMsg.UIMGetStateResp.UIMState == 0x01) { + *pSIMStatus = SIM_ABSENT; + err = requestGetPINStatus(pSIMStatus); + } else if ((pResponse->MUXMsg.UIMGetStateResp.UIMState == 0x02) || (pResponse->MUXMsg.UIMGetStateResp.UIMState == 0xFF)) { + *pSIMStatus = SIM_ABSENT; + } else { + *pSIMStatus = SIM_ABSENT; + } + } + dbg_time("%s SIMStatus: %s", __func__, SIM_Status_String[*pSIMStatus]); + + free(pResponse); + + return 0; +} + +static int requestEnterSimPin(const char *pPinCode) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + + if (s_9x07) + pRequest = ComposeQMUXMsg(QMUX_TYPE_UIM, QMIUIM_VERIFY_PIN_REQ, UimVerifyPinReqSend, (void *)pPinCode); + else + pRequest = ComposeQMUXMsg(QMUX_TYPE_DMS, QMIDMS_UIM_VERIFY_PIN_REQ, DmsUIMVerifyPinReqSend, (void *)pPinCode); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + free(pResponse); + return 0; +} +#endif + +#ifdef CONFIG_IMSI_ICCID +static int requestGetICCID(void) { //RIL_REQUEST_GET_IMSI + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + PQMIUIM_CONTENT pUimContent; + int err; + + if (s_9x07) { + pRequest = ComposeQMUXMsg(QMUX_TYPE_UIM, QMIUIM_READ_TRANSPARENT_REQ, UimReadTransparentIMSIReqSend, (void *)"EF_ICCID"); + err = QmiThreadSendQMI(pRequest, &pResponse); + } else { + return 0; + } + qmi_rsp_check_and_return(); + + pUimContent = (PQMIUIM_CONTENT)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x11); + if (pUimContent != NULL) { + static char DeviceICCID[32] = {'\0'}; + int i = 0, j = 0; + + for (i = 0, j = 0; i < le16_to_cpu(pUimContent->content_len); ++i) { + char charmaps[] = "0123456789ABCDEF"; + + DeviceICCID[j++] = charmaps[(pUimContent->content[i] & 0x0F)]; + DeviceICCID[j++] = charmaps[((pUimContent->content[i] & 0xF0) >> 0x04)]; + } + DeviceICCID[j] = '\0'; + + dbg_time("%s DeviceICCID: %s", __func__, DeviceICCID); + } + + free(pResponse); + return 0; +} + +static int requestGetIMSI(void) { //RIL_REQUEST_GET_IMSI + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + PQMIUIM_CONTENT pUimContent; + int err; + + if (s_9x07) { + pRequest = ComposeQMUXMsg(QMUX_TYPE_UIM, QMIUIM_READ_TRANSPARENT_REQ, UimReadTransparentIMSIReqSend, (void *)"EF_IMSI"); + err = QmiThreadSendQMI(pRequest, &pResponse); + } else { + return 0; + } + qmi_rsp_check_and_return(); + + pUimContent = (PQMIUIM_CONTENT)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x11); + if (pUimContent != NULL) { + static char DeviceIMSI[32] = {'\0'}; + int i = 0, j = 0; + + for (i = 0, j = 0; i < le16_to_cpu(pUimContent->content[0]); ++i) { + if (i != 0) + DeviceIMSI[j++] = (pUimContent->content[i+1] & 0x0F) + '0'; + DeviceIMSI[j++] = ((pUimContent->content[i+1] & 0xF0) >> 0x04) + '0'; + } + DeviceIMSI[j] = '\0'; + + dbg_time("%s DeviceIMSI: %s", __func__, DeviceIMSI); + } + + free(pResponse); + return 0; +} +#endif + +#if 1 +static void quectel_convert_cdma_mcc_2_ascii_mcc( USHORT *p_mcc, USHORT mcc ) +{ + unsigned int d1, d2, d3, buf = mcc + 111; + + if ( mcc == 0x3FF ) // wildcard + { + *p_mcc = 3; + } + else + { + d3 = buf % 10; + buf = ( d3 == 0 
) ? (buf-10)/10 : buf/10; + + d2 = buf % 10; + buf = ( d2 == 0 ) ? (buf-10)/10 : buf/10; + + d1 = ( buf == 10 ) ? 0 : buf; + +//dbg_time("d1:%d, d2:%d,d3:%d",d1,d2,d3); + if ( d1<10 && d2<10 && d3<10 ) + { + *p_mcc = d1*100+d2*10+d3; +#if 0 + *(p_mcc+0) = '0' + d1; + *(p_mcc+1) = '0' + d2; + *(p_mcc+2) = '0' + d3; +#endif + } + else + { + //dbg_time( "invalid digits %d %d %d", d1, d2, d3 ); + *p_mcc = 0; + } + } +} + +static void quectel_convert_cdma_mnc_2_ascii_mnc( USHORT *p_mnc, USHORT imsi_11_12) +{ + unsigned int d1, d2, buf = imsi_11_12 + 11; + + if ( imsi_11_12 == 0x7F ) // wildcard + { + *p_mnc = 7; + } + else + { + d2 = buf % 10; + buf = ( d2 == 0 ) ? (buf-10)/10 : buf/10; + + d1 = ( buf == 10 ) ? 0 : buf; + + if ( d1<10 && d2<10 ) + { + *p_mnc = d1*10 + d2; + } + else + { + //dbg_time( "invalid digits %d %d", d1, d2, 0 ); + *p_mnc = 0; + } + } +} + +static int requestGetHomeNetwork(USHORT *p_mcc, USHORT *p_mnc, USHORT *p_sid, USHORT *p_nid) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + PHOME_NETWORK pHomeNetwork; + PHOME_NETWORK_SYSTEMID pHomeNetworkSystemID; + + pRequest = ComposeQMUXMsg(QMUX_TYPE_NAS, QMINAS_GET_HOME_NETWORK_REQ, NULL, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + pHomeNetwork = (PHOME_NETWORK)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x01); + if (pHomeNetwork && p_mcc && p_mnc ) { + *p_mcc = le16_to_cpu(pHomeNetwork->MobileCountryCode); + *p_mnc = le16_to_cpu(pHomeNetwork->MobileNetworkCode); + //dbg_time("%s MobileCountryCode: %d, MobileNetworkCode: %d", __func__, *pMobileCountryCode, *pMobileNetworkCode); + } + + pHomeNetworkSystemID = (PHOME_NETWORK_SYSTEMID)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x10); + if (pHomeNetworkSystemID && p_sid && p_nid) { + *p_sid = le16_to_cpu(pHomeNetworkSystemID->SystemID); //china-hefei: sid 14451 + *p_nid = le16_to_cpu(pHomeNetworkSystemID->NetworkID); + //dbg_time("%s SystemID: %d, NetworkID: %d", __func__, *pSystemID, *pNetworkID); + } + + free(pResponse); + + return 0; +} +#endif + +#if 0 +// Lookup table for carriers known to produce SIMs which incorrectly indicate MNC length. 
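+// An IMSI is the 3-digit MCC followed by a 2- or 3-digit MNC, and nothing in the IMSI
+// itself encodes the MNC length, hence the exception tables below for networks that use
+// 3 digits. [editor's sketch: a hypothetical helper illustrating the split once the
+// length is known; this whole region is already disabled by the surrounding #if 0]
+static void imsi_split(const char *imsi, int mnc_len, char mcc[4], char mnc[4])
+{
+    memcpy(mcc, imsi, 3);              /* digits 0..2 : MCC */
+    mcc[3] = '\0';
+    memcpy(mnc, imsi + 3, mnc_len);    /* digits 3..  : MNC (2 or 3 digits) */
+    mnc[mnc_len] = '\0';
+}
+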
+static const char * MCCMNC_CODES_HAVING_3DIGITS_MNC[] = { + "302370", "302720", "310260", + "405025", "405026", "405027", "405028", "405029", "405030", "405031", "405032", + "405033", "405034", "405035", "405036", "405037", "405038", "405039", "405040", + "405041", "405042", "405043", "405044", "405045", "405046", "405047", "405750", + "405751", "405752", "405753", "405754", "405755", "405756", "405799", "405800", + "405801", "405802", "405803", "405804", "405805", "405806", "405807", "405808", + "405809", "405810", "405811", "405812", "405813", "405814", "405815", "405816", + "405817", "405818", "405819", "405820", "405821", "405822", "405823", "405824", + "405825", "405826", "405827", "405828", "405829", "405830", "405831", "405832", + "405833", "405834", "405835", "405836", "405837", "405838", "405839", "405840", + "405841", "405842", "405843", "405844", "405845", "405846", "405847", "405848", + "405849", "405850", "405851", "405852", "405853", "405875", "405876", "405877", + "405878", "405879", "405880", "405881", "405882", "405883", "405884", "405885", + "405886", "405908", "405909", "405910", "405911", "405912", "405913", "405914", + "405915", "405916", "405917", "405918", "405919", "405920", "405921", "405922", + "405923", "405924", "405925", "405926", "405927", "405928", "405929", "405930", + "405931", "405932", "502142", "502143", "502145", "502146", "502147", "502148" +}; + +static const char * MCC_CODES_HAVING_3DIGITS_MNC[] = { + "302", //Canada + "310", //United States of America + "311", //United States of America + "312", //United States of America + "313", //United States of America + "314", //United States of America + "315", //United States of America + "316", //United States of America + "334", //Mexico + "338", //Jamaica + "342", //Barbados + "344", //Antigua and Barbuda + "346", //Cayman Islands + "348", //British Virgin Islands + "365", //Anguilla + "708", //Honduras (Republic of) + "722", //Argentine Republic + "732" //Colombia (Republic of) +}; + +int requestGetIMSI(const char **pp_imsi, USHORT *pMobileCountryCode, USHORT *pMobileNetworkCode) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + + if (pp_imsi) *pp_imsi = NULL; + if (pMobileCountryCode) *pMobileCountryCode = 0; + if (pMobileNetworkCode) *pMobileNetworkCode = 0; + + pRequest = ComposeQMUXMsg(QMUX_TYPE_DMS, QMIDMS_UIM_GET_IMSI_REQ, NULL, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + if (pMUXMsg->UIMGetIMSIResp.TLV2Type == 0x01 && le16_to_cpu(pMUXMsg->UIMGetIMSIResp.TLV2Length) >= 5) { + int mnc_len = 2; + unsigned i; + char tmp[4]; + + if (pp_imsi) *pp_imsi = strndup((const char *)(&pMUXMsg->UIMGetIMSIResp.IMSI), le16_to_cpu(pMUXMsg->UIMGetIMSIResp.TLV2Length)); + + for (i = 0; i < sizeof(MCCMNC_CODES_HAVING_3DIGITS_MNC)/sizeof(MCCMNC_CODES_HAVING_3DIGITS_MNC[0]); i++) { + if (!strncmp((const char *)(&pMUXMsg->UIMGetIMSIResp.IMSI), MCCMNC_CODES_HAVING_3DIGITS_MNC[i], 6)) { + mnc_len = 3; + break; + } + } + if (mnc_len == 2) { + for (i = 0; i < sizeof(MCC_CODES_HAVING_3DIGITS_MNC)/sizeof(MCC_CODES_HAVING_3DIGITS_MNC[0]); i++) { + if (!strncmp((const char *)(&pMUXMsg->UIMGetIMSIResp.IMSI), MCC_CODES_HAVING_3DIGITS_MNC[i], 3)) { + mnc_len = 3; + break; + } + } + } + + tmp[0] = (&pMUXMsg->UIMGetIMSIResp.IMSI)[0]; + tmp[1] = (&pMUXMsg->UIMGetIMSIResp.IMSI)[1]; + tmp[2] = (&pMUXMsg->UIMGetIMSIResp.IMSI)[2]; + tmp[3] = 0; + if (pMobileCountryCode) *pMobileCountryCode = atoi(tmp); + tmp[0] = (&pMUXMsg->UIMGetIMSIResp.IMSI)[3]; + tmp[1] = 
(&pMUXMsg->UIMGetIMSIResp.IMSI)[4]; + tmp[2] = 0; + if (mnc_len == 3) { + tmp[2] = (&pMUXMsg->UIMGetIMSIResp.IMSI)[6]; + } + if (pMobileNetworkCode) *pMobileNetworkCode = atoi(tmp); + } + + free(pResponse); + + return 0; +} +#endif + +static struct wwan_data_class_str class2str[] = { + {WWAN_DATA_CLASS_NONE, "UNKNOWN"}, + {WWAN_DATA_CLASS_GPRS, "GPRS"}, + {WWAN_DATA_CLASS_EDGE, "EDGE"}, + {WWAN_DATA_CLASS_UMTS, "UMTS"}, + {WWAN_DATA_CLASS_HSDPA, "HSDPA"}, + {WWAN_DATA_CLASS_HSUPA, "HSUPA"}, + {WWAN_DATA_CLASS_LTE, "LTE"}, + {WWAN_DATA_CLASS_5G_NSA, "5G_NSA"}, + {WWAN_DATA_CLASS_5G_SA, "5G_SA"}, + {WWAN_DATA_CLASS_1XRTT, "1XRTT"}, + {WWAN_DATA_CLASS_1XEVDO, "1XEVDO"}, + {WWAN_DATA_CLASS_1XEVDO_REVA, "1XEVDO_REVA"}, + {WWAN_DATA_CLASS_1XEVDV, "1XEVDV"}, + {WWAN_DATA_CLASS_3XRTT, "3XRTT"}, + {WWAN_DATA_CLASS_1XEVDO_REVB, "1XEVDO_REVB"}, + {WWAN_DATA_CLASS_UMB, "UMB"}, + {WWAN_DATA_CLASS_CUSTOM, "CUSTOM"}, +}; + +static const char *wwan_data_class2str(ULONG class) +{ + unsigned int i = 0; + for (i = 0; i < sizeof(class2str)/sizeof(class2str[0]); i++) { + if (class2str[i].class == class) { + return class2str[i].str; + } + } + return "UNKNOWN"; +} + +static USHORT char2ushort(UCHAR str[3]) { + int i; + char temp[4]; + USHORT ret= 0; + + memcpy(temp, str, 3); + temp[3] = '\0'; + + for (i = 0; i < 4; i++) { + if ((UCHAR)temp[i] == 0xFF) { + temp[i] = '\0'; + } + } + ret = (USHORT)atoi(temp); + + return ret; +} + +static int requestRegistrationState2(UCHAR *pPSAttachedState) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + USHORT MobileCountryCode = 0; + USHORT MobileNetworkCode = 0; + const char *pDataCapStr = "UNKNOW"; + LONG remainingLen; + PSERVICE_STATUS_INFO pServiceStatusInfo; + int is_lte = 0; + PCDMA_SYSTEM_INFO pCdmaSystemInfo; + PHDR_SYSTEM_INFO pHdrSystemInfo; + PGSM_SYSTEM_INFO pGsmSystemInfo; + PWCDMA_SYSTEM_INFO pWcdmaSystemInfo; + PLTE_SYSTEM_INFO pLteSystemInfo; + PTDSCDMA_SYSTEM_INFO pTdscdmaSystemInfo; + PNR5G_SYSTEM_INFO pNr5gSystemInfo; + UCHAR DeviceClass = 0; + ULONG DataCapList = 0; + + /* Additional LTE System Info - Availability of Dual connectivity of E-UTRA with NR5G */ + uint8_t endc_available_valid = 0; /**< Must be set to true if endc_available is being passed */ + uint8_t endc_available = 0x00; + /**< + Upper layer indication in LTE SIB2. Values: \n + - 0x00 -- 5G Not available \n + - 0x01 -- 5G Available + + */ + /* Additional LTE System Info - DCNR restriction Info */ + uint8_t restrict_dcnr_valid = 0; /**< Must be set to true if restrict_dcnr is being passed */ + uint8_t restrict_dcnr = 0x01; + /**< + DCNR restriction in NAS attach/TAU accept. 
Values: \n + - 0x00 -- Not restricted \n + - 0x01 -- Restricted + */ + + *pPSAttachedState = 0; + pRequest = ComposeQMUXMsg(QMUX_TYPE_NAS, QMINAS_GET_SYS_INFO_REQ, NULL, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + pServiceStatusInfo = (PSERVICE_STATUS_INFO)(((PCHAR)&pMUXMsg->GetSysInfoResp) + QCQMUX_MSG_HDR_SIZE); + remainingLen = le16_to_cpu(pMUXMsg->GetSysInfoResp.Length); + + s_is_cdma = 0; + s_5g_type = WWAN_DATA_CLASS_NONE; + s_hdr_personality = 0; + while (remainingLen > 0) { + switch (pServiceStatusInfo->TLVType) { + case 0x10: // CDMA + if (pServiceStatusInfo->SrvStatus == 0x02) { + DataCapList = WWAN_DATA_CLASS_1XRTT| + WWAN_DATA_CLASS_1XEVDO| + WWAN_DATA_CLASS_1XEVDO_REVA| + WWAN_DATA_CLASS_1XEVDV| + WWAN_DATA_CLASS_1XEVDO_REVB; + DeviceClass = DEVICE_CLASS_CDMA; + s_is_cdma = (0 == is_lte); + } + break; + case 0x11: // HDR + if (pServiceStatusInfo->SrvStatus == 0x02) { + DataCapList = WWAN_DATA_CLASS_3XRTT| + WWAN_DATA_CLASS_UMB; + DeviceClass = DEVICE_CLASS_CDMA; + s_is_cdma = (0 == is_lte); + } + break; + case 0x12: // GSM + if (pServiceStatusInfo->SrvStatus == 0x02) { + DataCapList = WWAN_DATA_CLASS_GPRS| + WWAN_DATA_CLASS_EDGE; + DeviceClass = DEVICE_CLASS_GSM; + } + break; + case 0x13: // WCDMA + if (pServiceStatusInfo->SrvStatus == 0x02) { + DataCapList = WWAN_DATA_CLASS_UMTS; + DeviceClass = DEVICE_CLASS_GSM; + } + break; + case 0x14: // LTE + if (pServiceStatusInfo->SrvStatus == 0x02) { + DataCapList = WWAN_DATA_CLASS_LTE; + DeviceClass = DEVICE_CLASS_GSM; + is_lte = 1; + s_is_cdma = 0; + } + break; + case 0x4A: // NR5G Service Status Info + if (pServiceStatusInfo->SrvStatus == NAS_SYS_SRV_STATUS_SRV_V01) { + DataCapList |= WWAN_DATA_CLASS_5G_SA; + DeviceClass = DEVICE_CLASS_GSM; + is_lte = 1; + s_is_cdma = 0; + } + break; + case 0x4B: // NR5G System Info + pNr5gSystemInfo = (PNR5G_SYSTEM_INFO)pServiceStatusInfo; + if (pNr5gSystemInfo->srv_domain_valid == 0x01) { + if (pNr5gSystemInfo->srv_domain & SYS_SRV_DOMAIN_PS_ONLY_V01) { + *pPSAttachedState = 1; + } + } + + if (pNr5gSystemInfo->network_id_valid == 0x01) { + MobileCountryCode = (USHORT)char2ushort(pNr5gSystemInfo->MCC); + MobileNetworkCode = (USHORT)char2ushort(pNr5gSystemInfo->MNC); + } + break; + case 0x4E: //Additional LTE System Info - Availability of Dual Connectivity of E-UTRA with NR5G + endc_available_valid = 1; + endc_available = pServiceStatusInfo->SrvStatus; + break; + + case 0x4F: //Additional LTE System Info - DCNR restriction Info + restrict_dcnr_valid = 1; + restrict_dcnr = pServiceStatusInfo->SrvStatus; + break; + + case 0x24: // TDSCDMA + if (pServiceStatusInfo->SrvStatus == 0x02) { + pDataCapStr = "TD-SCDMA"; + } + break; + case 0x15: // CDMA + // CDMA_SYSTEM_INFO + pCdmaSystemInfo = (PCDMA_SYSTEM_INFO)pServiceStatusInfo; + if (pCdmaSystemInfo->SrvDomainValid == 0x01) { + if (pCdmaSystemInfo->SrvDomain & 0x02) { + *pPSAttachedState = 1; + s_is_cdma = (0 == is_lte); + } + } +#if 0 + if (pCdmaSystemInfo->SrvCapabilityValid == 0x01) { + *pPSAttachedState = 0; + if (pCdmaSystemInfo->SrvCapability & 0x02) { + *pPSAttachedState = 1; + s_is_cdma = (0 == is_lte); + } + } +#endif + if (pCdmaSystemInfo->NetworkIdValid == 0x01) { + MobileCountryCode = (USHORT)char2ushort(pCdmaSystemInfo->MCC); + MobileNetworkCode = (USHORT)char2ushort(pCdmaSystemInfo->MNC); + } + break; + case 0x16: // HDR + // HDR_SYSTEM_INFO + pHdrSystemInfo = (PHDR_SYSTEM_INFO)pServiceStatusInfo; + if (pHdrSystemInfo->SrvDomainValid == 0x01) { + if (pHdrSystemInfo->SrvDomain & 0x02) { + 
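+#if 0
+                /* [editor's note] srv_domain follows the usual QMI NAS encoding, an enum
+                 * rather than a pure bitmask (assumption based on common QMI definitions):
+                 * 0 = no service, 1 = CS only, 2 = PS only, 3 = CS+PS. Testing bit 0x02
+                 * therefore matches both the PS-only and CS+PS cases, e.g.: */
+                enum { SRV_DOMAIN_NONE = 0, SRV_DOMAIN_CS = 1, SRV_DOMAIN_PS = 2, SRV_DOMAIN_CS_PS = 3 };
+                int ps_attached = (SRV_DOMAIN_CS_PS & 0x02) != 0;   /* 1: CS+PS counts as PS-attached */
+#endif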
*pPSAttachedState = 1; + s_is_cdma = (0 == is_lte); + } + } +#if 0 + if (pHdrSystemInfo->SrvCapabilityValid == 0x01) { + *pPSAttachedState = 0; + if (pHdrSystemInfo->SrvCapability & 0x02) { + *pPSAttachedState = 1; + s_is_cdma = (0 == is_lte); + } + } +#endif + if (*pPSAttachedState && pHdrSystemInfo->HdrPersonalityValid == 0x01) { + if (pHdrSystemInfo->HdrPersonality == 0x03) + s_hdr_personality = 0x02; + //else if (pHdrSystemInfo->HdrPersonality == 0x02) + // s_hdr_personality = 0x01; + } + USHORT cmda_mcc = 0, cdma_mnc = 0; + if(!requestGetHomeNetwork(&cmda_mcc, &cdma_mnc,NULL, NULL) && cmda_mcc) { + quectel_convert_cdma_mcc_2_ascii_mcc(&MobileCountryCode, cmda_mcc); + quectel_convert_cdma_mnc_2_ascii_mnc(&MobileNetworkCode, cdma_mnc); + } + break; + case 0x17: // GSM + // GSM_SYSTEM_INFO + pGsmSystemInfo = (PGSM_SYSTEM_INFO)pServiceStatusInfo; + if (pGsmSystemInfo->SrvDomainValid == 0x01) { + if (pGsmSystemInfo->SrvDomain & 0x02) { + *pPSAttachedState = 1; + } + } +#if 0 + if (pGsmSystemInfo->SrvCapabilityValid == 0x01) { + *pPSAttachedState = 0; + if (pGsmSystemInfo->SrvCapability & 0x02) { + *pPSAttachedState = 1; + } + } +#endif + if (pGsmSystemInfo->NetworkIdValid == 0x01) { + MobileCountryCode = (USHORT)char2ushort(pGsmSystemInfo->MCC); + MobileNetworkCode = (USHORT)char2ushort(pGsmSystemInfo->MNC); + } + break; + case 0x18: // WCDMA + // WCDMA_SYSTEM_INFO + pWcdmaSystemInfo = (PWCDMA_SYSTEM_INFO)pServiceStatusInfo; + if (pWcdmaSystemInfo->SrvDomainValid == 0x01) { + if (pWcdmaSystemInfo->SrvDomain & 0x02) { + *pPSAttachedState = 1; + } + } +#if 0 + if (pWcdmaSystemInfo->SrvCapabilityValid == 0x01) { + *pPSAttachedState = 0; + if (pWcdmaSystemInfo->SrvCapability & 0x02) { + *pPSAttachedState = 1; + } + } +#endif + if (pWcdmaSystemInfo->NetworkIdValid == 0x01) { + MobileCountryCode = (USHORT)char2ushort(pWcdmaSystemInfo->MCC); + MobileNetworkCode = (USHORT)char2ushort(pWcdmaSystemInfo->MNC); + } + break; + case 0x19: // LTE_SYSTEM_INFO + // LTE_SYSTEM_INFO + pLteSystemInfo = (PLTE_SYSTEM_INFO)pServiceStatusInfo; + if (pLteSystemInfo->SrvDomainValid == 0x01) { + if (pLteSystemInfo->SrvDomain & 0x02) { + *pPSAttachedState = 1; + is_lte = 1; + s_is_cdma = 0; + } + } +#if 0 + if (pLteSystemInfo->SrvCapabilityValid == 0x01) { + *pPSAttachedState = 0; + if (pLteSystemInfo->SrvCapability & 0x02) { + *pPSAttachedState = 1; + is_lte = 1; + s_is_cdma = 0; + } + } +#endif + if (pLteSystemInfo->NetworkIdValid == 0x01) { + MobileCountryCode = (USHORT)char2ushort(pLteSystemInfo->MCC); + MobileNetworkCode = (USHORT)char2ushort(pLteSystemInfo->MNC); + } + break; + case 0x25: // TDSCDMA + // TDSCDMA_SYSTEM_INFO + pTdscdmaSystemInfo = (PTDSCDMA_SYSTEM_INFO)pServiceStatusInfo; + if (pTdscdmaSystemInfo->SrvDomainValid == 0x01) { + if (pTdscdmaSystemInfo->SrvDomain & 0x02) { + *pPSAttachedState = 1; + } + } +#if 0 + if (pTdscdmaSystemInfo->SrvCapabilityValid == 0x01) { + *pPSAttachedState = 0; + if (pTdscdmaSystemInfo->SrvCapability & 0x02) { + *pPSAttachedState = 1; + } + } +#endif + if (pTdscdmaSystemInfo->NetworkIdValid == 0x01) { + MobileCountryCode = (USHORT)char2ushort(pTdscdmaSystemInfo->MCC); + MobileNetworkCode = (USHORT)char2ushort(pTdscdmaSystemInfo->MNC); + } + break; + default: + break; + } /* switch (pServiceStatusInfo->TLYType) */ + + remainingLen -= (le16_to_cpu(pServiceStatusInfo->TLVLength) + 3); + pServiceStatusInfo = (PSERVICE_STATUS_INFO)((PCHAR)&pServiceStatusInfo->TLVLength + le16_to_cpu(pServiceStatusInfo->TLVLength) + sizeof(USHORT)); + } /* while (remainingLen > 0) */ + + if 
(DataCapList & WWAN_DATA_CLASS_LTE) { + if (endc_available_valid && restrict_dcnr_valid) { + if (endc_available && !restrict_dcnr) { + DataCapList |= WWAN_DATA_CLASS_5G_NSA; + } + } + } + + if (DeviceClass == DEVICE_CLASS_CDMA) { + if (s_hdr_personality == 2) { + pDataCapStr = s_hdr_personality == 2 ? "eHRPD" : "HRPD"; + } else if (DataCapList & WWAN_DATA_CLASS_1XEVDO_REVB) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_1XEVDO_REVB); + } else if (DataCapList & WWAN_DATA_CLASS_1XEVDO_REVA) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_1XEVDO_REVA); + } else if (DataCapList & WWAN_DATA_CLASS_1XEVDO) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_1XEVDO); + } else if (DataCapList & WWAN_DATA_CLASS_1XRTT) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_1XRTT); + } else if (DataCapList & WWAN_DATA_CLASS_3XRTT) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_3XRTT); + } else if (DataCapList & WWAN_DATA_CLASS_UMB) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_UMB); + } + } else { + if (DataCapList & WWAN_DATA_CLASS_5G_SA) { + s_5g_type = WWAN_DATA_CLASS_5G_SA; + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_5G_SA); + } else if (DataCapList & WWAN_DATA_CLASS_5G_NSA) { + s_5g_type = WWAN_DATA_CLASS_5G_NSA; + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_5G_NSA); + } else if (DataCapList & WWAN_DATA_CLASS_LTE) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_LTE); + } else if ((DataCapList & WWAN_DATA_CLASS_HSDPA) && (DataCapList & WWAN_DATA_CLASS_HSUPA)) { + pDataCapStr = "HSDPA_HSUPA"; + } else if (DataCapList & WWAN_DATA_CLASS_HSDPA) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_HSDPA); + } else if (DataCapList & WWAN_DATA_CLASS_HSUPA) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_HSUPA); + } else if (DataCapList & WWAN_DATA_CLASS_UMTS) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_UMTS); + } else if (DataCapList & WWAN_DATA_CLASS_EDGE) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_EDGE); + } else if (DataCapList & WWAN_DATA_CLASS_GPRS) { + pDataCapStr = wwan_data_class2str(WWAN_DATA_CLASS_GPRS); + } + } + + dbg_time("%s MCC: %d, MNC: %d, PS: %s, DataCap: %s", __func__, + MobileCountryCode, MobileNetworkCode, (*pPSAttachedState == 1) ? "Attached" : "Detached" , pDataCapStr); + + free(pResponse); + + return 0; +} + +static int requestRegistrationState(UCHAR *pPSAttachedState) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + PQMINAS_CURRENT_PLMN_MSG pCurrentPlmn; + PSERVING_SYSTEM pServingSystem; + PQMINAS_DATA_CAP pDataCap; + USHORT MobileCountryCode = 0; + USHORT MobileNetworkCode = 0; + const char *pDataCapStr = "UNKNOW"; + + if (s_9x07) { + return requestRegistrationState2(pPSAttachedState); + } + + pRequest = ComposeQMUXMsg(QMUX_TYPE_NAS, QMINAS_GET_SERVING_SYSTEM_REQ, NULL, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + pCurrentPlmn = (PQMINAS_CURRENT_PLMN_MSG)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x12); + if (pCurrentPlmn) { + MobileCountryCode = le16_to_cpu(pCurrentPlmn->MobileCountryCode); + MobileNetworkCode = le16_to_cpu(pCurrentPlmn->MobileNetworkCode); + } + + *pPSAttachedState = 0; + pServingSystem = (PSERVING_SYSTEM)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x01); + if (pServingSystem) { + //Packet-switched domain attach state of the mobile. 
+    //0x00 PS_UNKNOWN  - Unknown or not applicable
+    //0x01 PS_ATTACHED - Attached
+    //0x02 PS_DETACHED - Detached
+        *pPSAttachedState = pServingSystem->RegistrationState;
+        if (pServingSystem->RegistrationState == 0x01) //0x01 - REGISTERED - Registered with a network
+            *pPSAttachedState = pServingSystem->PSAttachedState;
+        else {
+            //MobileCountryCode = MobileNetworkCode = 0;
+            *pPSAttachedState = 0x02;
+        }
+    }
+
+    pDataCap = (PQMINAS_DATA_CAP)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x11);
+    if (pDataCap && pDataCap->DataCapListLen) {
+        UCHAR *DataCap = &pDataCap->DataCap;
+        if (pDataCap->DataCapListLen == 2) {
+            if ((DataCap[0] == 0x06) && ((DataCap[1] == 0x08) || (DataCap[1] == 0x0A)))
+                DataCap[0] = DataCap[1];
+        }
+        switch (DataCap[0]) {
+            case 0x01: pDataCapStr = "GPRS"; break;
+            case 0x02: pDataCapStr = "EDGE"; break;
+            case 0x03: pDataCapStr = "HSDPA"; break;
+            case 0x04: pDataCapStr = "HSUPA"; break;
+            case 0x05: pDataCapStr = "UMTS"; break;
+            case 0x06: pDataCapStr = "1XRTT"; break;
+            case 0x07: pDataCapStr = "1XEVDO"; break;
+            case 0x08: pDataCapStr = "1XEVDO_REVA"; break;
+            case 0x09: pDataCapStr = "GPRS"; break;
+            case 0x0A: pDataCapStr = "1XEVDO_REVB"; break;
+            case 0x0B: pDataCapStr = "LTE"; break;
+            case 0x0C: pDataCapStr = "HSDPA"; break;
+            case 0x0D: pDataCapStr = "HSDPA"; break;
+            default: pDataCapStr = "UNKNOW"; break;
+        }
+    }
+
+    if (pServingSystem && pServingSystem->RegistrationState == 0x01 && pServingSystem->InUseRadioIF && pServingSystem->RadioIF == 0x09) {
+        pDataCapStr = "TD-SCDMA";
+    }
+
+    s_is_cdma = 0;
+    if (pServingSystem && pServingSystem->RegistrationState == 0x01 && pServingSystem->InUseRadioIF && (pServingSystem->RadioIF == 0x01 || pServingSystem->RadioIF == 0x02)) {
+        USHORT cdma_mcc = 0, cdma_mnc = 0;
+        s_is_cdma = 1;
+        if (!requestGetHomeNetwork(&cdma_mcc, &cdma_mnc, NULL, NULL) && cdma_mcc) {
+            quectel_convert_cdma_mcc_2_ascii_mcc(&MobileCountryCode, cdma_mcc);
+            quectel_convert_cdma_mnc_2_ascii_mnc(&MobileNetworkCode, cdma_mnc);
+        }
+        if (1) {
+            PQCQMUX_TLV pTLV = (PQCQMUX_TLV)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x23);
+            if (pTLV)
+                s_hdr_personality = pTLV->Value;
+            else
+                s_hdr_personality = 0;
+            if (s_hdr_personality == 2)
+                pDataCapStr = "eHRPD";
+        }
+    }
+
+    dbg_time("%s MCC: %d, MNC: %d, PS: %s, DataCap: %s", __func__,
+        MobileCountryCode, MobileNetworkCode, (*pPSAttachedState == 1) ? "Attached" : "Detached", pDataCapStr);
+
+    free(pResponse);
+
+    return 0;
+}
+
+static int requestQueryDataCall(UCHAR *pConnectionStatus, int curIpFamily) {
+    PQCQMIMSG pRequest;
+    PQCQMIMSG pResponse;
+    PQMUX_MSG pMUXMsg;
+    int err;
+    PQMIWDS_PKT_SRVC_TLV pPktSrvc;
+    UCHAR oldConnectionStatus = *pConnectionStatus;
+    UCHAR QMIType = (curIpFamily == IpFamilyV4) ?
QMUX_TYPE_WDS : QMUX_TYPE_WDS_IPV6; + + pRequest = ComposeQMUXMsg(QMIType, QMIWDS_GET_PKT_SRVC_STATUS_REQ, NULL, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + *pConnectionStatus = QWDS_PKT_DATA_DISCONNECTED; + pPktSrvc = (PQMIWDS_PKT_SRVC_TLV)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x01); + if (pPktSrvc) { + *pConnectionStatus = pPktSrvc->ConnectionStatus; + if ((le16_to_cpu(pPktSrvc->TLVLength) == 2) && (pPktSrvc->ReconfigReqd == 0x01)) + *pConnectionStatus = QWDS_PKT_DATA_DISCONNECTED; + } + + if (*pConnectionStatus == QWDS_PKT_DATA_DISCONNECTED) { + if (curIpFamily == IpFamilyV4) + WdsConnectionIPv4Handle = 0; + else + WdsConnectionIPv6Handle = 0; + } + + if (oldConnectionStatus != *pConnectionStatus || debug_qmi) { + dbg_time("%s %sConnectionStatus: %s", __func__, (curIpFamily == IpFamilyV4) ? "IPv4" : "IPv6", + (*pConnectionStatus == QWDS_PKT_DATA_CONNECTED) ? "CONNECTED" : "DISCONNECTED"); + } + + free(pResponse); + return 0; +} + +static int requestSetupDataCall(PROFILE_T *profile, int curIpFamily) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err = 0; + UCHAR QMIType = (curIpFamily == IpFamilyV4) ? QMUX_TYPE_WDS : QMUX_TYPE_WDS_IPV6; + +//DualIPSupported means can get ipv4 & ipv6 address at the same time, one wds for ipv4, the other wds for ipv6 + profile->curIpFamily = curIpFamily; + pRequest = ComposeQMUXMsg(QMIType, QMIWDS_START_NETWORK_INTERFACE_REQ, WdsStartNwInterfaceReq, profile); + err = QmiThreadSendQMITimeout(pRequest, &pResponse, 120 * 1000, __func__); + qmi_rsp_check(); + + if (le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXResult) || le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXError)) { + PQMI_TLV_HDR pTLVHdr; + + pTLVHdr = GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x10); + if (pTLVHdr) { + uint16_t *data16 = (uint16_t *)(pTLVHdr+1); + uint16_t call_end_reason = le16_to_cpu(data16[0]); + dbg_time("call_end_reason is %d", call_end_reason); + } + + pTLVHdr = GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x11); + if (pTLVHdr) { + uint16_t *data16 = (uint16_t *)(pTLVHdr+1); + uint16_t call_end_reason_type = le16_to_cpu(data16[0]); + uint16_t verbose_call_end_reason = le16_to_cpu(data16[1]); + + dbg_time("call_end_reason_type is %d", call_end_reason_type); + dbg_time("call_end_reason_verbose is %d", verbose_call_end_reason); + } + + err = le16_to_cpu(pMUXMsg->QMUXMsgHdrResp.QMUXError); + free(pResponse); + return err; + } + + if (curIpFamily == IpFamilyV4) { + WdsConnectionIPv4Handle = le32_to_cpu(pResponse->MUXMsg.StartNwInterfaceResp.Handle); + dbg_time("%s WdsConnectionIPv4Handle: 0x%08x", __func__, WdsConnectionIPv4Handle); + } else { + WdsConnectionIPv6Handle = le32_to_cpu(pResponse->MUXMsg.StartNwInterfaceResp.Handle); + dbg_time("%s WdsConnectionIPv6Handle: 0x%08x", __func__, WdsConnectionIPv6Handle); + } + + free(pResponse); + + return 0; +} + +static int requestDeactivateDefaultPDP(PROFILE_T *profile, int curIpFamily) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + UCHAR QMIType = (curIpFamily == 0x04) ? QMUX_TYPE_WDS : QMUX_TYPE_WDS_IPV6; + + (void)profile; + if (curIpFamily == IpFamilyV4 && WdsConnectionIPv4Handle == 0) + return 0; + if (curIpFamily == IpFamilyV6 && WdsConnectionIPv6Handle == 0) + return 0; + + dbg_time("%s WdsConnectionIPv%dHandle", __func__, curIpFamily == IpFamilyV4 ? 
4 : 6); + + pRequest = ComposeQMUXMsg(QMIType, QMIWDS_STOP_NETWORK_INTERFACE_REQ , WdsStopNwInterfaceReq, &curIpFamily); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + if (curIpFamily == IpFamilyV4) + WdsConnectionIPv4Handle = 0; + else + WdsConnectionIPv6Handle = 0; + free(pResponse); + return 0; +} + +static int requestGetIPAddress(PROFILE_T *profile, int curIpFamily) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV4_ADDR pIpv4Addr; + PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV6_ADDR pIpv6Addr = NULL; + PQMIWDS_GET_RUNTIME_SETTINGS_TLV_MTU pMtu; + IPV4_T *pIpv4 = &profile->ipv4; + IPV6_T *pIpv6 = &profile->ipv6; + UCHAR QMIType = (curIpFamily == 0x04) ? QMUX_TYPE_WDS : QMUX_TYPE_WDS_IPV6; + PQMIWDS_GET_RUNNING_SETTINGS_PCSCF_IPV6_ADDR pPCSCFIpv6Addr; + PQMIWDS_GET_RUNNING_SETTINGS_PCSCF_IPV4_ADDR pPCSCFIpv4Addr; + + if (curIpFamily == IpFamilyV4) { + memset(pIpv4, 0x00, sizeof(IPV4_T)); + if (WdsConnectionIPv4Handle == 0) + return 0; + } else if (curIpFamily == IpFamilyV6) { + memset(pIpv6, 0x00, sizeof(IPV6_T)); + if (WdsConnectionIPv6Handle == 0) + return 0; + } + + pRequest = ComposeQMUXMsg(QMIType, QMIWDS_GET_RUNTIME_SETTINGS_REQ, WdsGetRuntimeSettingReq, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + pPCSCFIpv6Addr = (PQMIWDS_GET_RUNNING_SETTINGS_PCSCF_IPV6_ADDR)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x2e); // 0x2e - pcscf ipv6 address + if (pPCSCFIpv6Addr) { + if (pPCSCFIpv6Addr->PCSCFNumber == 1) { + UCHAR *PCSCFIpv6Addr1 = (UCHAR *)(pPCSCFIpv6Addr + 1); + memcpy(profile->PCSCFIpv6Addr1, PCSCFIpv6Addr1, 16); + }else if (pPCSCFIpv6Addr->PCSCFNumber == 2) { + UCHAR *PCSCFIpv6Addr1 = (UCHAR *)(pPCSCFIpv6Addr + 1); + UCHAR *PCSCFIpv6Addr2 = PCSCFIpv6Addr1 + 16; + memcpy(profile->PCSCFIpv6Addr1, PCSCFIpv6Addr1, 16); + memcpy(profile->PCSCFIpv6Addr2, PCSCFIpv6Addr2, 16); + } + } + + pPCSCFIpv4Addr = (PQMIWDS_GET_RUNNING_SETTINGS_PCSCF_IPV4_ADDR)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x23); // 0x23 - pcscf ipv4 address + if (pPCSCFIpv4Addr) { + if (pPCSCFIpv4Addr->PCSCFNumber == 1) { + UCHAR *PCSCFIpv4Addr1 = (UCHAR *)(pPCSCFIpv4Addr + 1); + memcpy(&profile->PCSCFIpv4Addr1, PCSCFIpv4Addr1, 4); + }else if (pPCSCFIpv4Addr->PCSCFNumber == 2) { + UCHAR *PCSCFIpv4Addr1 = (UCHAR *)(pPCSCFIpv4Addr + 1); + UCHAR *PCSCFIpv4Addr2 = PCSCFIpv4Addr1 + 4; + memcpy(&profile->PCSCFIpv4Addr1, PCSCFIpv4Addr1, 4); + memcpy(&profile->PCSCFIpv4Addr2, PCSCFIpv4Addr2, 4); + } + } + + pIpv4Addr = (PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV4_ADDR)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV4PRIMARYDNS); + if (pIpv4Addr) { + pIpv4->DnsPrimary = pIpv4Addr->IPV4Address; + } + + pIpv4Addr = (PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV4_ADDR)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV4SECONDARYDNS); + if (pIpv4Addr) { + pIpv4->DnsSecondary = pIpv4Addr->IPV4Address; + } + + pIpv4Addr = (PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV4_ADDR)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV4GATEWAY); + if (pIpv4Addr) { + pIpv4->Gateway = pIpv4Addr->IPV4Address; + } + + pIpv4Addr = (PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV4_ADDR)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV4SUBNET); + if (pIpv4Addr) { + pIpv4->SubnetMask = pIpv4Addr->IPV4Address; + } + + pIpv4Addr = (PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV4_ADDR)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 
QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV4);
+    if (pIpv4Addr) {
+        pIpv4->Address = pIpv4Addr->IPV4Address;
+    }
+
+    pIpv6Addr = (PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV6_ADDR)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV6PRIMARYDNS);
+    if (pIpv6Addr) {
+        memcpy(pIpv6->DnsPrimary, pIpv6Addr->IPV6Address, 16);
+    }
+
+    pIpv6Addr = (PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV6_ADDR)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV6SECONDARYDNS);
+    if (pIpv6Addr) {
+        memcpy(pIpv6->DnsSecondary, pIpv6Addr->IPV6Address, 16);
+    }
+
+    pIpv6Addr = (PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV6_ADDR)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV6GATEWAY);
+    if (pIpv6Addr) {
+        memcpy(pIpv6->Gateway, pIpv6Addr->IPV6Address, 16);
+        pIpv6->PrefixLengthGateway = pIpv6Addr->PrefixLength;
+    }
+
+    pIpv6Addr = (PQMIWDS_GET_RUNTIME_SETTINGS_TLV_IPV6_ADDR)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_IPV6);
+    if (pIpv6Addr) {
+        memcpy(pIpv6->Address, pIpv6Addr->IPV6Address, 16);
+        pIpv6->PrefixLengthIPAddr = pIpv6Addr->PrefixLength;
+    }
+
+    pMtu = (PQMIWDS_GET_RUNTIME_SETTINGS_TLV_MTU)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, QMIWDS_GET_RUNTIME_SETTINGS_TLV_TYPE_MTU);
+    if (pMtu) {
+        if (curIpFamily == IpFamilyV4)
+            pIpv4->Mtu = le32_to_cpu(pMtu->Mtu);
+        else
+            pIpv6->Mtu = le32_to_cpu(pMtu->Mtu);
+    }
+
+    free(pResponse);
+    return 0;
+}
+
+#ifdef CONFIG_APN
+static int requestSetProfile(PROFILE_T *profile) {
+    PQCQMIMSG pRequest;
+    PQCQMIMSG pResponse;
+    PQMUX_MSG pMUXMsg;
+    int err;
+    const char *new_apn = profile->apn ? profile->apn : "";
+    const char *new_user = profile->user ? profile->user : "";
+    const char *new_password = profile->password ? profile->password : "";
+    const char *ipStr[] = {"IPV4", "NULL", "IPV6", "IPV4V6"};
+
+    dbg_time("%s[pdp:%d index:%d] %s/%s/%s/%d/%s", __func__, profile->pdp, profile->profile_index, profile->apn, profile->user, profile->password, profile->auth, ipStr[profile->iptype]);
+    if (!profile->profile_index)
+        return -1;
+
+    if (!strcmp(profile->old_apn, new_apn) && !strcmp(profile->old_user, new_user)
+        && !strcmp(profile->old_password, new_password)
+        && profile->old_iptype == profile->iptype
+        && profile->old_auth == profile->auth)
+    {
+        dbg_time("no need to set profile, skip the rest");
+        return 0;
+    }
+
+    pRequest = ComposeQMUXMsg(QMUX_TYPE_WDS, QMIWDS_MODIFY_PROFILE_SETTINGS_REQ, WdsModifyProfileSettingsReq, profile);
+    err = QmiThreadSendQMI(pRequest, &pResponse);
+    qmi_rsp_check_and_return();
+
+    free(pResponse);
+    return 1;
+}
+
+static int requestGetProfile(PROFILE_T *profile) {
+    PQCQMIMSG pRequest;
+    PQCQMIMSG pResponse;
+    PQMUX_MSG pMUXMsg;
+    int err;
+    PQMIWDS_APNNAME pApnName;
+    PQMIWDS_USERNAME pUserName;
+    PQMIWDS_PASSWD pPassWd;
+    PQMIWDS_AUTH_PREFERENCE pAuthPref;
+    PQMIWDS_IPTYPE pIpType;
+    PQMIWDS_PDPCONTEXT pPdpContext;
+    PQMIWDS_PROFILELIST pProfileList;
+
+    const char *ipStr[] = {"IPV4", "NULL", "IPV6", "IPV4V6"};
+
+    profile->old_apn[0] = profile->old_user[0] = profile->old_password[0] = '\0';
+    profile->old_auth = 0;
+    profile->old_iptype = 0;
+    if (profile->enable_ipv4 && profile->enable_ipv6)
+        profile->iptype = 3;
+    else if (profile->enable_ipv6)
+        profile->iptype = 2;
+    else
+        profile->iptype = 0;
+
+    if (!profile->pdp)
+        return 0;
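+
+    /* [editor's note] What follows is a find-or-create loop:
+     *   1. QMIWDS_GET_PROFILE_LIST_REQ     - fetch the indices of all stored profiles;
+     *   2. QMIWDS_GET_PROFILE_SETTINGS_REQ - probe each index for a matching PDP context;
+     *   3. QMIWDS_CREATE_PROFILE_REQ       - if nothing matched, create a profile and
+     *      rescan from _re_check.
+     * On a match the loop breaks with pResponse still held, so the TLV reads after the
+     * loop operate on the matching profile's settings.
+     */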
+_re_check:
+    pRequest = ComposeQMUXMsg(QMUX_TYPE_WDS, QMIWDS_GET_PROFILE_LIST_REQ, WdsGetProfileListReqSend, profile);
+    err = QmiThreadSendQMI(pRequest, &pResponse);
+    qmi_rsp_check_and_return();
+
+    pProfileList = (PQMIWDS_PROFILELIST)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x01);
+    uint8 profile_indexs[42] = {0};
+    uint8 profile_num = pProfileList->ProfileList[0];
+    if (profile_num >= 1)
+    {
+        uint8 j = 0;
+        uint8 k = 2;
+        for (int i = 0; i < profile_num; i++)
+        {
+            profile_indexs[j++] = pProfileList->ProfileList[k];
+            if (pProfileList->ProfileList[++k] == 0)
+                k += 2;
+            else
+                k += 2 + pProfileList->ProfileList[k];
+        }
+    }
+    free(pResponse);
+
+    for (int i = 0; i < profile_num; i++)
+    {
+        profile->profile_index = profile_indexs[i];
+
+        pRequest = ComposeQMUXMsg(QMUX_TYPE_WDS, QMIWDS_GET_PROFILE_SETTINGS_REQ, WdsGetProfileSettingsReqSend, profile);
+        err = QmiThreadSendQMI(pRequest, &pResponse);
+        qmi_rsp_check_and_return();
+
+        pPdpContext = (PQMIWDS_PDPCONTEXT)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x25);
+        if (pPdpContext->pdp_context == profile->pdp)
+            break;
+        else
+            free(pResponse);
+
+        if (i == profile_num - 1)
+        {
+            pRequest = ComposeQMUXMsg(QMUX_TYPE_WDS, QMIWDS_CREATE_PROFILE_REQ, WdsCreateProfileSettingsReqSend, profile);
+            err = QmiThreadSendQMI(pRequest, &pResponse);
+            qmi_rsp_check_and_return();
+            free(pResponse);
+            goto _re_check;
+        }
+    }
+
+    pApnName = (PQMIWDS_APNNAME)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x14);
+    pUserName = (PQMIWDS_USERNAME)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x1B);
+    pPassWd = (PQMIWDS_PASSWD)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x1C);
+    pAuthPref = (PQMIWDS_AUTH_PREFERENCE)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x1D);
+    pIpType = (PQMIWDS_IPTYPE)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x11);
+
+    if (pApnName/* && le16_to_cpu(pApnName->TLVLength)*/)
+        uchar2char(profile->old_apn, sizeof(profile->old_apn), &pApnName->ApnName, le16_to_cpu(pApnName->TLVLength));
+    if (pUserName/* && pUserName->UserName*/)
+        uchar2char(profile->old_user, sizeof(profile->old_user), &pUserName->UserName, le16_to_cpu(pUserName->TLVLength));
+    if (pPassWd/* && le16_to_cpu(pPassWd->TLVLength)*/)
+        uchar2char(profile->old_password, sizeof(profile->old_password), &pPassWd->Passwd, le16_to_cpu(pPassWd->TLVLength));
+    if (pAuthPref/* && le16_to_cpu(pAuthPref->TLVLength)*/) {
+        profile->old_auth = pAuthPref->AuthPreference;
+    }
+    if (pIpType) {
+        profile->old_iptype = pIpType->IPType;
+    }
+
+    dbg_time("%s[pdp:%d index:%d] %s/%s/%s/%d/%s", __func__, profile->pdp, profile->profile_index, profile->old_apn, profile->old_user, profile->old_password, profile->old_auth, ipStr[profile->old_iptype]);
+
+    free(pResponse);
+    return 0;
+}
+#endif
+
+#ifdef CONFIG_SIGNALINFO
+static int requestGetSignalInfo(void)
+{
+    PQCQMIMSG pRequest;
+    PQCQMIMSG pResponse;
+    PQMUX_MSG pMUXMsg;
+    int err;
+
+    pRequest = ComposeQMUXMsg(QMUX_TYPE_NAS, QMINAS_GET_SIG_INFO_REQ, NULL, NULL);
+    err = QmiThreadSendQMI(pRequest, &pResponse);
+    qmi_rsp_check_and_return();
+
+    // CDMA
+    {
+        PQMINAS_SIG_INFO_CDMA_TLV_MSG ptlv = (PQMINAS_SIG_INFO_CDMA_TLV_MSG)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x10);
+        if (ptlv && ptlv->TLVLength)
+        {
+            dbg_time("%s CDMA: RSSI %d dBm, ECIO %.1lf dBm", __func__,
+                ptlv->rssi, (-0.5) * (double)ptlv->ecio);
+        }
+    }
+
+    // HDR
+    {
+        PQMINAS_SIG_INFO_HDR_TLV_MSG ptlv = (PQMINAS_SIG_INFO_HDR_TLV_MSG)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x11);
+        if (ptlv && ptlv->TLVLength)
+        {
+            dbg_time("%s HDR: RSSI %d dBm, ECIO %.1lf dBm, IO %d dBm", __func__,
+                ptlv->rssi, (-0.5) * (double)ptlv->ecio, ptlv->io);
+        }
+    }
+
+    // GSM
+    {
+        PQMINAS_SIG_INFO_GSM_TLV_MSG ptlv = (PQMINAS_SIG_INFO_GSM_TLV_MSG)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x12);
+        if (ptlv && ptlv->TLVLength)
+        {
+            dbg_time("%s GSM: RSSI %d dBm", __func__, ptlv->rssi);
+        }
+    }
+
+    // WCDMA
+    {
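+        /* [editor's note] The scaling in these per-RAT blocks follows the raw TLV
+         * encodings as used in the format strings: ECIO is reported in -0.5 dB steps
+         * (hence the -0.5 factor) and LTE/NR SNR in 0.1 dB steps (hence 0.1 * snr).
+         * Worked example: a raw ecio of 10 prints as -5.0 here, and a raw snr of 95
+         * prints as 9.5 dB in the LTE block below. */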
PQMINAS_SIG_INFO_WCDMA_TLV_MSG ptlv = (PQMINAS_SIG_INFO_WCDMA_TLV_MSG)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x13); + if (ptlv && ptlv->TLVLength) + { + dbg_time("%s WCDMA: RSSI %d dBm, ECIO %.1lf dBm", __func__, + ptlv->rssi, (-0.5) * (double)ptlv->ecio); + } + } + + // LTE + { + PQMINAS_SIG_INFO_LTE_TLV_MSG ptlv = (PQMINAS_SIG_INFO_LTE_TLV_MSG)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x14); + if (ptlv && ptlv->TLVLength) + { + dbg_time("%s LTE: RSSI %d dBm, RSRQ %d dB, RSRP %d dBm, SNR %.1lf dB", __func__, + ptlv->rssi, ptlv->rsrq, ptlv->rsrp, (0.1) * (double)ptlv->snr); + } + } + + // TDSCDMA + { + PQMINAS_SIG_INFO_TDSCDMA_TLV_MSG ptlv = (PQMINAS_SIG_INFO_TDSCDMA_TLV_MSG)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x15); + if (ptlv && ptlv->TLVLength) + { + dbg_time("%s LTE: RSCP %d dBm", __func__, ptlv->rscp); + } + } + + // 5G_NSA + if (s_5g_type == WWAN_DATA_CLASS_5G_NSA) + { + PQMINAS_SIG_INFO_5G_NSA_TLV_MSG ptlv = (PQMINAS_SIG_INFO_5G_NSA_TLV_MSG)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x17); + if (ptlv && ptlv->TLVLength) + { + dbg_time("%s 5G_NSA: RSRP %d dBm, SNR %.1lf dB", __func__, ptlv->rsrp, (0.1) * (double)ptlv->snr); + } + } + + // 5G_SA + if (s_5g_type == WWAN_DATA_CLASS_5G_SA) + { + PQMINAS_SIG_INFO_5G_SA_TLV_MSG ptlv = (PQMINAS_SIG_INFO_5G_SA_TLV_MSG)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x18); + if (ptlv && ptlv->TLVLength) + { + dbg_time("%s 5G_SA: NR5G_RSRQ %d dB", __func__, ptlv->nr5g_rsrq); + } + } + + free(pResponse); + return 0; +} +#endif + +#ifdef CONFIG_VERSION +static int requestBaseBandVersion(PROFILE_T *profile) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + PDEVICE_REV_ID revId; + int err; + + pRequest = ComposeQMUXMsg(QMUX_TYPE_DMS, QMIDMS_GET_DEVICE_REV_ID_REQ, NULL, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + revId = (PDEVICE_REV_ID)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x01); + + if (revId && le16_to_cpu(revId->TLVLength)) + { + uchar2char(profile->BaseBandVersion, sizeof(profile->BaseBandVersion), &revId->RevisionID, le16_to_cpu(revId->TLVLength)); + dbg_time("%s %s", __func__, profile->BaseBandVersion); + } + + free(pResponse); + return 0; +} +#endif + +static USHORT DmsSetOperatingModeReq(PQMUX_MSG pMUXMsg, void *arg) { + pMUXMsg->SetOperatingModeReq.TLVType = 0x01; + pMUXMsg->SetOperatingModeReq.TLVLength = cpu_to_le16(1); + pMUXMsg->SetOperatingModeReq.OperatingMode = *((UCHAR *)arg); + + return sizeof(QMIDMS_SET_OPERATING_MODE_REQ_MSG); +} + +static USHORT UimSetCardSlotReq(PQMUX_MSG pMUXMsg, void *arg) { + pMUXMsg->UIMSetCardSlotReq.TLVType = 0x01; + pMUXMsg->UIMSetCardSlotReq.TLVLength = cpu_to_le16(1); + pMUXMsg->UIMSetCardSlotReq.slot = *((UCHAR *)arg); + + return sizeof(QMIUIM_SET_CARD_SLOT_REQ_MSG); +} + +static int requestRadioPower(int state) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + UCHAR OperatingMode = (!!state) ? DMS_OP_MODE_ONLINE : DMS_OP_MODE_LOW_POWER; + USHORT SimOp = (!!state) ? 
QMIUIM_POWER_UP : QMIUIM_POWER_DOWN; + UCHAR cardSlot = 0x01; + + dbg_time("%s(%d)", __func__, state); + + pRequest = ComposeQMUXMsg(QMUX_TYPE_DMS, QMIDMS_SET_OPERATING_MODE_REQ, DmsSetOperatingModeReq, &OperatingMode); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + free(pResponse); + + pRequest = ComposeQMUXMsg(QMUX_TYPE_UIM, SimOp, UimSetCardSlotReq, &cardSlot); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + free(pResponse); + + return 0; +} + +static USHORT WdaSetLoopBackReq(PQMUX_MSG pMUXMsg, void *arg) { + (void)arg; + pMUXMsg->SetLoopBackReq.loopback_state.TLVType = 0x01; + pMUXMsg->SetLoopBackReq.loopback_state.TLVLength = cpu_to_le16(1); + + pMUXMsg->SetLoopBackReq.replication_factor.TLVType = 0x10; + pMUXMsg->SetLoopBackReq.replication_factor.TLVLength = cpu_to_le16(4); + + return sizeof(QMI_WDA_SET_LOOPBACK_CONFIG_REQ_MSG); +} + +static int requestSetLoopBackState(UCHAR loopback_state, ULONG replication_factor) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + + dbg_time("%s(loopback_state=%d, replication_factor=%u)", __func__, loopback_state, replication_factor); + + pRequest = ComposeQMUXMsg(QMUX_TYPE_WDS_ADMIN, QMI_WDA_SET_LOOPBACK_CONFIG_REQ, WdaSetLoopBackReq, NULL); + pRequest->MUXMsg.SetLoopBackReq.loopback_state.TLVVaule = loopback_state; + pRequest->MUXMsg.SetLoopBackReq.replication_factor.TLVVaule = cpu_to_le16(replication_factor); + + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + free(pResponse); + return 0; +} + +#ifdef CONFIG_ENABLE_QOS +static USHORT QosSetBindMuxDataPort(PQMUX_MSG pMUXMsg, void *arg) { + PROFILE_T *profile = (PROFILE_T *)arg; + pMUXMsg->QosBindDataPortReq.EpIdTlv.TLVType = 0x10; + pMUXMsg->QosBindDataPortReq.EpIdTlv.TLVLength = cpu_to_le16(8); + pMUXMsg->QosBindDataPortReq.EpIdTlv.ep_type = cpu_to_le32(profile->rmnet_info.ep_type); + pMUXMsg->QosBindDataPortReq.EpIdTlv.iface_id = cpu_to_le32(profile->rmnet_info.iface_id); + pMUXMsg->QosBindDataPortReq.MuxIdTlv.TLVType = 0x11; + pMUXMsg->QosBindDataPortReq.MuxIdTlv.TLVLength = cpu_to_le16(1); + pMUXMsg->QosBindDataPortReq.MuxIdTlv.mux_id = profile->muxid; + return sizeof(QMI_QOS_BIND_DATA_PORT_REQ_MSG); +} + +#ifdef CONFIG_REG_QOS_IND +static USHORT QosIndRegReq(PQMUX_MSG pMUXMsg, void *arg) { + pMUXMsg->QosIndRegReq.ReportGlobalQosFlowTlv.TLVType = 0x10; + pMUXMsg->QosIndRegReq.ReportGlobalQosFlowTlv.TLVLength = cpu_to_le16(1); + pMUXMsg->QosIndRegReq.ReportGlobalQosFlowTlv.report_global_qos_flows = 1; + return sizeof(QMI_QOS_INDICATION_REGISTER_REQ_MSG); +} +#endif + +static int requestRegisterQos(PROFILE_T *profile) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse = NULL; + PQMUX_MSG pMUXMsg; + int err; + + pRequest = ComposeQMUXMsg(QMUX_TYPE_QOS, QMI_QOS_BIND_DATA_PORT_REQ , QosSetBindMuxDataPort, (void *)profile); + err = QmiThreadSendQMI(pRequest, &pResponse); + dbg_time("%s QosSetBindMuxDataPort", __func__); + qmi_rsp_check_and_return(); + if (pResponse) free(pResponse); + +#ifdef CONFIG_REG_QOS_IND + pRequest = ComposeQMUXMsg(QMUX_TYPE_QOS, QMI_QOS_INDICATION_REGISTER_REQ , QosIndRegReq, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + dbg_time("%s QosIndRegReq", __func__); + qmi_rsp_check_and_return(); + if (pResponse) free(pResponse); +#endif + return 0; +} + +#ifdef CONFIG_GET_QOS_INFO +UCHAR ql_get_qos_info_data_rate(PQCQMIMSG pResponse, void *max_data_rate) +{ + PQMI_QOS_GET_QOS_INFO_TLV_GRANTED_FLOW qos_tx_granted_flow = NULL; + 
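+    /* [editor's note] TLV 0x11 carries the TX granted flow and TLV 0x12 the RX granted
+     * flow; the caller's max_data_rate buffer is treated as ULONG64[2], slot 0 = TX,
+     * slot 1 = RX (data_rate_max is presumably in bits per second). A sketch of the
+     * intended caller, mirroring requestGetQosInfo() below (disabled, illustrative):
+     */
+#if 0
+    ULONG64 rates[2] = {0, 0};                       /* [0] = tx max, [1] = rx max */
+    if (ql_get_qos_info_data_rate(pResponse, (void *)rates) == 0)
+        dbg_time("tx=%llu bps, rx=%llu bps", rates[0], rates[1]);
+#endif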
PQMI_QOS_GET_QOS_INFO_TLV_GRANTED_FLOW qos_rx_granted_flow = NULL; + qos_tx_granted_flow = (PQMI_QOS_GET_QOS_INFO_TLV_GRANTED_FLOW)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x11); + if(qos_tx_granted_flow != NULL) + { + *(ULONG64 *)(max_data_rate) = le64_to_cpu(qos_tx_granted_flow->data_rate_max); + dbg_time("GET_QOS_INFO: tx_data_rate_max=%llu", *(ULONG64 *)(max_data_rate+0)); + } + else + dbg_time("GET_QOS_INFO: No qos_tx_granted_flow"); + qos_rx_granted_flow = (PQMI_QOS_GET_QOS_INFO_TLV_GRANTED_FLOW)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x12); + if(qos_rx_granted_flow != NULL) + { + *(ULONG64 *)(max_data_rate+sizeof(ULONG64)) = le64_to_cpu(qos_rx_granted_flow->data_rate_max); + dbg_time("GET_QOS_INFO: rx_data_rate_max=%llu", *(ULONG64 *)(max_data_rate+sizeof(ULONG64))); + } + else + dbg_time("GET_QOS_INFO: No qos_rx_granted_flow"); + if(qos_tx_granted_flow != NULL || qos_rx_granted_flow != NULL) + return 0; + else + return 1; +} + +static USHORT QosGetQosInfoReq(PQMUX_MSG pMUXMsg, void *arg) { + PROFILE_T *profile = (PROFILE_T *)arg; + pMUXMsg->QosGetQosInfoReq.QosIdTlv.TLVType = 0x01; + pMUXMsg->QosGetQosInfoReq.QosIdTlv.TLVLength = cpu_to_le16(4); + pMUXMsg->QosGetQosInfoReq.QosIdTlv.qos_id = cpu_to_le32(profile->qos_id); + return sizeof(QMI_QOS_GET_QOS_INFO_REQ_MSG); +} + +static int requestGetQosInfo(PROFILE_T *profile) { + PQCQMIMSG pRequest; + PQCQMIMSG pResponse = NULL; + PQMUX_MSG pMUXMsg; + int err; + + if(profile->qos_id == 0) + { + dbg_time("%s request not send: invalid qos_id", __func__); + return 0; + } + pRequest = ComposeQMUXMsg(QMUX_TYPE_QOS, QMI_QOS_GET_QOS_INFO_REQ , QosGetQosInfoReq, (void *)profile); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + if (pResponse) + { +#ifdef CONFIG_GET_QOS_DATA_RATE + ULONG64 max_data_rate[2] = {0}; + if(ql_get_qos_info_data_rate(pResponse, (void *)max_data_rate) == 0){} +#endif + free(pResponse); + } + return 0; +} +#endif //#ifdef CONFIG_GET_QOS_INFO + +#ifdef CONFIG_REG_QOS_IND +UCHAR ql_get_global_qos_flow_ind_qos_id(PQCQMIMSG pResponse, UINT *qos_id) +{ + PQMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_STATE qos_flow_state = NULL; + qos_flow_state = (PQMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_STATE)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x01); + if(qos_flow_state != NULL) + { + if(le32_to_cpu(qos_flow_state->state_change) == QOS_IND_FLOW_STATE_ACTIVATED && qos_flow_state->new_flow == 1) + { + *qos_id = le32_to_cpu(qos_flow_state->qos_id); + dbg_time("QMI_QOS_GLOBAL_QOS_FLOW_IND: qos_id=%u state=QOS_IND_FLOW_STATE_ACTIVATED", *qos_id); + } + return (qos_flow_state->new_flow); + } + return (0); +} + +#ifdef CONFIG_GET_QOS_DATA_RATE +UCHAR ql_get_global_qos_flow_ind_data_rate(PQCQMIMSG pResponse, void *max_data_rate) +{ + PQMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_GRANTED qos_tx_flow_granted = NULL; + PQMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_GRANTED qos_rx_flow_granted = NULL; + qos_tx_flow_granted = (PQMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_GRANTED)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x10); + if(qos_tx_flow_granted != NULL) + { + *(ULONG64 *)(max_data_rate) = le64_to_cpu(qos_tx_flow_granted->data_rate_max); + dbg_time("QMI_QOS_GLOBAL_QOS_FLOW_IND: tx_data_rate_max=%llu", *(ULONG64 *)(max_data_rate+0)); + } + else + dbg_time("QMI_QOS_GLOBAL_QOS_FLOW_IND: No qos_tx_flow_granted"); + qos_rx_flow_granted = (PQMI_QOS_GLOBAL_QOS_FLOW_TLV_FLOW_GRANTED)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x11); + if(qos_rx_flow_granted != NULL) + { + *(ULONG64 *)(max_data_rate+sizeof(ULONG64)) = le64_to_cpu(qos_rx_flow_granted->data_rate_max); + 
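+        /* [editor's note] `max_data_rate + sizeof(ULONG64)` is arithmetic on a void
+         * pointer, a GCC extension that advances the pointer by bytes. A portable
+         * equivalent that writes the same slot:
+         *
+         *     ((ULONG64 *)max_data_rate)[1] = le64_to_cpu(qos_rx_flow_granted->data_rate_max);
+         */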
dbg_time("QMI_QOS_GLOBAL_QOS_FLOW_IND: rx_data_rate_max=%llu", *(ULONG64 *)(max_data_rate+sizeof(ULONG64))); + } + else + dbg_time("QMI_QOS_GLOBAL_QOS_FLOW_IND: No qos_rx_flow_granted"); + if(qos_tx_flow_granted != NULL || qos_rx_flow_granted != NULL) + return 0; + else + return 1; +} +#endif +#endif //#ifdef CONFIG_REG_QOS_IND +#endif //#ifdef CONFIG_ENABLE_QOS + +#ifdef CONFIG_CELLINFO +/* + at+qeng="servingcell" and at+qeng="neighbourcell" + https://gitlab.freedesktop.org/mobile-broadband/libqmi/-/blob/master/src/qmicli/qmicli-nas.c +*/ +static int nas_get_cell_location_info(void); +static int nas_get_rf_band_information(void); + +static int requestGetCellInfoList(void) { + dbg_time("%s", __func__); + nas_get_cell_location_info(); + nas_get_rf_band_information(); + return 0; +} +#endif + +const struct request_ops qmi_request_ops = { +#ifdef CONFIG_VERSION + .requestBaseBandVersion = requestBaseBandVersion, +#endif + .requestSetEthMode = requestSetEthMode, +#ifdef CONFIG_SIM + .requestGetSIMStatus = requestGetSIMStatus, + .requestEnterSimPin = requestEnterSimPin, +#endif +#ifdef CONFIG_IMSI_ICCID + .requestGetICCID = requestGetICCID, + .requestGetIMSI = requestGetIMSI, +#endif +#ifdef CONFIG_APN + .requestSetProfile = requestSetProfile, + .requestGetProfile = requestGetProfile, +#endif + .requestRegistrationState = requestRegistrationState, + .requestSetupDataCall = requestSetupDataCall, + .requestQueryDataCall = requestQueryDataCall, + .requestDeactivateDefaultPDP = requestDeactivateDefaultPDP, + .requestGetIPAddress = requestGetIPAddress, +#ifdef CONFIG_SIGNALINFO + .requestGetSignalInfo = requestGetSignalInfo, +#endif +#ifdef CONFIG_CELLINFO + .requestGetCellInfoList = requestGetCellInfoList, +#endif + .requestSetLoopBackState = requestSetLoopBackState, + .requestRadioPower = requestRadioPower, +#ifdef CONFIG_ENABLE_QOS + .requestRegisterQos = requestRegisterQos, +#endif +#ifdef CONFIG_GET_QOS_INFO + .requestGetQosInfo = requestGetQosInfo, +#endif +#ifdef CONFIG_COEX_WWAN_STATE + .requestGetCoexWWANState = requestGetCoexWWANState, +#endif +}; + +#ifdef CONFIG_CELLINFO +static char *str_from_bcd_plmn (uint8 plmn[3]) +{ + const char bcd_chars[] = "0123456789*#abc\0\0"; + static char str[12]; + int i; + int j = 0; + + for (i = 0; i < 3; i++) { + str[j] = bcd_chars[plmn[i]&0xF]; + if (str[j]) j++; + str[j] = bcd_chars[plmn[i]>>4]; + if (str[j]) j++; + } + + str[j++] = 0; + + return str; +} + +typedef struct { + UINT type; + const char *name; +} ENUM_NAME_T; + +#define enum_name(type) {type, #type} +#define N_ELEMENTS(arr) (sizeof (arr) / sizeof ((arr)[0])) + +static const ENUM_NAME_T QMI_NAS_ACTIVE_BAND_NAME[] = { + enum_name(QMI_NAS_ACTIVE_BAND_BC_0), + enum_name(QMI_NAS_ACTIVE_BAND_BC_1), + enum_name(QMI_NAS_ACTIVE_BAND_BC_2), + enum_name(QMI_NAS_ACTIVE_BAND_BC_3), + enum_name(QMI_NAS_ACTIVE_BAND_BC_4), + enum_name(QMI_NAS_ACTIVE_BAND_BC_5), + enum_name(QMI_NAS_ACTIVE_BAND_BC_6), + enum_name(QMI_NAS_ACTIVE_BAND_BC_7), + enum_name(QMI_NAS_ACTIVE_BAND_BC_8), + enum_name(QMI_NAS_ACTIVE_BAND_BC_9), + enum_name(QMI_NAS_ACTIVE_BAND_BC_10), + enum_name(QMI_NAS_ACTIVE_BAND_BC_11), + enum_name(QMI_NAS_ACTIVE_BAND_BC_12), + enum_name(QMI_NAS_ACTIVE_BAND_BC_13), + enum_name(QMI_NAS_ACTIVE_BAND_BC_14), + enum_name(QMI_NAS_ACTIVE_BAND_BC_15), + enum_name(QMI_NAS_ACTIVE_BAND_BC_16), + enum_name(QMI_NAS_ACTIVE_BAND_BC_17), + enum_name(QMI_NAS_ACTIVE_BAND_BC_18), + enum_name(QMI_NAS_ACTIVE_BAND_BC_19), + enum_name(QMI_NAS_ACTIVE_BAND_GSM_450), + enum_name(QMI_NAS_ACTIVE_BAND_GSM_480), + 
enum_name(QMI_NAS_ACTIVE_BAND_GSM_750), + enum_name(QMI_NAS_ACTIVE_BAND_GSM_850), + enum_name(QMI_NAS_ACTIVE_BAND_GSM_900_EXTENDED), + enum_name(QMI_NAS_ACTIVE_BAND_GSM_900_PRIMARY), + enum_name(QMI_NAS_ACTIVE_BAND_GSM_900_RAILWAYS), + enum_name(QMI_NAS_ACTIVE_BAND_GSM_DCS_1800), + enum_name(QMI_NAS_ACTIVE_BAND_GSM_PCS_1900), + enum_name(QMI_NAS_ACTIVE_BAND_WCDMA_2100), + enum_name(QMI_NAS_ACTIVE_BAND_WCDMA_PCS_1900), + enum_name(QMI_NAS_ACTIVE_BAND_WCDMA_DCS_1800), + enum_name(QMI_NAS_ACTIVE_BAND_WCDMA_1700_US), + enum_name(QMI_NAS_ACTIVE_BAND_WCDMA_850), + enum_name(QMI_NAS_ACTIVE_BAND_WCDMA_800), + enum_name(QMI_NAS_ACTIVE_BAND_WCDMA_2600), + enum_name(QMI_NAS_ACTIVE_BAND_WCDMA_900), + enum_name(QMI_NAS_ACTIVE_BAND_WCDMA_1700_JAPAN), + enum_name(QMI_NAS_ACTIVE_BAND_WCDMA_1500_JAPAN), + enum_name(QMI_NAS_ACTIVE_BAND_WCDMA_850_JAPAN), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_1), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_2), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_3), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_4), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_5), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_6), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_7), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_8), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_9), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_10), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_11), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_12), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_13), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_14), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_17), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_18), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_19), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_20), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_21), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_23), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_24), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_25), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_26), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_27), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_28), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_29), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_30), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_31), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_32), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_33), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_34), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_35), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_36), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_37), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_38), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_39), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_40), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_41), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_42), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_43), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_46), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_47), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_48), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_66), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_71), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_125), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_126), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_127), + enum_name(QMI_NAS_ACTIVE_BAND_EUTRAN_250), + enum_name(QMI_NAS_ACTIVE_BAND_TDSCDMA_A), + enum_name(QMI_NAS_ACTIVE_BAND_TDSCDMA_B), + enum_name(QMI_NAS_ACTIVE_BAND_TDSCDMA_C), + enum_name(QMI_NAS_ACTIVE_BAND_TDSCDMA_D), + enum_name(QMI_NAS_ACTIVE_BAND_TDSCDMA_E), + enum_name(QMI_NAS_ACTIVE_BAND_TDSCDMA_F), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_1 ), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_2 ), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_3 ), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_5 ), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_7 ), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_8 ), + 
enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_20), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_28), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_38), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_41), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_50), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_51), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_66), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_70), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_71), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_74), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_75), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_76), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_77), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_78), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_79), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_80), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_81), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_82), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_83), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_84), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_85), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_257), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_258), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_259), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_260), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_261), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_12), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_25), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_34), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_39), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_40), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_65), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_86), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_48), + enum_name(QMI_NAS_ACTIVE_BAND_NR5G_BAND_14), +}; + +static const char *qmi_nas_radio_interface_get_string(uint8 radio_if) +{ + const char *str = NULL; + + switch (radio_if) { + case QMI_NAS_RADIO_INTERFACE_CDMA_1X: str = "cdma-1x"; break; + case QMI_NAS_RADIO_INTERFACE_CDMA_1XEVDO: str = "cdma-1xevdo"; break; + case QMI_NAS_RADIO_INTERFACE_AMPS: str = "amps"; break; + case QMI_NAS_RADIO_INTERFACE_GSM: str = "gsm"; break; + case QMI_NAS_RADIO_INTERFACE_UMTS: str = "umts"; break; + case QMI_NAS_RADIO_INTERFACE_LTE: str = "lte"; break; + case QMI_NAS_RADIO_INTERFACE_TD_SCDMA: str = "td-scdma"; break; + case QMI_NAS_RADIO_INTERFACE_5GNR: str = "5gnr"; break; + default: str = NULL; break; + } + + return str ? 
str : "unknown"; +} + +static const char *qmi_nas_active_band_get_string(uint32 active_band) +{ + size_t i; + + for (i = 0; i < N_ELEMENTS(QMI_NAS_ACTIVE_BAND_NAME); i++) { + if (active_band == QMI_NAS_ACTIVE_BAND_NAME[i].type) + return QMI_NAS_ACTIVE_BAND_NAME[i].name + strlen("QMI_NAS_ACTIVE_BAND_"); + } + + return "unknown"; +} + +typedef struct { + uint16 min; + uint16 max; + const char *name; +} EarfcnRange; + +/* http://niviuk.free.fr/lte_band.php */ +static const EarfcnRange earfcn_ranges[] = { + { 0, 599, "E-UTRA band 1: 2100" }, + { 600, 1199, "E-UTRA band 2: 1900 PCS" }, + { 1200, 1949, "E-UTRA band 3: 1800+" }, + { 1950, 2399, "E-UTRA band 4: AWS-1" }, + { 2400, 2649, "E-UTRA band 5: 850" }, + { 2650, 2749, "E-UTRA band 6: UMTS only" }, + { 2750, 3449, "E-UTRA band 7: 2600" }, + { 3450, 3799, "E-UTRA band 8: 900" }, + { 3800, 4149, "E-UTRA band 9: 1800" }, + { 4150, 4749, "E-UTRA band 10: AWS-1+" }, + { 4750, 4999, "E-UTRA band 11: 1500 Lower" }, + { 5000, 5179, "E-UTRA band 12: 700 a" }, + { 5180, 5279, "E-UTRA band 13: 700 c" }, + { 5280, 5379, "E-UTRA band 14: 700 PS" }, + { 5730, 5849, "E-UTRA band 17: 700 b" }, + { 5850, 5999, "E-UTRA band 18: 800 Lower" }, + { 6000, 6149, "E-UTRA band 19: 800 Upper" }, + { 6150, 6449, "E-UTRA band 20: 800 DD" }, + { 6450, 6599, "E-UTRA band 21: 1500 Upper" }, + { 6600, 7399, "E-UTRA band 22: 3500" }, + { 7500, 7699, "E-UTRA band 23: 2000 S-band" }, + { 7700, 8039, "E-UTRA band 24: 1600 L-band" }, + { 8040, 8689, "E-UTRA band 25: 1900+" }, + { 8690, 9039, "E-UTRA band 26: 850+" }, + { 9040, 9209, "E-UTRA band 27: 800 SMR" }, + { 9210, 9659, "E-UTRA band 28: 700 APT" }, + { 9660, 9769, "E-UTRA band 29: 700 d" }, + { 9770, 9869, "E-UTRA band 30: 2300 WCS" }, + { 9870, 9919, "E-UTRA band 31: 450" }, + { 9920, 10359, "E-UTRA band 32: 1500 L-band" }, + { 36000, 36199, "E-UTRA band 33: TD 1900" }, + { 36200, 36349, "E-UTRA band 34: TD 2000" }, + { 36350, 36949, "E-UTRA band 35: TD PCS Lower" }, + { 36950, 37549, "E-UTRA band 36: TD PCS Upper" }, + { 37550, 37749, "E-UTRA band 37: TD PCS Center" }, + { 37750, 38249, "E-UTRA band 38: TD 2600" }, + { 38250, 38649, "E-UTRA band 39: TD 1900+" }, + { 38650, 39649, "E-UTRA band 40: TD 2300" }, + { 39650, 41589, "E-UTRA band 41: TD 2500" }, + { 41590, 43589, "E-UTRA band 42: TD 3500" }, + { 43590, 45589, "E-UTRA band 43: TD 3700" }, + { 45590, 46589, "E-UTRA band 44: TD 700" }, +}; + +static const char * earfcn_to_eutra_band_string (uint16 earfcn) +{ + size_t i; + + for (i = 0; i < N_ELEMENTS (earfcn_ranges); i++) { + if (earfcn <= earfcn_ranges[i].max && earfcn >= earfcn_ranges[i].min) + return earfcn_ranges[i].name; + } + + return "unknown"; +} + +static int nas_get_cell_location_info(void) +{ + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + PQMI_TLV pV; + int err; + int i, j; + + pRequest = ComposeQMUXMsg(QMUX_TYPE_NAS, QMINAS_GET_CELL_LOCATION_INFO_REQ, NULL, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + pV = (PQMI_TLV)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x2E); + if (pV && pV->TLVLength) { + printf ("5GNR ARFCN: '%u'\n", pV->u32); + } + + { + NasGetCellLocationNr5gServingCell *ptlv = (NasGetCellLocationNr5gServingCell *)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x2F); + if (ptlv && ptlv->TLVLength) + { + printf ("5GNR cell information:\n" + "\tPLMN: '%s'\n" + "\tTracking Area Code: '%u'\n" + "\tGlobal Cell ID: '%" PRIu64 "'\n" + "\tPhysical Cell ID: '%u'\n" + "\tRSRQ: '%.1lf dB'\n" + "\tRSRP: '%.1lf dBm'\n" + "\tSNR: '%.1lf dB'\n", + 
str_from_bcd_plmn(ptlv->plmn), + ptlv->tac[0]<<16 | ptlv->tac[1]<<8 | ptlv->tac[2] , + ptlv->global_cell_id, + ptlv->physical_cell_id, + (0.1) * ((double)ptlv->rsrq), + (0.1) * ((double)ptlv->rsrp), + (0.1) * ((double)ptlv->snr)); + } + } + + { + NasGetCellLocationLteInfoIntrafrequency *ptlv = (NasGetCellLocationLteInfoIntrafrequency *)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x13); + if (ptlv && ptlv->TLVLength) + { + printf ("Intrafrequency LTE Info:\n" + "\tUE In Idle: '%s'\n" + "\tPLMN: '%s'\n" + "\tTracking Area Code: '%u'\n" + "\tGlobal Cell ID: '%u'\n" + "\tEUTRA Absolute RF Channel Number: '%u' (%s)\n" + "\tServing Cell ID: '%u'\n", + ptlv->ue_in_idle ? "yes" : "no", + str_from_bcd_plmn(ptlv->plmn), + ptlv->tracking_area_code, + ptlv->global_cell_id, + ptlv->absolute_rf_channel_number, earfcn_to_eutra_band_string(ptlv->absolute_rf_channel_number), + ptlv->serving_cell_id); + + if (ptlv->ue_in_idle) + printf ("\tCell Reselection Priority: '%u'\n" + "\tS Non Intra Search Threshold: '%u'\n" + "\tServing Cell Low Threshold: '%u'\n" + "\tS Intra Search Threshold: '%u'\n", + ptlv->cell_reselection_priority, + ptlv->s_non_intra_search_threshold, + ptlv->serving_cell_low_threshold, + ptlv->s_intra_search_threshold); + + + for (i = 0; i < ptlv->cells_len; i++) { + NasGetCellLocationLteInfoCell *cell = &ptlv->cells_array[i]; + + printf ("\tCell [%u]:\n" + "\t\tPhysical Cell ID: '%u'\n" + "\t\tRSRQ: '%.1lf' dB\n" + "\t\tRSRP: '%.1lf' dBm\n" + "\t\tRSSI: '%.1lf' dBm\n", + i, + cell->physical_cell_id, + (double) cell->rsrq * 0.1, + (double) cell->rsrp * 0.1, + (double) cell->rssi * 0.1); + + if (ptlv->ue_in_idle) + printf ("\t\tCell Selection RX Level: '%d'\n", + cell->cell_selection_rx_level); + } + } + } + + { + NasGetCellLocationLteInfoInterfrequency *ptlv = (NasGetCellLocationLteInfoInterfrequency *)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x14); + if (ptlv && ptlv->TLVLength) + { + int off = offsetof(NasGetCellLocationLteInfoInterfrequency, freqs[0]); + printf ("Interfrequency LTE Info:\n" + "\tUE In Idle: '%s'\n", ptlv->ue_in_idle ? 
"yes" : "no"); + + for (i = 0; i < ptlv->freqs_len; i++) { + NasGetCellLocationLteInfoInterfrequencyFrequencyElement *freq = (((void *)ptlv) + off); + + off += sizeof(*freq); + printf ("\tFrequency [%u]:\n" + "\t\tEUTRA Absolute RF Channel Number: '%u' (%s)\n" + "\t\tSelection RX Level Low Threshold: '%u'\n" + "\t\tCell Selection RX Level High Threshold: '%u'\n", + i, + freq->eutra_absolute_rf_channel_number, earfcn_to_eutra_band_string(freq->eutra_absolute_rf_channel_number), + freq->cell_selection_rx_level_low_threshold, + freq->cell_selection_rx_level_high_threshold); + if (ptlv->ue_in_idle) + printf ("\t\tCell Reselection Priority: '%u'\n", + freq->cell_reselection_priority); + + + for (j = 0; j < freq->cells_len; j++) { + NasGetCellLocationLteInfoCell *cell = &freq->cells_array[j]; + + off += sizeof(*cell); + printf ("\t\tCell [%u]:\n" + "\t\t\tPhysical Cell ID: '%u'\n" + "\t\t\tRSRQ: '%.1lf' dB\n" + "\t\t\tRSRP: '%.1lf' dBm\n" + "\t\t\tRSSI: '%.1lf' dBm\n" + "\t\t\tCell Selection RX Level: '%u'\n", + j, + cell->physical_cell_id, + (double) cell->rsrq * 0.1, + (double) cell->rsrp * 0.1, + (double) cell->rssi * 0.1, + cell->cell_selection_rx_level); + } + } + } + } + + pV = (PQMI_TLV)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x1E); + if (pV && pV->TLVLength) { + if (pV->u32 == 0xFFFFFFFF) + printf ("LTE Timing Advance: 'unavailable'\n"); + else + printf ("LTE Timing Advance: '%u'\n", pV->u32); + } + + free(pResponse); + return 0; +} + +static int nas_get_rf_band_information(void) +{ + PQCQMIMSG pRequest; + PQCQMIMSG pResponse; + PQMUX_MSG pMUXMsg; + int err; + int i; + + pRequest = ComposeQMUXMsg(QMUX_TYPE_NAS, QMINAS_GET_RF_BAND_INFO_REQ, NULL, NULL); + err = QmiThreadSendQMI(pRequest, &pResponse); + qmi_rsp_check_and_return(); + + { + NasGetRfBandInfoList *ptlv = (NasGetRfBandInfoList *)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x01); + if (ptlv && ptlv->TLVLength) + { + printf ("Band Information:\n"); + for (i = 0; i < ptlv->num_instances; i++) { + NasGetRfBandInfo *band = &ptlv->bands_array[i]; + + printf ("\tRadio Interface: '%s'\n" + "\tActive Band Class: '%s'\n" + "\tActive Channel: '%u'\n", + qmi_nas_radio_interface_get_string (band->radio_if), + qmi_nas_active_band_get_string (band->active_band), + band->active_channel); + } + } + } + + { + NasGetRfBandInfoExtendedList *ptlv = (NasGetRfBandInfoExtendedList *)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x11); + if (ptlv && ptlv->TLVLength) + { + printf ("Band Information (Extended):\n"); + for (i = 0; i < ptlv->num_instances; i++) { + NasGetRfBandInfoExtended *band = &ptlv->bands_array[i]; + + printf ("\tRadio Interface: '%s'\n" + "\tActive Band Class: '%s'\n" + "\tActive Channel: '%u'\n", + qmi_nas_radio_interface_get_string (band->radio_if), + qmi_nas_active_band_get_string (band->active_band), + band->active_channel); + } + } + } + + { + NasGetRfBandInfoBandWidthList *ptlv = (NasGetRfBandInfoBandWidthList *)GetTLV(&pResponse->MUXMsg.QMUXMsgHdr, 0x12); + if (ptlv && ptlv->TLVLength) + { + printf ("Bandwidth:\n"); + for (i = 0; i < ptlv->num_instances; i++) { + NasGetRfBandInfoBandWidth *band = &ptlv->bands_array[i]; + + printf ("\tRadio Interface: '%s'\n" + "\tBandwidth: '%u'\n", + qmi_nas_radio_interface_get_string (band->radio_if), + (band->bandwidth)); + } + } + } + + free(pResponse); + return 0; +} +#endif diff --git a/wwan/app/quectel_cm_5G/src/QMIThread.h b/wwan/app/quectel_cm_5G/src/QMIThread.h new file mode 100644 index 0000000..bbf2d4b --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/QMIThread.h @@ -0,0 +1,423 @@ +#ifndef 
__QMI_THREAD_H__
+#define __QMI_THREAD_H__
+
+#define CONFIG_GOBINET
+#define CONFIG_QMIWWAN
+#define CONFIG_SIM
+#define CONFIG_APN
+#define CONFIG_VERSION
+//#define CONFIG_SIGNALINFO
+//#define CONFIG_CELLINFO
+//#define CONFIG_COEX_WWAN_STATE
+#define CONFIG_DEFAULT_PDP 1
+//#define CONFIG_IMSI_ICCID
+#define QUECTEL_UL_DATA_AGG
+//#define QUECTEL_QMI_MERGE
+//#define REBOOT_SIM_CARD_WHEN_APN_CHANGE
+//#define REBOOT_SIM_CARD_WHEN_LONG_TIME_NO_PS 60 //unit is seconds
+//#define CONFIG_QRTR
+//#define CONFIG_ENABLE_QOS
+//#define CONFIG_REG_QOS_IND
+//#define CONFIG_GET_QOS_INFO
+//#define CONFIG_GET_QOS_DATA_RATE
+
+#if (defined(CONFIG_REG_QOS_IND) || defined(CONFIG_GET_QOS_INFO) || defined(CONFIG_GET_QOS_DATA_RATE))
+#ifndef CONFIG_REG_QOS_IND
+#define CONFIG_REG_QOS_IND
+#endif
+#ifndef CONFIG_ENABLE_QOS
+#define CONFIG_ENABLE_QOS
+#endif
+#endif
+
+#include <unistd.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <getopt.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <time.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/socket.h>
+#include <sys/wait.h>
+#include <sys/ioctl.h>
+#include <netinet/in.h>
+#include <pthread.h>
+
+#include "qendian.h"
+#include "QCQMI.h"
+#include "QCQCTL.h"
+#include "QCQMUX.h"
+#include "util.h"
+
+#define DEVICE_CLASS_UNKNOWN 0
+#define DEVICE_CLASS_CDMA 1
+#define DEVICE_CLASS_GSM 2
+
+#define WWAN_DATA_CLASS_NONE 0x00000000
+#define WWAN_DATA_CLASS_GPRS 0x00000001
+#define WWAN_DATA_CLASS_EDGE 0x00000002 /* EGPRS */
+#define WWAN_DATA_CLASS_UMTS 0x00000004
+#define WWAN_DATA_CLASS_HSDPA 0x00000008
+#define WWAN_DATA_CLASS_HSUPA 0x00000010
+#define WWAN_DATA_CLASS_LTE 0x00000020
+#define WWAN_DATA_CLASS_5G_NSA 0x00000040
+#define WWAN_DATA_CLASS_5G_SA 0x00000080
+#define WWAN_DATA_CLASS_1XRTT 0x00010000
+#define WWAN_DATA_CLASS_1XEVDO 0x00020000
+#define WWAN_DATA_CLASS_1XEVDO_REVA 0x00040000
+#define WWAN_DATA_CLASS_1XEVDV 0x00080000
+#define WWAN_DATA_CLASS_3XRTT 0x00100000
+#define WWAN_DATA_CLASS_1XEVDO_REVB 0x00200000 /* for future use */
+#define WWAN_DATA_CLASS_UMB 0x00400000
+#define WWAN_DATA_CLASS_CUSTOM 0x80000000
+
+struct wwan_data_class_str {
+    ULONG class;
+    const char *str;
+};
+
+#pragma pack(push, 1)
+typedef struct __IPV4 {
+    uint32_t Address;
+    uint32_t Gateway;
+    uint32_t SubnetMask;
+    uint32_t DnsPrimary;
+    uint32_t DnsSecondary;
+    uint32_t Mtu;
+} IPV4_T;
+
+typedef struct __IPV6 {
+    UCHAR Address[16];
+    UCHAR Gateway[16];
+    UCHAR SubnetMask[16];
+    UCHAR DnsPrimary[16];
+    UCHAR DnsSecondary[16];
+    UCHAR PrefixLengthIPAddr;
+    UCHAR PrefixLengthGateway;
+    ULONG Mtu;
+} IPV6_T;
+
+typedef struct {
+    UINT size;
+    UINT rx_urb_size;
+    UINT ep_type;
+    UINT iface_id;
+    UINT MuxId;
+    UINT ul_data_aggregation_max_datagrams; //0x17
+    UINT ul_data_aggregation_max_size; //0x18
+    UINT dl_minimum_padding; //0x1A
+} QMAP_SETTING;
+
+//Configured downlink data aggregation protocol
+#define WDA_DL_DATA_AGG_DISABLED (0x00) //DL data aggregation is disabled (default)
+#define WDA_DL_DATA_AGG_TLP_ENABLED (0x01) //DL TLP is enabled
+#define WDA_DL_DATA_AGG_QC_NCM_ENABLED (0x02) //DL QC_NCM is enabled
+#define WDA_DL_DATA_AGG_MBIM_ENABLED (0x03) //DL MBIM is enabled
+#define WDA_DL_DATA_AGG_RNDIS_ENABLED (0x04) //DL RNDIS is enabled
+#define WDA_DL_DATA_AGG_QMAP_ENABLED (0x05) //DL QMAP is enabled
+#define WDA_DL_DATA_AGG_QMAP_V2_ENABLED (0x06) //DL QMAP V2 is enabled
+#define WDA_DL_DATA_AGG_QMAP_V3_ENABLED (0x07) //DL QMAP V3 is enabled
+#define WDA_DL_DATA_AGG_QMAP_V4_ENABLED (0x08) //DL QMAP V4 is enabled
+#define WDA_DL_DATA_AGG_QMAP_V5_ENABLED (0x09) //DL QMAP V5 is enabled
+
+typedef struct {
+    unsigned int
size; + unsigned int rx_urb_size; + unsigned int ep_type; + unsigned int iface_id; + unsigned int qmap_mode; + unsigned int qmap_version; + unsigned int dl_minimum_padding; + char ifname[8][16]; + unsigned char mux_id[8]; +} RMNET_INFO; + +#define IpFamilyV4 (0x04) +#define IpFamilyV6 (0x06) + +struct __PROFILE; +struct qmi_device_ops { + int (*init)(struct __PROFILE *profile); + int (*deinit)(void); + int (*send)(PQCQMIMSG pRequest); + void* (*read)(void *pData); +}; +#ifdef CONFIG_QRTR +extern const struct qmi_device_ops qrtr_qmidev_ops; +#endif +extern const struct qmi_device_ops gobi_qmidev_ops; +extern const struct qmi_device_ops qmiwwan_qmidev_ops; +extern const struct qmi_device_ops mbim_dev_ops; +extern const struct qmi_device_ops atc_dev_ops; +extern int (*qmidev_send)(PQCQMIMSG pRequest); + +struct usb_device_info { + int idVendor; + int idProduct; + int busnum; + int devnum; + int bNumInterfaces; +}; + +struct usb_interface_info { + int bNumEndpoints; + int bInterfaceClass; + int bInterfaceSubClass; + int bInterfaceProtocol; + char driver[32]; +}; + +#define LIBQMI_PROXY "qmi-proxy" //src/libqmi-glib/qmi-proxy.h +#define LIBMBIM_PROXY "mbim-proxy" +#define QUECTEL_QMI_PROXY "quectel-qmi-proxy" +#define QUECTEL_MBIM_PROXY "quectel-mbim-proxy" +#define QUECTEL_ATC_PROXY "quectel-atc-proxy" +#define QUECTEL_QRTR_PROXY "quectel-qrtr-proxy" + +#ifndef bool +#define bool uint8_t +#endif +struct request_ops; +typedef struct __PROFILE { + //user input start + const char *apn; + const char *user; + const char *password; + int auth; + int iptype; + const char *pincode; + char proxy[32]; + int pdp;//pdp_context + int profile_index;//profile_index + int enable_bridge; + bool enable_ipv4; + bool enable_ipv6; + bool no_dhcp; + const char *logfile; + const char *usblogfile; + char expect_adapter[32]; + int kill_pdp; + int replication_factor; + //user input end + + char qmichannel[32]; + char usbnet_adapter[32]; + char qmapnet_adapter[32]; + char driver_name[32]; + int qmap_mode; + int qmap_size; + int qmap_version; + int curIpFamily; + int rawIP; + int muxid; +#ifdef CONFIG_ENABLE_QOS + UINT qos_id; +#endif + int wda_client; + uint32_t udhcpc_ip; + IPV4_T ipv4; + IPV6_T ipv6; + UINT PCSCFIpv4Addr1; + UINT PCSCFIpv4Addr2; + UCHAR PCSCFIpv6Addr1[16]; + UCHAR PCSCFIpv6Addr2[16]; + bool reattach_flag; + int hardware_interface; + int software_interface; + + struct usb_device_info usb_dev; + struct usb_interface_info usb_intf; + + int usbmon_fd; + FILE *usbmon_logfile_fp; + bool loopback_state; + + char BaseBandVersion[64]; + char old_apn[64]; + char old_user[64]; + char old_password[64]; + int old_auth; + int old_iptype; + + const struct qmi_device_ops *qmi_ops; + const struct request_ops *request_ops; + RMNET_INFO rmnet_info; +} PROFILE_T; + +#ifdef QUECTEL_QMI_MERGE +#define MERGE_PACKET_IDENTITY 0x2c7c +#define MERGE_PACKET_VERSION 0x0001 +#define MERGE_PACKET_MAX_PAYLOAD_SIZE 56 +typedef struct __QMI_MSG_HEADER { + uint16_t idenity; + uint16_t version; + uint16_t cur_len; + uint16_t total_len; +} QMI_MSG_HEADER; + +typedef struct __QMI_MSG_PACKET { + QMI_MSG_HEADER header; + uint16_t len; + char buf[4096]; +} QMI_MSG_PACKET; +#endif + +typedef enum { + SIM_ABSENT = 0, + SIM_NOT_READY = 1, + SIM_READY = 2, /* SIM_READY means the radio state is RADIO_STATE_SIM_READY */ + SIM_PIN = 3, + SIM_PUK = 4, + SIM_NETWORK_PERSONALIZATION = 5, + SIM_BAD = 6, +} SIM_Status; + +#pragma pack(pop) + +#define WDM_DEFAULT_BUFSIZE 256 +#define RIL_REQUEST_QUIT 0x1000 +#define RIL_INDICATE_DEVICE_CONNECTED 0x1002 
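+/* These trigger events travel as plain ints from the device reader threads to
+ * the main loop over qmidevice_control_fd (see qmidevice_send_event_to_main()
+ * and the poll() loops in QmiWwanCM.c/atc.c). A minimal sketch of the receiving
+ * side, assuming the usual socketpair behind the fd pair:
+ *
+ *     int ev;
+ *     if (read(qmidevice_control_fd[0], &ev, sizeof(ev)) == sizeof(ev)
+ *         && ev == RIL_INDICATE_DEVICE_DISCONNECTED)
+ *         ;  // hypothetical handler: tear down the call, re-detect the device
+ */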
+#define RIL_INDICATE_DEVICE_DISCONNECTED 0x1003 +#define RIL_UNSOL_RESPONSE_VOICE_NETWORK_STATE_CHANGED 0x1004 +#define RIL_UNSOL_DATA_CALL_LIST_CHANGED 0x1005 +#define MODEM_REPORT_RESET_EVENT 0x1006 +#define RIL_UNSOL_LOOPBACK_CONFIG_IND 0x1007 +#ifdef CONFIG_REG_QOS_IND +#define RIL_UNSOL_GLOBAL_QOS_FLOW_IND_QOS_ID 0x1008 +#endif + +extern pthread_mutex_t cm_command_mutex; +extern pthread_cond_t cm_command_cond; +extern unsigned int cm_recv_buf[1024]; +extern int cm_open_dev(const char *dev); +extern int cm_open_proxy(const char *name); +extern int pthread_cond_timeout_np(pthread_cond_t *cond, pthread_mutex_t * mutex, unsigned msecs); +extern int QmiThreadSendQMITimeout(PQCQMIMSG pRequest, PQCQMIMSG *ppResponse, unsigned msecs, const char *funcname); +#define QmiThreadSendQMI(pRequest, ppResponse) QmiThreadSendQMITimeout(pRequest, ppResponse, 30 * 1000, __func__) +extern void QmiThreadRecvQMI(PQCQMIMSG pResponse); +extern void udhcpc_start(PROFILE_T *profile); +extern void udhcpc_stop(PROFILE_T *profile); +extern void ql_set_driver_link_state(PROFILE_T *profile, int link_state); +extern void ql_set_driver_qmap_setting(PROFILE_T *profile, QMAP_SETTING *qmap_settings); +extern void ql_get_driver_rmnet_info(PROFILE_T *profile, RMNET_INFO *rmnet_info); +extern void dump_qmi(void *dataBuffer, int dataLen); +extern void qmidevice_send_event_to_main(int triger_event); +extern void qmidevice_send_event_to_main_ext(int triger_event, void *data, unsigned len); +extern uint8_t qmi_over_mbim_get_client_id(uint8_t QMIType); +extern uint8_t qmi_over_mbim_release_client_id(uint8_t QMIType, uint8_t ClientId); +#ifdef CONFIG_REG_QOS_IND +extern UCHAR ql_get_global_qos_flow_ind_qos_id(PQCQMIMSG pResponse, UINT *qos_id); +#endif +#ifdef CONFIG_GET_QOS_DATA_RATE +extern UCHAR ql_get_global_qos_flow_ind_data_rate(PQCQMIMSG pResponse, void *max_data_rate); +#endif + +struct request_ops { + int (*requestBaseBandVersion)(PROFILE_T *profile); + int (*requestSetEthMode)(PROFILE_T *profile); + int (*requestSetLoopBackState)(UCHAR loopback_state, ULONG replication_factor); + int (*requestGetSIMStatus)(SIM_Status *pSIMStatus); + int (*requestEnterSimPin)(const char *pPinCode); + int (*requestSetProfile)(PROFILE_T *profile); // 1 ~ success and apn change, 0 ~ success and no apn change, -1 ~ fail + int (*requestGetProfile)(PROFILE_T *profile); + int (*requestRegistrationState)(UCHAR *pPSAttachedState); + int (*requestSetupDataCall)(PROFILE_T *profile, int curIpFamily); + int (*requestQueryDataCall)(UCHAR *pConnectionStatus, int curIpFamily); + int (*requestDeactivateDefaultPDP)(PROFILE_T *profile, int curIpFamily); + int (*requestGetIPAddress)(PROFILE_T *profile, int curIpFamily); + int (*requestGetSignalInfo)(void); + int (*requestGetCellInfoList)(void); + int (*requestGetICCID)(void); + int (*requestGetIMSI)(void); + int (*requestRadioPower)(int state); + int (*requestRegisterQos)(PROFILE_T *profile); + int (*requestGetQosInfo)(PROFILE_T *profile); + int (*requestGetCoexWWANState)(void); +}; +extern const struct request_ops qmi_request_ops; +extern const struct request_ops mbim_request_ops; +extern const struct request_ops atc_request_ops; + +extern int get_driver_type(PROFILE_T *profile); +extern BOOL qmidevice_detect(char *qmichannel, char *usbnet_adapter, unsigned bufsize, PROFILE_T *profile); +int mhidevice_detect(char *qmichannel, char *usbnet_adapter, PROFILE_T *profile); +int atdevice_detect(char *atchannel, char *usbnet_adapter, PROFILE_T *profile); +extern int ql_bridge_mode_detect(PROFILE_T *profile); 
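+/* The QMI, MBIM and AT backends each export one struct request_ops (declared
+ * above); dialing code calls through profile->request_ops so it stays transport
+ * agnostic. A sketch of a typical call site, assuming profile was filled by the
+ * detect helpers nearby:
+ *
+ *     const struct request_ops *ops = profile->request_ops;
+ *     if (ops->requestSetupDataCall(profile, IpFamilyV4) != 0)
+ *         dbg_time("setup data call fail");
+ */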
+extern int ql_enable_qmi_wwan_rawip_mode(PROFILE_T *profile); +extern int ql_qmap_mode_detect(PROFILE_T *profile); +#ifdef CONFIG_QRTR +extern int rtrmnet_ctl_create_vnd(char *devname, char *vndname, uint8_t muxid, + uint32_t qmap_version, uint32_t ul_agg_cnt, uint32_t ul_agg_size); +#endif + +#define qmidev_is_gobinet(_qmichannel) (strncmp(_qmichannel, "/dev/qcqmi", strlen("/dev/qcqmi")) == 0) +#define qmidev_is_qmiwwan(_qmichannel) (strncmp(_qmichannel, "/dev/cdc-wdm", strlen("/dev/cdc-wdm")) == 0) +#define qmidev_is_pciemhi(_qmichannel) (strncmp(_qmichannel, "/dev/mhi_", strlen("/dev/mhi_")) == 0) + +#define driver_is_qmi(_drv_name) (strncasecmp(_drv_name, "qmi_wwan", strlen("qmi_wwan")) == 0) +#define driver_is_mbim(_drv_name) (strncasecmp(_drv_name, "cdc_mbim", strlen("cdc_mbim")) == 0) + +extern FILE *logfilefp; +extern int debug_qmi; +extern int qmidevice_control_fd[2]; +extern int g_donot_exit_when_modem_hangup; +extern void update_resolv_conf(int iptype, const char *ifname, const char *dns1, const char *dns2); +void update_ipv4_address(const char *ifname, const char *ip, const char *gw, unsigned prefix); +void update_ipv6_address(const char *ifname, const char *ip, const char *gw, unsigned prefix); +int reattach_driver(PROFILE_T *profile); +extern void no_trunc_strncpy(char *dest, const char *src, size_t dest_size); + +enum +{ + DRV_INVALID, + SOFTWARE_QMI, + SOFTWARE_MBIM, + SOFTWARE_ECM_RNDIS_NCM, + SOFTWARE_QRTR, + HARDWARE_PCIE, + HARDWARE_USB, +}; + +enum +{ + SIG_EVENT_START, + SIG_EVENT_CHECK, + SIG_EVENT_STOP, +}; + +typedef enum +{ + DMS_OP_MODE_ONLINE, + DMS_OP_MODE_LOW_POWER, + DMS_OP_MODE_FACTORY_TEST_MODE, + DMS_OP_MODE_OFFLINE, + DMS_OP_MODE_RESETTING, + DMS_OP_MODE_SHUTTING_DOWN, + DMS_OP_MODE_PERSISTENT_LOW_POWER, + DMS_OP_MODE_MODE_ONLY_LOW_POWER, + DMS_OP_MODE_NET_TEST_GW, +}Device_operating_mode; + +#ifdef CM_DEBUG +#define dbg_time(fmt, args...) do { \ + fprintf(stdout, "[%15s-%04d: %s] " fmt "\n", __FILE__, __LINE__, get_time(), ##args); \ + fflush(stdout);\ + if (logfilefp) fprintf(logfilefp, "[%s-%04d: %s] " fmt "\n", __FILE__, __LINE__, get_time(), ##args); \ +} while(0) +#else +#define dbg_time(fmt, args...) do { \ + fprintf(stdout, "[%s] " fmt "\n", get_time(), ##args); \ + fflush(stdout);\ + if (logfilefp) fprintf(logfilefp, "[%s] " fmt "\n", get_time(), ##args); \ +} while(0) +#endif +#endif diff --git a/wwan/app/quectel_cm_5G/src/QmiWwanCM.c b/wwan/app/quectel_cm_5G/src/QmiWwanCM.c new file mode 100644 index 0000000..bea11c6 --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/QmiWwanCM.c @@ -0,0 +1,459 @@ +/****************************************************************************** + @file QmiWwanCM.c + @brief QMI WWAN connectivity manager. + + DESCRIPTION + Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. + + --------------------------------------------------------------------------- + Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved. + Quectel Wireless Solution Proprietary and Confidential. 
+  ---------------------------------------------------------------------------
+******************************************************************************/
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <poll.h>
+#include "QMIThread.h"
+
+#ifdef CONFIG_QMIWWAN
+static int cdc_wdm_fd = -1;
+static UCHAR qmiclientId[QMUX_TYPE_ALL];
+
+static UCHAR GetQCTLTransactionId(void) {
+    static int TransactionId = 0;
+    if (++TransactionId > 0xFF)
+        TransactionId = 1;
+    return TransactionId;
+}
+
+typedef USHORT (*CUSTOMQCTL)(PQMICTL_MSG pCTLMsg, void *arg);
+
+static PQCQMIMSG ComposeQCTLMsg(USHORT QMICTLType, CUSTOMQCTL customQctlMsgFunction, void *arg) {
+    UCHAR QMIBuf[WDM_DEFAULT_BUFSIZE];
+    PQCQMIMSG pRequest = (PQCQMIMSG)QMIBuf;
+    int Length;
+
+    pRequest->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI;
+    pRequest->QMIHdr.CtlFlags = 0x00;
+    pRequest->QMIHdr.QMIType = QMUX_TYPE_CTL;
+    pRequest->QMIHdr.ClientId = 0x00;
+
+    pRequest->CTLMsg.QMICTLMsgHdr.CtlFlags = QMICTL_FLAG_REQUEST;
+    pRequest->CTLMsg.QMICTLMsgHdr.TransactionId = GetQCTLTransactionId();
+    pRequest->CTLMsg.QMICTLMsgHdr.QMICTLType = cpu_to_le16(QMICTLType);
+    if (customQctlMsgFunction)
+        pRequest->CTLMsg.QMICTLMsgHdr.Length = cpu_to_le16(customQctlMsgFunction(&pRequest->CTLMsg, arg) - sizeof(QCQMICTL_MSG_HDR));
+    else
+        pRequest->CTLMsg.QMICTLMsgHdr.Length = cpu_to_le16(0x0000);
+
+    pRequest->QMIHdr.Length = cpu_to_le16(le16_to_cpu(pRequest->CTLMsg.QMICTLMsgHdr.Length) + sizeof(QCQMICTL_MSG_HDR) + sizeof(QCQMI_HDR) - 1);
+    Length = le16_to_cpu(pRequest->QMIHdr.Length) + 1;
+
+    pRequest = (PQCQMIMSG)malloc(Length);
+    if (pRequest == NULL) {
+        dbg_time("%s fail to malloc", __func__);
+    } else {
+        memcpy(pRequest, QMIBuf, Length);
+    }
+
+    return pRequest;
+}
+
+static USHORT CtlGetVersionReq(PQMICTL_MSG QCTLMsg, void *arg)
+{
+    (void)arg;
+    QCTLMsg->GetVersionReq.TLVType = QCTLV_TYPE_REQUIRED_PARAMETER;
+    QCTLMsg->GetVersionReq.TLVLength = cpu_to_le16(0x0001);
+    QCTLMsg->GetVersionReq.QMUXTypes = QMUX_TYPE_ALL;
+    return sizeof(QMICTL_GET_VERSION_REQ_MSG);
+}
+
+static USHORT CtlGetClientIdReq(PQMICTL_MSG QCTLMsg, void *arg) {
+    QCTLMsg->GetClientIdReq.TLVType = QCTLV_TYPE_REQUIRED_PARAMETER;
+    QCTLMsg->GetClientIdReq.TLVLength = cpu_to_le16(0x0001);
+    QCTLMsg->GetClientIdReq.QMIType = ((UCHAR *)arg)[0];
+    return sizeof(QMICTL_GET_CLIENT_ID_REQ_MSG);
+}
+
+static USHORT CtlReleaseClientIdReq(PQMICTL_MSG QCTLMsg, void *arg) {
+    QCTLMsg->ReleaseClientIdReq.TLVType = QCTLV_TYPE_REQUIRED_PARAMETER;
+    QCTLMsg->ReleaseClientIdReq.TLVLength = cpu_to_le16(0x0002);
+    QCTLMsg->ReleaseClientIdReq.QMIType = ((UCHAR *)arg)[0];
+    QCTLMsg->ReleaseClientIdReq.ClientId = ((UCHAR *)arg)[1];
+    return sizeof(QMICTL_RELEASE_CLIENT_ID_REQ_MSG);
+}
+
+static USHORT CtlLibQmiProxyOpenReq(PQMICTL_MSG QCTLMsg, void *arg)
+{
+    const char *device_path = (const char *)arg;
+    QCTLMsg->LibQmiProxyOpenReq.TLVType = 0x01;
+    QCTLMsg->LibQmiProxyOpenReq.TLVLength = cpu_to_le16(strlen(device_path));
+    //strcpy(QCTLMsg->LibQmiProxyOpenReq.device_path, device_path);
+    //__builtin___strcpy_chk
+    memcpy(QCTLMsg->LibQmiProxyOpenReq.device_path, device_path, strlen(device_path));
+    return sizeof(QMICTL_LIBQMI_PROXY_OPEN_MSG) + strlen(device_path);
+}
+
+static int libqmi_proxy_open(const char *cdc_wdm) {
+    int ret;
+    PQCQMIMSG pResponse;
+
+    ret = QmiThreadSendQMI(ComposeQCTLMsg(QMI_MESSAGE_CTL_INTERNAL_PROXY_OPEN,
+        CtlLibQmiProxyOpenReq, (void *)cdc_wdm), &pResponse);
+    if (!ret && pResponse
+        && pResponse->CTLMsg.QMICTLMsgHdrRsp.QMUXResult == 0
+        &&
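+        /* QMUXResult is QMI_RESULT_SUCCESS (0) and QMUXError 0 when the proxy
+         * accepted the internal open request for this cdc-wdm path. */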
pResponse->CTLMsg.QMICTLMsgHdrRsp.QMUXError == 0) { + ret = 0; + } + else { + return -1; + } + + if (pResponse) + free(pResponse); + + return ret; +} + +static int QmiWwanSendQMI(PQCQMIMSG pRequest) { + struct pollfd pollfds[]= {{cdc_wdm_fd, POLLOUT, 0}}; + int ret; + + if (cdc_wdm_fd == -1) { + dbg_time("%s cdc_wdm_fd = -1", __func__); + return -ENODEV; + } + + if (pRequest->QMIHdr.QMIType != QMUX_TYPE_CTL) { + pRequest->QMIHdr.ClientId = qmiclientId[pRequest->QMIHdr.QMIType]; + if (pRequest->QMIHdr.ClientId == 0) { + dbg_time("QMIType %d has no clientID", pRequest->QMIHdr.QMIType); + return -ENODEV; + } + + if (pRequest->QMIHdr.QMIType == QMUX_TYPE_WDS_IPV6) + pRequest->QMIHdr.QMIType = QMUX_TYPE_WDS; + } + + do { + ret = poll(pollfds, sizeof(pollfds)/sizeof(pollfds[0]), 5000); + } while ((ret < 0) && (errno == EINTR)); + + if (pollfds[0].revents & POLLOUT) { + ssize_t nwrites = le16_to_cpu(pRequest->QMIHdr.Length) + 1; + ret = write(cdc_wdm_fd, pRequest, nwrites); + if (ret == nwrites) { + ret = 0; + } else { + dbg_time("%s write=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno)); + } + } else { + dbg_time("%s poll=%d, revents = 0x%x, errno: %d (%s)", __func__, ret, pollfds[0].revents, errno, strerror(errno)); + } + + return ret; +} + +static UCHAR QmiWwanGetClientID(UCHAR QMIType) { + PQCQMIMSG pResponse; + + QmiThreadSendQMI(ComposeQCTLMsg(QMICTL_GET_CLIENT_ID_REQ, CtlGetClientIdReq, &QMIType), &pResponse); + + if (pResponse) { + USHORT QMUXResult = cpu_to_le16(pResponse->CTLMsg.QMICTLMsgHdrRsp.QMUXResult); // QMI_RESULT_SUCCESS + USHORT QMUXError = cpu_to_le16(pResponse->CTLMsg.QMICTLMsgHdrRsp.QMUXError); // QMI_ERR_INVALID_ARG + //UCHAR QMIType = pResponse->CTLMsg.GetClientIdRsp.QMIType; + UCHAR ClientId = pResponse->CTLMsg.GetClientIdRsp.ClientId; + + if (!QMUXResult && !QMUXError && (QMIType == pResponse->CTLMsg.GetClientIdRsp.QMIType)) { + switch (QMIType) { + case QMUX_TYPE_WDS: dbg_time("Get clientWDS = %d", ClientId); break; + case QMUX_TYPE_DMS: dbg_time("Get clientDMS = %d", ClientId); break; + case QMUX_TYPE_NAS: dbg_time("Get clientNAS = %d", ClientId); break; + case QMUX_TYPE_QOS: dbg_time("Get clientQOS = %d", ClientId); break; + case QMUX_TYPE_WMS: dbg_time("Get clientWMS = %d", ClientId); break; + case QMUX_TYPE_PDS: dbg_time("Get clientPDS = %d", ClientId); break; + case QMUX_TYPE_UIM: dbg_time("Get clientUIM = %d", ClientId); break; + case QMUX_TYPE_COEX: dbg_time("Get clientCOEX = %d", ClientId); break; + case QMUX_TYPE_WDS_ADMIN: dbg_time("Get clientWDA = %d", ClientId); + break; + default: break; + } + return ClientId; + } + } + return 0; +} + +static int QmiWwanReleaseClientID(QMI_SERVICE_TYPE QMIType, UCHAR ClientId) { + UCHAR argv[] = {QMIType, ClientId}; + QmiThreadSendQMI(ComposeQCTLMsg(QMICTL_RELEASE_CLIENT_ID_REQ, CtlReleaseClientIdReq, argv), NULL); + return 0; +} + +static int QmiWwanInit(PROFILE_T *profile) { + unsigned i; + int ret; + PQCQMIMSG pResponse; + + if (profile->proxy[0] && !strcmp(profile->proxy, LIBQMI_PROXY)) { + ret = libqmi_proxy_open(profile->qmichannel); + if (ret) + return ret; + } + + if (!profile->proxy[0]) { + for (i = 0; i < 10; i++) { + ret = QmiThreadSendQMITimeout(ComposeQCTLMsg(QMICTL_SYNC_REQ, NULL, NULL), NULL, 1 * 1000, __func__); + if (!ret) + break; + sleep(1); + } + if (ret) + return ret; + } + + QmiThreadSendQMI(ComposeQCTLMsg(QMICTL_GET_VERSION_REQ, CtlGetVersionReq, NULL), &pResponse); + if (profile->qmap_mode) { + if (pResponse) { + if (pResponse->CTLMsg.QMICTLMsgHdrRsp.QMUXResult == 0 && 
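+            /* A WDS-admin minor version above 16 appears to mark modems that
+             * use the newer QMAP (v5) aggregation; the result is kept in
+             * profile->qmap_version and applied with the QMAP settings. */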
pResponse->CTLMsg.QMICTLMsgHdrRsp.QMUXError == 0) { + uint8_t NumElements = 0; + + for (NumElements = 0; NumElements < pResponse->CTLMsg.GetVersionRsp.NumElements; NumElements++) { +#if 0 + dbg_time("QMUXType = %02x Version = %d.%d", + pResponse->CTLMsg.GetVersionRsp.TypeVersion[NumElements].QMUXType, + pResponse->CTLMsg.GetVersionRsp.TypeVersion[NumElements].MajorVersion, + pResponse->CTLMsg.GetVersionRsp.TypeVersion[NumElements].MinorVersion); +#endif + if (pResponse->CTLMsg.GetVersionRsp.TypeVersion[NumElements].QMUXType == QMUX_TYPE_WDS_ADMIN) + profile->qmap_version = (pResponse->CTLMsg.GetVersionRsp.TypeVersion[NumElements].MinorVersion > 16); + } + } + } + } + if (pResponse) free(pResponse); + qmiclientId[QMUX_TYPE_WDS] = QmiWwanGetClientID(QMUX_TYPE_WDS); + if (profile->enable_ipv6) + qmiclientId[QMUX_TYPE_WDS_IPV6] = QmiWwanGetClientID(QMUX_TYPE_WDS); + qmiclientId[QMUX_TYPE_DMS] = QmiWwanGetClientID(QMUX_TYPE_DMS); + qmiclientId[QMUX_TYPE_NAS] = QmiWwanGetClientID(QMUX_TYPE_NAS); + qmiclientId[QMUX_TYPE_UIM] = QmiWwanGetClientID(QMUX_TYPE_UIM); + qmiclientId[QMUX_TYPE_WDS_ADMIN] = QmiWwanGetClientID(QMUX_TYPE_WDS_ADMIN); +#ifdef CONFIG_COEX_WWAN_STATE + qmiclientId[QMUX_TYPE_COEX] = QmiWwanGetClientID(QMUX_TYPE_COEX); +#endif +#ifdef CONFIG_ENABLE_QOS + qmiclientId[QMUX_TYPE_QOS] = QmiWwanGetClientID(QMUX_TYPE_QOS); +#endif + profile->wda_client = qmiclientId[QMUX_TYPE_WDS_ADMIN]; + + return 0; +} + +static int QmiWwanDeInit(void) { + unsigned int i; + for (i = 0; i < sizeof(qmiclientId)/sizeof(qmiclientId[0]); i++) + { + if (qmiclientId[i] != 0) + { + QmiWwanReleaseClientID((QMUX_TYPE_WDS_IPV6 == i ? QMUX_TYPE_WDS : i), qmiclientId[i]); + qmiclientId[i] = 0; + } + } + + return 0; +} + +static ssize_t qmi_proxy_read (int fd, void *buf, size_t size) { + ssize_t nreads; + PQCQMI_HDR pHdr = (PQCQMI_HDR)buf; + + nreads = read(fd, pHdr, sizeof(QCQMI_HDR)); + if (nreads == sizeof(QCQMI_HDR) && le16_to_cpu(pHdr->Length) < size) { + nreads += read(fd, pHdr+1, le16_to_cpu(pHdr->Length) + 1 - sizeof(QCQMI_HDR)); + } + + return nreads; +} + +#ifdef QUECTEL_QMI_MERGE +static int QmiWwanMergeQmiRsp(void *buf, ssize_t *src_size) { + static QMI_MSG_PACKET s_QMIPacket; + QMI_MSG_HEADER *header = NULL; + ssize_t size = *src_size; + + if((uint16_t)size < sizeof(QMI_MSG_HEADER)) + return -1; + + header = (QMI_MSG_HEADER *)buf; + if(le16_to_cpu(header->idenity) != MERGE_PACKET_IDENTITY || le16_to_cpu(header->version) != MERGE_PACKET_VERSION || le16_to_cpu(header->cur_len) > le16_to_cpu(header->total_len)) + return -1; + + if(le16_to_cpu(header->cur_len) == le16_to_cpu(header->total_len)) { + *src_size = le16_to_cpu(header->total_len); + memcpy(buf, buf + sizeof(QMI_MSG_HEADER), *src_size); + s_QMIPacket.len = 0; + return 0; + } + + memcpy(s_QMIPacket.buf + s_QMIPacket.len, buf + sizeof(QMI_MSG_HEADER), le16_to_cpu(header->cur_len)); + s_QMIPacket.len += le16_to_cpu(header->cur_len); + + if (le16_to_cpu(header->cur_len) < MERGE_PACKET_MAX_PAYLOAD_SIZE || s_QMIPacket.len >= le16_to_cpu(header->total_len)) { + memcpy(buf, s_QMIPacket.buf, s_QMIPacket.len); + *src_size = s_QMIPacket.len; + s_QMIPacket.len = 0; + return 0; + } + + return -1; +} +#endif + +static void * QmiWwanThread(void *pData) { + PROFILE_T *profile = (PROFILE_T *)pData; + const char *cdc_wdm = (const char *)profile->qmichannel; + int wait_for_request_quit = 0; + char num = cdc_wdm[strlen(cdc_wdm)-1]; + + if (profile->proxy[0]) { + if (!strncmp(profile->proxy, QUECTEL_QMI_PROXY, strlen(QUECTEL_QMI_PROXY))) { + snprintf(profile->proxy, 
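+                /* reuse the trailing digit of /dev/cdc-wdmX ('num' above) so
+                 * each modem gets its own per-device proxy socket name, e.g.
+                 * quectel-qmi-proxy0 for /dev/cdc-wdm0 (naming assumed from the
+                 * "%s%c" format). */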
sizeof(profile->proxy), "%s%c", QUECTEL_QMI_PROXY, num); + } + } + else if (!strncmp(cdc_wdm, "/dev/mhi_IPCR", strlen("/dev/mhi_IPCR"))) { + snprintf(profile->proxy, sizeof(profile->proxy), "%s%c", QUECTEL_QRTR_PROXY, num); + } + else if (profile->qmap_mode > 1) { + snprintf(profile->proxy, sizeof(profile->proxy), "%s%c", QUECTEL_QMI_PROXY, num); + } + + if (profile->proxy[0]) + cdc_wdm_fd = cm_open_proxy(profile->proxy); + else + cdc_wdm_fd = cm_open_dev(cdc_wdm); + + if (cdc_wdm_fd == -1) { + dbg_time("%s Failed to open %s, errno: %d (%s)", __func__, cdc_wdm, errno, strerror(errno)); + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_DISCONNECTED); + pthread_exit(NULL); + return NULL; + } + + dbg_time("cdc_wdm_fd = %d", cdc_wdm_fd); + + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_CONNECTED); + while (1) { + struct pollfd pollfds[] = {{qmidevice_control_fd[1], POLLIN, 0}, {cdc_wdm_fd, POLLIN, 0}}; + int ne, ret, nevents = sizeof(pollfds)/sizeof(pollfds[0]); + + do { + ret = poll(pollfds, nevents, wait_for_request_quit ? 1000 : -1); + } while ((ret < 0) && (errno == EINTR)); + + if (ret == 0 && wait_for_request_quit) { + QmiThreadRecvQMI(NULL); + continue; + } + + if (ret <= 0) { + dbg_time("%s poll=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno)); + break; + } + + for (ne = 0; ne < nevents; ne++) { + int fd = pollfds[ne].fd; + short revents = pollfds[ne].revents; + + //dbg_time("{%d, %x, %x}", pollfds[ne].fd, pollfds[ne].events, pollfds[ne].revents); + + if (revents & (POLLERR | POLLHUP | POLLNVAL)) { + dbg_time("%s poll err/hup/inval", __func__); + dbg_time("poll fd = %d, events = 0x%04x", fd, revents); + if (fd == cdc_wdm_fd) { + } else { + } + if (revents & (POLLHUP | POLLNVAL)) //EC20 bug, Can get POLLERR + goto __QmiWwanThread_quit; + } + + if ((revents & POLLIN) == 0) + continue; + + if (fd == qmidevice_control_fd[1]) { + int triger_event; + if (read(fd, &triger_event, sizeof(triger_event)) == sizeof(triger_event)) { + //DBG("triger_event = 0x%x", triger_event); + switch (triger_event) { + case RIL_REQUEST_QUIT: + goto __QmiWwanThread_quit; + break; + case SIG_EVENT_STOP: + wait_for_request_quit = 1; + break; + default: + break; + } + } + } + + if (fd == cdc_wdm_fd) { + ssize_t nreads; + PQCQMIMSG pResponse = (PQCQMIMSG)cm_recv_buf; + + if (!profile->proxy[0]) + nreads = read(fd, cm_recv_buf, sizeof(cm_recv_buf)); + else + nreads = qmi_proxy_read(fd, cm_recv_buf, sizeof(cm_recv_buf)); + //dbg_time("%s read=%d errno: %d (%s)", __func__, (int)nreads, errno, strerror(errno)); + if (nreads <= 0) { + dbg_time("%s read=%d errno: %d (%s)", __func__, (int)nreads, errno, strerror(errno)); + break; + } +#ifdef QUECTEL_QMI_MERGE + if((profile->qmap_mode == 0 || profile->qmap_mode == 1) && QmiWwanMergeQmiRsp(cm_recv_buf, &nreads)) + continue; +#endif + if (nreads != (le16_to_cpu(pResponse->QMIHdr.Length) + 1)) { + dbg_time("%s nreads=%d, pQCQMI->QMIHdr.Length = %d", __func__, (int)nreads, le16_to_cpu(pResponse->QMIHdr.Length)); + continue; + } + + QmiThreadRecvQMI(pResponse); + } + } + } + +__QmiWwanThread_quit: + if (cdc_wdm_fd != -1) { close(cdc_wdm_fd); cdc_wdm_fd = -1; } + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_DISCONNECTED); + QmiThreadRecvQMI(NULL); //main thread may pending on QmiThreadSendQMI() + dbg_time("%s exit", __func__); + pthread_exit(NULL); + return NULL; +} + +const struct qmi_device_ops qmiwwan_qmidev_ops = { + .init = QmiWwanInit, + .deinit = QmiWwanDeInit, + .send = QmiWwanSendQMI, + .read = QmiWwanThread, +}; + +uint8_t 
qmi_over_mbim_get_client_id(uint8_t QMIType) {
+    return QmiWwanGetClientID(QMIType);
+}
+
+uint8_t qmi_over_mbim_release_client_id(uint8_t QMIType, uint8_t ClientId) {
+    return QmiWwanReleaseClientID(QMIType, ClientId);
+}
+#endif
+
diff --git a/wwan/app/quectel_cm_5G/src/ReleaseNote.txt b/wwan/app/quectel_cm_5G/src/ReleaseNote.txt
new file mode 100644
index 0000000..a5eb77d
--- /dev/null
+++ b/wwan/app/quectel_cm_5G/src/ReleaseNote.txt
@@ -0,0 +1,339 @@
+Release Notes
+
+[V1.6.5]
+Date: 7/3/2023
+enhancement:
+  1. Fix QMI client ID leakage when a client of quectel-qmi-proxy is killed with 'kill -9'
+  2. Fix the issue that the wds_ipv6 client ID can't be released
+  3. Resolve the issue of mixing up the PDP context and the profile index
+  4. Add parameter -d to obtain IP and DNS information through QMI
+  5. Fix mbim dialing: when the user does not specify an APN through -s, prompt the user and exit the dialing program
+  6. Prefer the 'ip' command for network setup, and fall back to ifconfig when it is not available
+  7. Optimize and add/remove copyright
+fix:
+
+
+[V1.6.4]
+Date: 9/7/2022
+enhancement:
+  1. set cflags as -Wall -Wextra -Werror -O1, and fix compile errors
+  2. some code refactoring
+  3. add quectel-qrtr-proxy
+fix:
+  1. netmask error when using ifconfig on little-endian CPUs
+
+[V1.6.2]
+Date: 11/18/2021
+enhancement:
+  1. support 'LTE && WiFi Coexistence Solution via QMI'.
+     Customers who want to use this feature need to enable CONFIG_COEX_WWAN_STATE in QMIThread.h
+
+[V1.6.1]
+Date: 7/20/2021
+enhancement:
+  1. add requestGetCellInfoList requestRadioPower
+  2. add QMI OVER MBIM
+  3. support qrtr and rmnet
+  4. support RG500U PCIE
+  5. add qos service && get qos flow data_rate_max func
+fix:
+  1. mbim: increase mbim open timeout to 3 seconds. some modems take a long time for the open cmd.
+  2. support MsChapV2
+  3. mbim: invalid memory access when only one DNS is returned
+  4. some bug fixes for using AT commands to set up data calls
+
+[V1.6.0.26]
+Date: 4/22/2021
+enhancement:
+  1. add lots of log files to show how to use this tool
+  2. support pcie mhi multiple call
+  3. at command: support EC200U/EC200T/EC200S/RG801H/RG500U/
+fix:
+  1. mbim-proxy: fix errors on big-endian CPUs, ignore mbim open/close cmd from quectel-CM
+
+[V1.6.0.25]
+Date: 4/8/2021
+enhancement:
+fix:
+  1. fix compile error when using gcc 9.3.0
+  2. fix yocto 'QA Issue: No GNU_HASH in the ELF binary'
+
+[V1.6.0.24]
+Date: 3/9/2021
+enhancement:
+  1. '-p [quectel-][qmi|mbim]-proxy', can connect to quectel/libqmi/libmbim's proxy, even only one data
+  2. set variable s_9x07 as 1 (from 0), most modems are based on MDM90x7 and later Qualcomm chips.
+fix:
+  1. define CHAR as signed char
+  2. modify Makefile to generate more compile warnings, and fix them
+
+[V1.6.0.23]
+Date: 2/26/2021
+enhancement:
+  1. support 'AT+QNETDEVCTL' (not release)
+fix:
+  1. modify help/usage
+  2. fix some memory access errors in mbim-cm.c
+
+[V1.6.0.22]
+Date: 2/4/2021
+enhancement:
+  1. support connect to libqmi's qmi-proxy
+  2. only allow '0/1/2/none/pap/chap' for auth of '-s'
+  3. '-m iface-idx' bind QMAP data call to wwan0_<iface-idx>
+fix:
+
+[V1.6.0.21]
+Date: 1/28/2021
+enhancement:
+  1. print 5G signal
+fix:
+  1. fix compile errors: -Werror=format-truncation=
+
+[V1.6.0.20]
+Date: 12/29/2020
+enhancement:
+  1. Code refactoring
+  2. support 'AT+QNETDEVCTL' (not release)
+fix:
+
+[V1.6.0.19]
+Date: 12/4/2020
+enhancement:
+  1. if udhcpc's default.script is missing, directly set IP/DNS/route via the 'ip' command
+fix:
+
+[V1.6.0.18]
+Date: 12/4/2020
+enhancement:
+  1. Code refactoring
+fix:
+
+[V1.6.0.17]
+Date: 8/25/2020
+enhancement:
+  1. support MBIM multi-call
+  2. support unisoc RG500U mbim
+  3. QUECTEL_QMI_MERGE: some SoCs cannot read more than 64 bytes of QMI data via USB Endpoint 0
+fix:
+
+[V1.6.0.15]
+Date: 7/24/2020
+enhancement:
+fix:
+  1. QMAP multi-call, AT+CFUN=4 then AT+CFUN=1, only one call can obtain IP by DHCP
+
+[V1.6.0.14]
+Date: 6/10/2020
+enhancement:
+  1. support X55's GobiNet LOOPBACK
+fix:
+  1. very old uClibc does not support htole32 and pthread_condattr_setclock
+  2. pthread_cond_wait tv_nsec >= 1000000000U is wrong
+  3. do not close socket in udhcpc.c ifc_get_addr()
+
+[V1.6.0.13]
+Date: 6/9/2020
+enhancement:
+  1. add some examples for OpenWrt, macro 'QL_OPENWER_NETWORK_SETUP'
+fix:
+
+[V1.6.0.12]
+Date: 5/29/2020
+enhancement:
+fix:
+  1. some EM12's usb-net-qmi/mbim interface is at 8 (not 4)
+
+[V1.6.0.11]
+Date: 5/28/2020
+enhancement:
+fix:
+  1. fix mbim debug on big-endian CPUs
+
+[V1.6.0.10]
+Date: 5/25/2020
+enhancement:
+fix:
+  1. set QMAP .ul_data_aggregation_max_datagrams to 11 (from 16)
+
+[V1.6.0.9]
+Date: 5/22/2020
+enhancement:
+fix:
+  1. dial fails when registered to 5G-SA
+
+[V1.6.0.8]
+Date: 4/30/2020
+enhancement:
+  1. support '-b' to select bridge mode
+fix:
+
+[V1.6.0.7]
+Date: 4/29/2020
+enhancement:
+  1. support QMAP multi-call for qmi_wwan_q and pcie_mhi's rmnet drivers
+fix:
+
+[V1.6.0.6]
+Date: 4/20/2020
+enhancement:
+  1. support '-k pdn_idx' to hang up call '-n pdn_idx'
+fix:
+  1. fix: set dl_minimum_padding as 0, modems do not support this feature
+
+[V1.6.0.5]
+Date: 4/10/2020
+enhancement:
+  1. support X55's QMAPV5 for PCIE
+fix:
+
+[V1.6.0.3]
+Date: 4/8/2020
+enhancement:
+  1. support multiple modems each using multiple data calls
+fix:
+
+[V1.6.0.2]
+Date: 4/7/2020
+enhancement:
+  1. support X55's QMAPV5 for USB
+fix:
+
+[V1.6.0.1]
+Date: 4/1/2020
+enhancement:
+  1. support QMAP UL AGG (multi data call)
+fix:
+  1. some EM12's usb-net-qmi/mbim interface is at 8 (not 4)
+
+[V1.5.9]
+Date: 3/4/2020
+enhancement:
+  1. support pcie mhi multi-APN data call
+  2. support QMAP UL AGG (single data call)
+fix:
+  1. align mbim parameters to 4 bytes, otherwise the mbim data call fails
+
+[V1.5.8]
+Date: 2/18/2020
+enhancement:
+  1. support '-l 14' X55's loopback function
+fix:
+
+[V1.5.7]
+Date: 2/6/2020
+enhancement:
+  1. support '-u usbmon_log_file' to catch usbmon log
+fix:
+
+[V1.5.6]
+Date: 1/20/2020
+enhancement:
+  1. show driver name and version
+  2. support PCSCF
+  3. support bridge in mbim
+fix:
+
+[V1.5.5]
+Date: 12/31/2019
+enhancement:
+fix:
+  1. fix some memory access bugs in mbim-cm.c
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.5.4]
+Date: 12/17/2019
+enhancement:
+  1. Add copyright
+  2. auto detect pcie mhi /dev/mhi*
+fix:
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.5.3]
+Date: 2019/12/11
+enhancement:
+1. support show SignalInfo, controlled by macro CONFIG_SIGNALINFO
+2. support show 5G_NSA/5G_NA
+3. support Microsoft Extend MBIM message
+fix:
+1. quectel-qmi-proxy bugs on big-endian CPUs
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.5.2]
+Date: 12/2/2019
+enhancement:
+  1. support requestGetSignalInfo()
+fix:
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.4.1]
+Date: 10/23/2019
+enhancement:
+  1. support QMI_CTL_REVOKE_CLIENT_ID_IND (Quectel-defined QMI)
+  2. add copyright
+fix:
+  1. remove SIGUSR
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.3.10]
+Date: 10/14/2019
+enhancement:
+  1. increase retry interval
+fix:
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.2.1]
+Date: 2019/02/26
+enhancement:
+1. Implement help message.
+
+root@ubuntu:# ./quectel-CM -h
+[02-26_10:39:21:353] Usage: ./quectel-CM [options]
+[02-26_10:39:21:353] -s [apn [user password auth]] Set apn/user/password/auth get from your network provider
+[02-26_10:39:21:353] -p pincode Verify sim card pin if sim card is locked
+[02-26_10:39:21:353] -f logfilename Save log message of this program to file
+[02-26_10:39:21:353] -i interface Specify network interface(default auto-detect)
+[02-26_10:39:21:353] -4 IPv4 protocol
+[02-26_10:39:21:353] -6 IPv6 protocol
+[02-26_10:39:21:353] -m muxID Specify muxid when set multi-pdn data connection.
+[02-26_10:39:21:353] -n channelID Specify channelID when set multi-pdn data connection(default 1).
+[02-26_10:39:21:353] [Examples]
+[02-26_10:39:21:353] Example 1: ./quectel-CM
+[02-26_10:39:21:353] Example 2: ./quectel-CM -s 3gnet
+[02-26_10:39:21:353] Example 3: ./quectel-CM -s 3gnet carl 1234 0 -p 1234 -f gobinet_log.txt
+root@ubuntu:#
+2. Support bridge mode when set multi-pdn data connections.
+3. Host device can access network in bridge mode.
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.1.46]
+Date: 2019/02/18
+enhancement:
+1. support only IPV6 data call. quectel-CM now supports three dialing methods: IPV4 only, IPV6 only, IPV4V6.
+   ./quectel-CM -4(or no argument) only IPV4
+                -6 only IPV6
+                -4 -6 IPV4 && IPV6
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.1.45]
+Date: 2018/09/13
+enhancement:
+1. support EG12 PCIE interface
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.1.44]
+Date: 2018/09/10
+enhancement:
+1. support setup IPV4&IPV6 data call.
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.1.43]
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.1.42]
+Date: 2018/08/29
+enhancement:
+1. support QMI_WWAN's QMAP function and bridge mode, please contact Quectel FAE to get the qmi_wwan.c patch.
+   when enabling QMI_WWAN's QMAP IP Mux function, you must run 'quectel-qmi-proxy -d /dev/cdc-wdmX' before quectel-CM
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.1.41]
+Date: 2018/05/24
+enhancement:
+1. fix a cdma data call error
+
+[WCDMA&LTE_QConnectManager_Linux&Android_V1.1.40]
+Date: 2018/05/12
+enhancement:
+1. support GobiNet's QMAP function and bridge mode.
+   'Quectel_WCDMA&LTE_Linux&Android_GobiNet_Driver_V1.3.5' and later versions are required to use QMAP and bridge mode.
+   for detail, please refer to the GobiNet Driver
+
diff --git a/wwan/app/quectel_cm_5G/src/at_tok.c b/wwan/app/quectel_cm_5G/src/at_tok.c
new file mode 100644
index 0000000..6736cc8
--- /dev/null
+++ b/wwan/app/quectel_cm_5G/src/at_tok.c
@@ -0,0 +1,283 @@
+/* //device/system/reference-ril/at_tok.c
+**
+** Copyright 2006, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#include "at_tok.h"
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <stdarg.h>
+
+/**
+ * Starts tokenizing an AT response string
+ * returns -1 if this is not a valid response string, 0 on success.
+ * updates *p_cur with current position
+ */
+int at_tok_start(char **p_cur)
+{
+    if (*p_cur == NULL) {
+        return -1;
+    }
+
+    // skip prefix
+    // consume "^[^:]:"
+
+    *p_cur = strchr(*p_cur, ':');
+
+    if (*p_cur == NULL) {
+        return -1;
+    }
+
+    (*p_cur)++;
+
+    return 0;
+}
+
+static void skipWhiteSpace(char **p_cur)
+{
+    if (*p_cur == NULL) return;
+
+    while (**p_cur != '\0' && isspace(**p_cur)) {
+        (*p_cur)++;
+    }
+}
+
+static void skipNextComma(char **p_cur)
+{
+    if (*p_cur == NULL) return;
+
+    while (**p_cur != '\0' && **p_cur != ',') {
+        (*p_cur)++;
+    }
+
+    if (**p_cur == ',') {
+        (*p_cur)++;
+    }
+}
+
+static char * nextTok(char **p_cur)
+{
+    char *ret = NULL;
+
+    skipWhiteSpace(p_cur);
+
+    if (*p_cur == NULL) {
+        ret = NULL;
+    } else if (**p_cur == '"') {
+        (*p_cur)++;
+        ret = strsep(p_cur, "\"");
+        skipNextComma(p_cur);
+    } else {
+        ret = strsep(p_cur, ",");
+    }
+
+    return ret;
+}
+
+
+/**
+ * Parses the next integer in the AT response line and places it in *p_out
+ * returns 0 on success and -1 on fail
+ * updates *p_cur
+ * "base" is the same as the base param in strtol
+ */
+static int at_tok_nextint_base(char **p_cur, int *p_out, int base, int uns)
+{
+    char *ret;
+
+    if (*p_cur == NULL) {
+        return -1;
+    }
+
+    ret = nextTok(p_cur);
+
+    if (ret == NULL) {
+        return -1;
+    } else {
+        long l;
+        char *end;
+
+        if (uns)
+            l = strtoul(ret, &end, base);
+        else
+            l = strtol(ret, &end, base);
+
+        *p_out = (int)l;
+
+        if (end == ret) {
+            return -1;
+        }
+    }
+
+    return 0;
+}
+
+/**
+ * Parses the next base 10 integer in the AT response line
+ * and places it in *p_out
+ * returns 0 on success and -1 on fail
+ * updates *p_cur
+ */
+int at_tok_nextint(char **p_cur, int *p_out)
+{
+    return at_tok_nextint_base(p_cur, p_out, 10, 0);
+}
+
+/**
+ * Parses the next base 16 integer in the AT response line
+ * and places it in *p_out
+ * returns 0 on success and -1 on fail
+ * updates *p_cur
+ */
+int at_tok_nexthexint(char **p_cur, int *p_out)
+{
+    return at_tok_nextint_base(p_cur, p_out, 16, 1);
+}
+
+int at_tok_nextbool(char **p_cur, char *p_out)
+{
+    int ret;
+    int result;
+
+    ret = at_tok_nextint(p_cur, &result);
+
+    if (ret < 0) {
+        return -1;
+    }
+
+    // booleans should be 0 or 1
+    if (!(result == 0 || result == 1)) {
+        return -1;
+    }
+
+    if (p_out != NULL) {
+        *p_out = (char)result;
+    }
+
+    return ret;
+}
+
+int at_tok_nextstr(char **p_cur, char **p_out)
+{
+    if (*p_cur == NULL) {
+        return -1;
+    }
+
+    *p_out = nextTok(p_cur);
+
+    return 0;
+}
+
+/** returns 1 on "has more tokens" and 0 if no */
+int at_tok_hasmore(char **p_cur)
+{
+    return ! (*p_cur == NULL || **p_cur == '\0');
+}
+
+int at_tok_count(const char *in_line)
+{
+    int commas = 0;
+    const char *p;
+
+    if (!in_line)
+        return 0;
+
+    for (p = in_line; *p != '\0'; p++) {
+        if (*p == ',') commas++;
+    }
+
+    return commas;
+}
+
+//fmt: d ~ int, x ~ hexint, b ~ bool, s ~ str
+int at_tok_scanf(const char *in_line, const char *fmt, ...)
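+/* Usage sketch (hypothetical response line): parsing "+CPIN: READY,1" with
+ *     at_tok_scanf(line, "%s%d", &str, &val)
+ * returns the number of fields converted (2 here). Pass NULL to skip a field;
+ * a bare '-' field parsed with %d comes back as -1 (see below). */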
+{ + int n = 0; + int err; + va_list ap; + const char *p = fmt; + void *d; + void *dump; + static char s_line[1024]; + char *line = s_line; + + if (!in_line) + return 0; + + strncpy(s_line, in_line, sizeof(s_line) - 1); + + va_start(ap, fmt); + + err = at_tok_start(&line); + if (err < 0) goto error; + + for (; *p; p++) { + if (*p == ',' || *p == ' ') + continue; + + if (*p != '%') { + goto error; + } + p++; + + d = va_arg(ap, void *); + if (!d) + d = &dump; + + if (!at_tok_hasmore(&line)) + break; + + if (*line == '-' && *(line + 1) == ',') { + line += 2; + n++; + if (*p == 'd') + *(int *)d = -1; + continue; + } + + switch(*p) { + case 'd': + err = at_tok_nextint(&line, (int *)d); + if (err < 0) goto error; + break; + case 'x': + err = at_tok_nexthexint(&line, (int *)d); + if (err < 0) goto error; + break; + case 'b': + err = at_tok_nextbool(&line, (char *)d); + if (err < 0) goto error; + break; + case 's': + err = at_tok_nextstr(&line, (char **)d); //if strdup(line), here return free memory to caller + if (err < 0) goto error; + break; + default: + goto error; + break; + } + + n++; + } + + va_end(ap); + +error: + //free(line); + return n; +} diff --git a/wwan/app/quectel_cm_5G/src/at_tok.h b/wwan/app/quectel_cm_5G/src/at_tok.h new file mode 100644 index 0000000..2fcb683 --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/at_tok.h @@ -0,0 +1,33 @@ +/* //device/system/reference-ril/at_tok.h +** +** Copyright 2006, The Android Open Source Project +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +#ifndef AT_TOK_H +#define AT_TOK_H 1 + +int at_tok_start(char **p_cur); +int at_tok_nextint(char **p_cur, int *p_out); +int at_tok_nexthexint(char **p_cur, int *p_out); + +int at_tok_nextbool(char **p_cur, char *p_out); +int at_tok_nextstr(char **p_cur, char **out); + +int at_tok_hasmore(char **p_cur); +int at_tok_count(const char *in_line); +int at_tok_scanf(const char *line, const char *fmt, ...); + +#endif /*AT_TOK_H */ + diff --git a/wwan/app/quectel_cm_5G/src/atc.c b/wwan/app/quectel_cm_5G/src/atc.c new file mode 100644 index 0000000..ef78ec6 --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/atc.c @@ -0,0 +1,1055 @@ +/****************************************************************************** + @file atc.c + @brief at command. + + DESCRIPTION + Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. + + --------------------------------------------------------------------------- + Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved. + Quectel Wireless Solution Proprietary and Confidential. 
+  ---------------------------------------------------------------------------
+******************************************************************************/
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <ctype.h>
+#include <getopt.h>
+#include <signal.h>
+#include <time.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <termios.h>
+#include <pthread.h>
+
+extern int asprintf(char **s, const char *fmt, ...);
+
+#include "QMIThread.h"
+
+#include "atchannel.h"
+#include "at_tok.h"
+
+static int asr_style_atc = 0;
+static int s_pdp;
+#define safe_free(__x) do { if (__x) { free((void *)__x); __x = NULL; } } while(0)
+#define safe_at_response_free(__x) { if (__x) { at_response_free(__x); __x = NULL; } }
+
+#define at_response_error(err, p_response) \
+    (err \
+    || p_response == NULL \
+    || p_response->finalResponse == NULL \
+    || p_response->success == 0)
+
+static int atc_init(PROFILE_T *profile) {
+    int err;
+    char *cmd;
+    ATResponse *p_response = NULL;
+
+    if (profile->proxy[0]) {
+        s_pdp = profile->pdp;
+        err = at_send_command_singleline("AT+QNETDEVSTATUS=?", "+QNETDEVSTATUS:", &p_response);
+        if (at_response_error(err, p_response))
+            asr_style_atc = 1; //EC200T/EC100Y do not support this AT, but RG801/RG500U support
+        safe_at_response_free(p_response);
+
+        return err;
+    }
+
+    err = at_handshake();
+    if (err) {
+        dbg_time("handshake fail, TODO ... ");
+        goto exit;
+    }
+
+    s_pdp = profile->pdp;
+    at_send_command_singleline("AT+QCFG=\"usbnet\"", "+QCFG:", NULL);
+    at_send_command_multiline("AT+QNETDEVCTL=?", "+QNETDEVCTL:", NULL);
+    at_send_command("AT+CGREG=2", NULL); //GPRS Network Registration Status
+    at_send_command("AT+CEREG=2", NULL); //EPS Network Registration Status
+    at_send_command("AT+C5GREG=2", NULL); //5GS Network Registration Status
+
+    err = at_send_command_singleline("AT+QNETDEVSTATUS=?", "+QNETDEVSTATUS:", &p_response);
+    if (at_response_error(err, p_response))
+        asr_style_atc = 1; //EC200T/EC100Y do not support this AT, but RG801/RG500U support
+    safe_at_response_free(p_response);
+
+    err = at_send_command_singleline("AT+QCFG=\"NAT\"", "+QCFG:", &p_response);
+    if (!at_response_error(err, p_response)) {
+        int old_nat, new_nat = asr_style_atc ? 1 : 0;
+
+        err = at_tok_scanf(p_response->p_intermediates->line, "%s%d", NULL, &old_nat);
+        if (err == 2 && old_nat != new_nat) {
+            safe_at_response_free(p_response);
+            asprintf(&cmd, "AT+QCFG=\"NAT\",%d", new_nat);
+            err = at_send_command(cmd, &p_response);
+            safe_free(cmd);
+            if (!at_response_error(err, p_response)) {
+                err = at_send_command("at+cfun=1,1", NULL);
+                if (!err)
+                    g_donot_exit_when_modem_hangup = 1;
+                //reboot to take effect
+            }
+            safe_at_response_free(p_response);
+        }
+        err = 0;
+    }
+    safe_at_response_free(p_response);
+
+exit:
+    return err;
+}
+
+static int atc_deinit(void) {
+    return 0;
+}
+
+/**
+ * Called by atchannel when an unsolicited line appears
+ * This is called on atchannel's reader thread.
AT commands may + * not be issued here + */ +static void onUnsolicited (const char *s, const char *sms_pdu) +{ + (void)sms_pdu; + + if (strStartsWith(s, "+QNETDEVSTATUS:")) { + qmidevice_send_event_to_main(RIL_UNSOL_DATA_CALL_LIST_CHANGED); + } + else if (strStartsWith(s, "+CGREG:") + || strStartsWith(s, "+CEREG:") + || strStartsWith(s, "+C5GREG:")) { + qmidevice_send_event_to_main(RIL_UNSOL_RESPONSE_VOICE_NETWORK_STATE_CHANGED); + } +} + +static void onTimeout(void) { + dbg_time("%s", __func__); + //TODO +} + +static void onClose(void) { + dbg_time("%s", __func__); +} + +static void * atc_read_thread(void *param) { + PROFILE_T *profile = (PROFILE_T *)param; + const char *cdc_wdm = (const char *)profile->qmichannel; + int wait_for_request_quit = 0; + int atc_fd; + + atc_fd = cm_open_dev(cdc_wdm); + if (atc_fd <= 0) { + dbg_time("fail to open (%s), errno: %d (%s)", cdc_wdm, errno, strerror(errno)); + goto __quit; + } + + dbg_time("atc_fd = %d", atc_fd); + + if (at_open(atc_fd, onUnsolicited, 0)) + goto __quit; + + at_set_on_timeout(onTimeout); + at_set_on_reader_closed(onClose); + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_CONNECTED); + + while (atc_fd > 0) { + struct pollfd pollfds[] = {{atc_fd, POLLIN, 0}, {qmidevice_control_fd[1], POLLIN, 0}}; + int ne, ret, nevents = 2; + + ret = poll(pollfds, nevents, wait_for_request_quit ? 1000 : -1); + + if (ret == 0 && wait_for_request_quit) { + break; + } + + if (ret < 0) { + dbg_time("%s poll=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno)); + break; + } + + for (ne = 0; ne < nevents; ne++) { + int fd = pollfds[ne].fd; + short revents = pollfds[ne].revents; + + if (revents & (POLLERR | POLLHUP | POLLNVAL)) { + dbg_time("%s poll err/hup/inval", __func__); + dbg_time("epoll fd = %d, events = 0x%04x", fd, revents); + if (revents & (POLLERR | POLLHUP | POLLNVAL)) + goto __quit; + } + + if ((revents & POLLIN) == 0) + continue; + + if (atc_fd == fd) { + usleep(10*1000); //let atchannel.c read at response. 
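+                /* nothing is read here on purpose: atchannel's reader thread
+                   (readerLoop) owns this fd and consumes the response; this
+                   loop only yields briefly so that thread can run */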
+ } + else if (fd == qmidevice_control_fd[1]) { + int triger_event; + if (read(fd, &triger_event, sizeof(triger_event)) == sizeof(triger_event)) { + //dbg_time("triger_event = 0x%x", triger_event); + switch (triger_event) { + case RIL_REQUEST_QUIT: + goto __quit; + break; + case SIG_EVENT_STOP: + wait_for_request_quit = 1; + break; + default: + break; + } + } + } + } + } + +__quit: + at_close(); + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_DISCONNECTED); + dbg_time("%s exit", __func__); + + return NULL; +} + +const struct qmi_device_ops atc_dev_ops = { + .init = atc_init, + .deinit = atc_deinit, + .read = atc_read_thread, +}; + +static int requestBaseBandVersion(PROFILE_T *profile) { + int retVal = -1; + int err; + ATResponse *p_response = NULL; + + (void)profile; + + err = at_send_command_multiline("AT+CGMR", "\0", &p_response); + if (at_response_error(err, p_response)) + goto exit; + + if (p_response->p_intermediates && p_response->p_intermediates->line) { + strncpy(profile->BaseBandVersion, p_response->p_intermediates->line, sizeof(profile->BaseBandVersion) - 1); + retVal = 0; + } + +exit: + safe_at_response_free(p_response); + return retVal; +} + +static int requestGetSIMStatus(SIM_Status *pSIMStatus) +{ + int err; + ATResponse *p_response = NULL; + char *cpinLine; + char *cpinResult; + int ret = SIM_NOT_READY; + + err = at_send_command_singleline("AT+CPIN?", "+CPIN:", &p_response); + if (at_response_error(err, p_response)) + goto done; + + switch (at_get_cme_error(p_response)) + { + case CME_SUCCESS: + break; + + case CME_SIM_NOT_INSERTED: + case CME_OPERATION_NOT_ALLOWED: + case CME_FAILURE: + ret = SIM_ABSENT; + goto done; + + default: + ret = SIM_NOT_READY; + goto done; + } + + cpinLine = p_response->p_intermediates->line; + err = at_tok_start (&cpinLine); + + if (err < 0) + { + ret = SIM_NOT_READY; + goto done; + } + + err = at_tok_nextstr(&cpinLine, &cpinResult); + + if (err < 0) + { + ret = SIM_NOT_READY; + goto done; + } + + if (0 == strcmp (cpinResult, "SIM PIN")) + { + ret = SIM_PIN; + goto done; + } + else if (0 == strcmp (cpinResult, "SIM PUK")) + { + ret = SIM_PUK; + goto done; + } + else if (0 == strcmp (cpinResult, "PH-NET PIN")) + { + return SIM_NETWORK_PERSONALIZATION; + } + else if (0 != strcmp (cpinResult, "READY")) + { + /* we're treating unsupported lock types as "sim absent" */ + ret = SIM_ABSENT; + goto done; + } + + ret = SIM_READY; + +done: + safe_at_response_free(p_response); + *pSIMStatus = ret; + return err; +} + +static int requestEnterSimPin(const char *pPinCode) { + int retVal = -1; + int err; + ATResponse *p_response = NULL; + char *cmd = NULL; + + asprintf(&cmd, "AT+CPIN=%s", pPinCode); + err = at_send_command(cmd, NULL); + safe_free(cmd); + + if (!at_response_error(err, p_response)) { + retVal = 0; + } + + safe_at_response_free(p_response); + return retVal; +} + +static int requestSetProfile(PROFILE_T *profile) { + int err; + ATResponse *p_response = NULL; + char *cmd = NULL; + const char *new_apn = profile->apn ? profile->apn : ""; + const char *new_user = profile->user ? profile->user : ""; + const char *new_password = profile->password ? 
profile->password : "";
+    const char *ipStr[] = {"NULL", "IPV4", "IPV6", "IPV4V6"};
+
+    dbg_time("%s[%d] %s/%s/%s/%d/%s", __func__,
+        profile->pdp, profile->apn, profile->user, profile->password,
+        profile->auth, ipStr[profile->iptype]);
+
+    if ( !strcmp(profile->old_apn, new_apn) && !strcmp(profile->old_user, new_user)
+        && !strcmp(profile->old_password, new_password)
+        && profile->old_iptype == profile->iptype
+        && profile->old_auth == profile->auth)
+    {
+        dbg_time("profile is unchanged, skip setting it");
+        return 0;
+    }
+
+    asprintf(&cmd, "AT+QICSGP=%d,%d,\"%s\",\"%s\",\"%s\",%d",
+        profile->pdp, profile->iptype, new_apn, new_user, new_password, profile->auth);
+    err = at_send_command(cmd, &p_response);
+    safe_free(cmd);
+    if (at_response_error(err, p_response)) {
+        safe_at_response_free(p_response);
+        asprintf(&cmd, "AT+CGDCONT=%d,\"%s\",\"%s\"", profile->pdp, ipStr[profile->iptype], new_apn);
+        err = at_send_command(cmd, &p_response);
+        safe_free(cmd);
+    }
+
+    safe_at_response_free(p_response);
+    return 1; /* 1: the profile was (re)written; 0 above: it was already up to date */
+}
+
+static int requestGetProfile(PROFILE_T *profile) {
+    int retVal = -1;
+    int err;
+    ATResponse *p_response = NULL;
+    char *cmd = NULL;
+    int pdp;
+    int old_iptype = 1; // 1 ~ IPV4, 2 ~ IPV6, 3 ~ IPV4V6
+    char *old_apn = "", *old_user = "", *old_password = "";
+    int old_auth = 0;
+    const char *ipStr[] = {"NULL", "IPV4", "IPV6", "IPV4V6"};
+
+    if (profile->enable_ipv4 && profile->enable_ipv6)
+        profile->iptype = 3;
+    else if (profile->enable_ipv6)
+        profile->iptype = 2;
+    else
+        profile->iptype = 1;
+
+_re_check:
+    asprintf(&cmd, "AT+QICSGP=%d", profile->pdp);
+    err = at_send_command_singleline(cmd, "+QICSGP:", &p_response);
+    safe_free(cmd);
+    if (err == AT_ERROR_INVALID_RESPONSE && p_response == NULL) {
+        //work around a RG801H firmware bug: write an empty profile, then query again
+        safe_at_response_free(p_response);
+        asprintf(&cmd, "AT+QICSGP=%d,%d,\"\",\"\",\"\",0", profile->pdp, profile->iptype);
+        err = at_send_command(cmd, &p_response);
+        safe_free(cmd);
+        if (!at_response_error(err, p_response)) {
+            safe_at_response_free(p_response);
+            goto _re_check;
+        }
+    }
+
+    if (!at_response_error(err, p_response)) {
+        err = at_tok_scanf(p_response->p_intermediates->line,
+            "%d%s%s%s%d", &old_iptype, &old_apn, &old_user, &old_password, &old_auth);
+
+        if (err < 4) /* need at least iptype/apn/user/password; the +QICSGP reply carries no CID to cross-check */
+            goto _error;
+    }
+    else {
+        ATLine *atLine = NULL;
+        char *cgdcont_iptype = NULL;
+
+        safe_at_response_free(p_response);
+        err = at_send_command_multiline("AT+CGDCONT?", "+CGDCONT:", &p_response);
+        if (at_response_error(err, p_response))
+            goto _error;
+
+        atLine = p_response->p_intermediates;
+        while (atLine) {
+            err = at_tok_scanf(atLine->line, "%d%s%s", &pdp, &cgdcont_iptype, &old_apn);
+            if (err == 3 && pdp == profile->pdp) {
+                if (!strcasecmp(cgdcont_iptype, ipStr[3]))
+                    old_iptype = 3;
+                else if (!strcasecmp(cgdcont_iptype, ipStr[2]))
+                    old_iptype = 2;
+                else
+                    old_iptype = 1;
+                break;
+            }
+            old_apn = NULL;
+            atLine = atLine->p_next;
+        }
+    }
+
+    retVal = 0;
+
+_error:
+    if (!old_apn) old_apn = "";
+    if (!old_user) old_user = "";
+    if (!old_password) old_password = "";
+
+    strncpy(profile->old_apn, old_apn, sizeof(profile->old_apn) - 1);
+    strncpy(profile->old_user, old_user, sizeof(profile->old_user) - 1);
+    strncpy(profile->old_password, old_password, sizeof(profile->old_password) - 1);
+    profile->old_auth = old_auth;
+    profile->old_iptype = old_iptype;
+
+    dbg_time("%s[%d] %s/%s/%s/%d/%s", __func__,
+        profile->pdp, profile->old_apn, profile->old_user, profile->old_password,
+        profile->old_auth, ipStr[profile->old_iptype]);
+
+    safe_at_response_free(p_response);
+
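+    /* the old_* fields cached above are what requestSetProfile() compares
+       against to decide whether AT+QICSGP must be re-issued */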
return retVal; +} + +static int requestRegistrationState(UCHAR *pPSAttachedState) { + int retVal = -1; + int err; + ATResponse *p_response = NULL; + ATLine *p_cur; + int i; + int cops_act = -1; + int state = 0, lac = 0, cid = 0, act = 0; + int commas; + char *line; + + *pPSAttachedState = 0; + + err = at_send_command_multiline( + "AT+COPS=3,0;+COPS?;+COPS=3,1;+COPS?;+COPS=3,2;+COPS?", + "+COPS:", &p_response); + if (at_response_error(err, p_response)) + goto error; + +/* +AT< +COPS: 0,0,"CHINA MOBILE",13 +AT< +COPS: 0,1,"CMCC",13 +AT< +COPS: 0,2,"46000",13 +AT< OK +*/ + retVal = 0; + for (i = 0, p_cur = p_response->p_intermediates; p_cur != NULL; p_cur = p_cur->p_next, i++) { + err = at_tok_scanf(p_cur->line, "%d%d%s%d", NULL, NULL, NULL, &cops_act); + if (err != 4) goto error; + + break; + } + + safe_at_response_free(p_response); + switch (cops_act) { + case 2: //UTRAN + case 3: //GSM W/EGPRS + case 4: //UTRAN W/HSDPA + case 5: //UTRAN W/HSUPA + case 6: //UTRAN W/HSDPA and HSUPA + //AT+CGREG GPRS Network Registration Status + err = at_send_command_singleline("AT+CGREG?", "+CGREG:", &p_response); + break; + + case 7: //E-UTRAN + case 13: //E-UTRAN-NR dual connectivity + //AT+CEREG EPS Network Registration Status + err = at_send_command_singleline("AT+CEREG?", "+CEREG:", &p_response); + break; + + case 10: //E-UTRAN connected to a 5GCN + case 11: //NR connected to a 5GCN + case 12: //NG-RAN + //AT+C5GREG 5GS Network Registration Status + err = at_send_command_singleline("AT+C5GREG?", "+C5GREG:", &p_response); + break; + + default: + goto error; + break; + } + + if (at_response_error(err, p_response)) + goto error; + if (!p_response->p_intermediates || !p_response->p_intermediates->line) goto error; + + line = p_response->p_intermediates->line; + commas = at_tok_count(line); + switch (commas) { + case 0: /* +CREG: */ + err = at_tok_nextint(&line, &state); + if (err < 0) goto error; + break; + + case 1: /* +CREG: , */ + err = at_tok_scanf(line, "%d%d", NULL, &state); + if (err != 2) goto error; + break; + + case 2: /* +CREG: , , */ + err = at_tok_scanf(line, "%d%x%x", NULL, &state, &lac, &cid); + if (err != 3) goto error; + break; + + case 3: /* +CREG: , , , */ + err = at_tok_scanf(line, "%d%d%x%x", NULL, &state, &lac, &cid); + if (err != 4) goto error; + break; + + case 4: //, , , , */ + case 5: + case 6: + case 7: + err = at_tok_scanf(line, "%d%d%x%x%d", NULL, &state, &lac, &cid, &act); + if (err != 5) goto error; + break; + + default: + goto error; + } + + //dbg_time("state=%d", state); + + if (state == 1 || state == 5) { //Registered, home network / roaming + *pPSAttachedState = 1; + } + +error: + safe_at_response_free(p_response); + return retVal; +} + +static int requestSetupDataCall(PROFILE_T *profile, int curIpFamily) { + int err; + ATResponse *p_response = NULL; + char *cmd = NULL; + ATLine *p_cur = NULL; + int pdp = profile->pdp; + int state = 0; + + (void)curIpFamily; + + if (strStartsWith(profile->BaseBandVersion, "RG801H") || strStartsWith(profile->BaseBandVersion, "EC200H")) { + //RG801H will miss USB_CDC_NOTIFY_NETWORK_CONNECTION + asprintf(&cmd, "ifconfig %s up", profile->usbnet_adapter); + if (system(cmd)) {}; + safe_free(cmd); + } + + if (asr_style_atc) { + err = at_send_command_multiline("AT+CGACT?", "+CGACT:", &p_response); + if (at_response_error(err, p_response)) + goto _error; + + for (p_cur = p_response->p_intermediates; p_cur != NULL; p_cur = p_cur->p_next) { + int cid = 0; + state = 0; + + err = at_tok_scanf(p_cur->line, "%d%d", &cid, &state); + if (cid == pdp) + 
break; + else if(state) + state = 0; + } + safe_at_response_free(p_response); + + if (state == 0) { + asprintf(&cmd, "AT+CGACT=1,%d", pdp); + err = at_send_command(cmd, &p_response); + safe_free(cmd); + if (at_response_error(err, p_response)) + goto _error; + } + } + + if(asr_style_atc) + asprintf(&cmd, "AT+QNETDEVCTL=1,%d,%d", pdp, 1); + else + asprintf(&cmd, "AT+QNETDEVCTL=%d,1,%d", pdp, 1); + err = at_send_command(cmd, &p_response); + safe_free(cmd); + + if (at_response_error(err, p_response)) + goto _error; + + if (!asr_style_atc) { //TODO some modems do not sync return setup call resule + int t = 0; + + while (t++ < 15) { + asprintf(&cmd, "AT+QNETDEVSTATUS=%d", pdp); + err = at_send_command_singleline(cmd, "+QNETDEVSTATUS", &p_response); + safe_free(cmd); + if (err) goto _error; + + if (!at_response_error(err, p_response)) { + break; + } + safe_at_response_free(p_response); + sleep(1); + } + } + + //some modem do not report URC + qmidevice_send_event_to_main(RIL_UNSOL_DATA_CALL_LIST_CHANGED); + +_error: + safe_at_response_free(p_response); + //dbg_time("%s err=%d", __func__, err); + return err; +} + +static int at_netdevstatus(int pdp, unsigned int *pV4Addr) { + int err; + ATResponse *p_response = NULL; + char *cmd = NULL; + char *ipv4_address = NULL; + char *ipv4_gate = NULL; + char *ipv4_DHCP = NULL; + char *ipv4_pDNS = NULL; + char *ipv4_sDNS = NULL; + char *ipv6_address = NULL; + char *ipv6_gate = NULL; + char *ipv6_DHCP = NULL; + char *ipv6_pDNS = NULL; + char *ipv6_sDNS = NULL; + + *pV4Addr = 0; + + asprintf(&cmd, "AT+QNETDEVSTATUS=%d", pdp); + err = at_send_command_singleline(cmd, "+QNETDEVSTATUS", &p_response); + safe_free(cmd); + if (at_response_error(err, p_response)) goto _error; + if (!p_response->p_intermediates || !p_response->p_intermediates->line) goto _error; + + err = at_tok_scanf(p_response->p_intermediates->line, "%s%s%s%s%s%s%s%s%s%s", + &ipv4_address, &ipv4_gate, &ipv4_DHCP, &ipv4_pDNS, &ipv4_sDNS, + &ipv6_address, &ipv6_gate, &ipv6_DHCP, &ipv6_pDNS, &ipv6_sDNS); + if (err > 0) { +#if 0 + dbg_time("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s", + ipv4_address, ipv4_gate, ipv4_DHCP, ipv4_pDNS, ipv4_sDNS, + ipv6_address, ipv6_gate, ipv6_DHCP, ipv6_pDNS, ipv6_sDNS); +#endif + + if (ipv4_address && ipv4_address[0]) { + int addr[4] = {0, 0, 0, 0}; + + if (strstr(ipv4_address, ".")) { + sscanf(ipv4_address, "%d.%d.%d.%d", &addr[0], &addr[1], &addr[2], &addr[3]); + } + else { + sscanf(ipv4_address, "%02X%02X%02X%02X", &addr[3], &addr[2], &addr[1], &addr[0]); + } + *pV4Addr = (addr[0]) | (addr[1]<<8) | (addr[2]<<16) | (addr[3]<<24); + } + } + +_error: + safe_at_response_free(p_response); + return 0; +} + +static int requestQueryDataCall(UCHAR *pConnectionStatus, int curIpFamily) { + int err; + ATResponse *p_response = NULL; + ATLine *p_cur = NULL; + int state = 0; + int bind = 0; + int cid; + int pdp = s_pdp; + unsigned int v4Addr = 0; + + (void)curIpFamily; + + *pConnectionStatus = QWDS_PKT_DATA_DISCONNECTED; + + if (!asr_style_atc) { + err = at_netdevstatus(pdp, &v4Addr); + if (!err && v4Addr) { + *pConnectionStatus = QWDS_PKT_DATA_CONNECTED; + //if (profile->ipv4.Address == v4Addr) {} //TODO + } + return err; + } + + err = at_send_command_multiline("AT+QNETDEVCTL?", "+QNETDEVCTL:", &p_response); + if (at_response_error(err, p_response)) + goto _error; + + for (p_cur = p_response->p_intermediates; p_cur != NULL; p_cur = p_cur->p_next) + { + //+QNETDECTL:,,, + err = at_tok_scanf(p_cur->line, "%d%d%d%d", &bind, &cid, NULL, &state); + if (err != 4 || cid != pdp) + continue; + if (bind 
!= 1) + bind = 0; + } + safe_at_response_free(p_response); + + if (bind == 0 || state == 0) + goto _error; + + err = at_send_command_multiline("AT+CGACT?", "+CGACT:", &p_response); + if (at_response_error(err, p_response)) + goto _error; + + for (p_cur = p_response->p_intermediates; p_cur != NULL; p_cur = p_cur->p_next) + { + state = 0; + err = at_tok_scanf(p_cur->line, "%d%d", &cid, &state); + if (cid == pdp) + break; + else if(state) + state = 0; + } + safe_at_response_free(p_response); + + if (bind && state) + *pConnectionStatus = QWDS_PKT_DATA_CONNECTED; + +_error: + safe_at_response_free(p_response); + //dbg_time("%s err=%d, call_state=%d", __func__, err, *pConnectionStatus); + return 0; +} + +static int requestDeactivateDefaultPDP(PROFILE_T *profile, int curIpFamily) { + char *cmd = NULL; + int pdp = profile->pdp; + + (void)curIpFamily; + + if (asr_style_atc) + asprintf(&cmd, "AT+QNETDEVCTL=0,%d,%d", pdp, 0); + else + asprintf(&cmd, "AT+QNETDEVCTL=%d,0,%d", pdp, 0); + at_send_command(cmd, NULL); + safe_free(cmd); + + //dbg_time("%s err=%d", __func__, err); + return 0; +} + +static int requestGetIPAddress(PROFILE_T *profile, int curIpFamily) { + int err; + ATResponse *p_response = NULL; + char *cmd = NULL; + ATLine *p_cur = NULL; + int pdp = profile->pdp; + unsigned int v4Addr = 0; + + (void)curIpFamily; + + if (!asr_style_atc) { + err = at_netdevstatus(pdp, &v4Addr); + goto _error; + } + + asprintf(&cmd, "AT+CGPADDR=%d", profile->pdp); + err = at_send_command_singleline(cmd, "+CGPADDR:", &p_response); + safe_free(cmd); + if (at_response_error(err, p_response)) + goto _error; + + //+CGPADDR: 1,"10.201.80.91","2409:8930:4B3:41C7:F9B8:3D9B:A2F7:CA96" + for (p_cur = p_response->p_intermediates; p_cur != NULL; p_cur = p_cur->p_next) + { + char *ipv4 = NULL; + char *ipv6 = NULL; + + err = at_tok_scanf(p_cur->line, "%d%s%s", &pdp, &ipv4, &ipv6); + if (err < 2 || pdp != profile->pdp) + continue; + + if (ipv4) { + int addr[4] = {0, 0, 0, 0}; + + sscanf(ipv4, "%d.%d.%d.%d", &addr[0], &addr[1], &addr[2], &addr[3]); + v4Addr = (addr[0]) | (addr[1]<<8) | (addr[2]<<16) | (addr[3]<<24); + break; + } + } + +_error: + if (v4Addr && profile->ipv4.Address != v4Addr) { + unsigned char *v4 = (unsigned char *)&v4Addr; + + profile->ipv4.Address = v4Addr; + dbg_time("%s %d.%d.%d.%d", __func__, v4[0], v4[1], v4[2], v4[3]); + } + + //dbg_time("%s err=%d", __func__, err); + return v4Addr ? 
0 : -1;
+}
+
+static int requestGetSignalInfo(void) {
+    int retVal = -1;
+    int err;
+    ATResponse *p_response = NULL;
+    int i;
+    ATLine *p_cur = NULL;
+    char *rat = NULL;
+    int cops_act = 0;
+    int is_nr5g_nsa = 0, nr5g_sa = 0;
+    int verbose = 0;
+
+    err = at_send_command_singleline("at+cops?", "+COPS:", &p_response);
+    if (at_response_error(err, p_response)) goto _error;
+    if (!p_response->p_intermediates || !p_response->p_intermediates->line) goto _error;
+
+    retVal = 0;
+    err = at_tok_scanf(p_response->p_intermediates->line, "%d%d%s%d", NULL, NULL, NULL, &cops_act);
+    if (err != 4) goto _error;
+
+    nr5g_sa = (cops_act == 11);
+
+    safe_at_response_free(p_response);
+    err = at_send_command_multiline("at+qeng=\"servingcell\"", "+QENG:", &p_response);
+    if (at_response_error(err, p_response))
+        goto _error;
+
+    for (i = 0, p_cur = p_response->p_intermediates; p_cur != NULL; p_cur = p_cur->p_next, i++) {
+        char *type, *state;
+
+        err = at_tok_scanf(p_cur->line, "%s%s", &type, &state);
+        if (err != 2 || strcmp(type, "servingcell"))
+            continue;
+
+        if (!strcmp(state, "SEARCH") || !strcmp(state, "LIMSRV"))
+            continue;
+
+        if (!strcmp(state, "NOCONN") || !strcmp(state, "CONNECT")) {
+            err = at_tok_scanf(p_cur->line, "%s%s%s", &type, &state, &rat);
+            if (err != 3)
+                continue;
+        }
+        else {
+            rat = state;
+        }
+
+        if (!strcmp(rat, "NR5G-SA"))
+        {
+            //+QENG: "servingcell",<state>,"NR5G-SA",<is_tdd>,<MCC>,<MNC>,<cellID>,<PCID>,<TAC>,<ARFCN>,<band>,<NR_DL_bandwidth>,<RSRP>,<RSRQ>,<RSSI>,<SINR>
+            //+QENG: "servingcell","NOCONN","NR5G-SA","TDD", 454,12,0,21,4ED,636576,78,3,-85,-11,32,0,5184
+            struct qeng_servingcell_nr5g_sa {
+                char *cell_type, *state, *rat, *is_tdd;
+                int MCC, MNC, cellID/*hex*/;
+                int PCID, TAC/*hex*/, ARFCN;
+                int band, NR_DL_bandwidth;
+                int RSRP, RSRQ, RSSI, SINR;
+            };
+            struct qeng_servingcell_nr5g_sa nr5g_sa;
+
+            memset(&nr5g_sa, 0, sizeof(nr5g_sa));
+            err = at_tok_scanf(p_cur->line, "%s,%s,%s,%s,%d,%d,%x,%d,%x,%d,%d,%d,%d,%d,%d,%d",
+                &nr5g_sa.cell_type, &nr5g_sa.state, &nr5g_sa.rat, &nr5g_sa.is_tdd,
+                &nr5g_sa.MCC, &nr5g_sa.MNC, &nr5g_sa.cellID, &nr5g_sa.PCID, &nr5g_sa.TAC,
+                &nr5g_sa.ARFCN, &nr5g_sa.band, &nr5g_sa.NR_DL_bandwidth,
+                &nr5g_sa.RSRP, &nr5g_sa.RSRQ, &nr5g_sa.RSSI, &nr5g_sa.SINR);
+
+            if (err >= 13 && verbose) {
+                dbg_time("%s,%s,%s,%s,%d,%d,%x,%d,%x,%d,%d,%d,%d,%d,%d,%d",
+                    nr5g_sa.cell_type, nr5g_sa.state, nr5g_sa.rat, nr5g_sa.is_tdd,
+                    nr5g_sa.MCC, nr5g_sa.MNC, nr5g_sa.cellID, nr5g_sa.PCID, nr5g_sa.TAC,
+                    nr5g_sa.ARFCN, nr5g_sa.band, nr5g_sa.NR_DL_bandwidth,
+                    nr5g_sa.RSRP, nr5g_sa.RSRQ, nr5g_sa.RSSI, nr5g_sa.SINR);
+            }
+        }
+        else if (!strcmp(rat, "NR5G-NSA"))
+        {
+            //+QENG: "NR5G-NSA",<MCC>,<MNC>,<PCID>,<RSRP>,<SINR>,<RSRQ>
+            struct qeng_servingcell_nr5g_nsa {
+                char *mcc, *mnc;
+                int pcid, rsrp, sinr, rsrq;
+            };
+            struct qeng_servingcell_nr5g_nsa nr5g_nsa;
+
+            memset(&nr5g_nsa, 0, sizeof(nr5g_nsa));
+            err = at_tok_scanf(p_cur->line, "%s%s%s%s%d%d%d%d",
+                NULL, NULL, &nr5g_nsa.mcc, &nr5g_nsa.mnc, &nr5g_nsa.pcid, &nr5g_nsa.rsrp, &nr5g_nsa.sinr, &nr5g_nsa.rsrq);
+            if (err == 8 && verbose)
+            {
+                dbg_time("mcc=%s, mnc=%s, pcid=%d, rsrp=%d, sinr=%d, rsrq=%d",
+                    nr5g_nsa.mcc, nr5g_nsa.mnc, nr5g_nsa.pcid, nr5g_nsa.rsrp, nr5g_nsa.sinr, nr5g_nsa.rsrq);
+            }
+
+            is_nr5g_nsa = 1;
+        }
+        else if (!strcmp(rat, "LTE"))
+        {
+            //+QENG: "servingcell",<state>,"LTE",<is_tdd>,<MCC>,<MNC>,<cellID>,<PCID>,<earfcn>,<freq_band_ind>,<UL_bandwidth>,<DL_bandwidth>,<TAC>,<RSRP>,<RSRQ>,<RSSI>,<SINR>,<CQI>,<tx_power>,<srxlev>
+            struct qeng_servingcell_lte {
+                char *is_tdd, *mcc, *mnc;
+                int cellID/*hex*/, pcid, earfcn, freq_band_ind, ul_bandwidth, dl_bandwidth;
+                int tac/*hex*/, rsrp, rsrq, rssi, sinr, cqi, tx_power, srxlev;
+            };
+            struct qeng_servingcell_lte lte;
+
+            memset(&lte, 0, sizeof(lte));
+            if (!strcmp(rat, state))
+                err = at_tok_scanf(p_cur->line, "%s%s%s%s%s%x%d%d%d%d%d%x%d%d%d%d%d%d%d",
+                    NULL, NULL, &lte.is_tdd, &lte.mcc, &lte.mnc,
+                    &lte.cellID, &lte.pcid, &lte.earfcn, &lte.freq_band_ind, &lte.ul_bandwidth, &lte.dl_bandwidth,
+                    &lte.tac, &lte.rsrp, &lte.rsrq, &lte.rssi, &lte.sinr, &lte.cqi, &lte.tx_power, &lte.srxlev);
+            else
+                err = at_tok_scanf(p_cur->line, "%s%s%s%s%s%s%x%d%d%d%d%d%x%d%d%d%d%d%d%d",
+                    NULL, NULL, NULL, &lte.is_tdd, &lte.mcc, &lte.mnc,
+                    &lte.cellID, &lte.pcid, &lte.earfcn, &lte.freq_band_ind, &lte.ul_bandwidth, &lte.dl_bandwidth,
+                    &lte.tac, &lte.rsrp, &lte.rsrq, &lte.rssi, &lte.sinr, &lte.cqi, &lte.tx_power, &lte.srxlev);
+
+            if (err >= 18 && verbose)
+            {
+                dbg_time("is_tdd=%s, mcc=%s, mnc=%s", lte.is_tdd, lte.mcc, lte.mnc);
+                dbg_time("cellID=%x, pcid=%d, earfcn=%d", lte.cellID, lte.pcid, lte.earfcn);
+                dbg_time("freq_band_ind=%d, ul_bandwidth=%d, dl_bandwidth=%d", lte.freq_band_ind, lte.ul_bandwidth, lte.dl_bandwidth);
+                dbg_time("tac=%x, rsrp=%d, rsrq=%d, rssi=%d, sinr=%d", lte.tac, lte.rsrp, lte.rsrq, lte.rssi, lte.sinr);
+                dbg_time("cqi=%d, tx_power=%d, srxlev=%d", lte.cqi, lte.tx_power, lte.srxlev);
+            }
+        }
+    }
+
+    if (is_nr5g_nsa) {
+        int endc_avl, plmn_info_list_r15_avl, endc_rstr, nr5g_basic;
+
+        is_nr5g_nsa = 0;
+        safe_at_response_free(p_response);
+        err = at_send_command_multiline("at+qendc", "+QENDC:", &p_response);
+        if (at_response_error(err, p_response)) goto _error;
+        if (!p_response->p_intermediates || !p_response->p_intermediates->line) goto _error;
+
+        err = at_tok_scanf(p_response->p_intermediates->line, "%d%d%d%d",
+            &endc_avl, &plmn_info_list_r15_avl, &endc_rstr, &nr5g_basic);
+        if (err == 4 && nr5g_basic) {
+            is_nr5g_nsa = 1;
+        }
+    }
+
+    if (verbose)
+        dbg_time("cops_act=%d, nr5g_nsa=%d, nr5g_sa=%d", cops_act, is_nr5g_nsa, nr5g_sa);
+
+_error:
+    safe_at_response_free(p_response);
+    return retVal;
+}
+
+static int requestGetICCID(void) {
+    int retVal = -1;
+    int err;
+    ATResponse *p_response = NULL;
+    char *iccid;
+
+    err = at_send_command_singleline("AT+QCCID", "+QCCID:", &p_response);
+    if (at_response_error(err, p_response)) goto _error;
+    if (!p_response->p_intermediates || !p_response->p_intermediates->line) goto _error;
+
+    err = at_tok_scanf(p_response->p_intermediates->line, "%s", &iccid);
+    if (err != 1) goto _error;
+
+    if (iccid && iccid[0]) {
+        dbg_time("%s %s", __func__, iccid);
+        retVal = 0;
+    }
+
+_error:
+    safe_at_response_free(p_response);
+    return retVal;
+}
+
+static int requestGetIMSI(void) {
+    int retVal = -1;
+    int err;
+    ATResponse *p_response = NULL;
+    char *imsi;
+
+    err = at_send_command_numeric("AT+CIMI", &p_response);
+    if (at_response_error(err, p_response)) goto _error;
+    if (!p_response->p_intermediates || !p_response->p_intermediates->line) goto _error;
+
+    imsi = p_response->p_intermediates->line;
+    if (imsi) {
+        dbg_time("%s %s", __func__, imsi);
+        retVal = 0;
+    }
+
+_error:
+    safe_at_response_free(p_response);
+    return retVal;
+}
+
+const struct request_ops atc_request_ops = {
+    .requestBaseBandVersion = requestBaseBandVersion,
+    .requestGetSIMStatus = requestGetSIMStatus,
+    .requestEnterSimPin = requestEnterSimPin,
+    .requestSetProfile = requestSetProfile,
+    .requestGetProfile = requestGetProfile,
+    .requestRegistrationState = requestRegistrationState,
+    .requestSetupDataCall = requestSetupDataCall,
+    .requestQueryDataCall = requestQueryDataCall,
+    .requestDeactivateDefaultPDP = requestDeactivateDefaultPDP,
+    .requestGetIPAddress = requestGetIPAddress,
+    .requestGetSignalInfo = requestGetSignalInfo,
+    .requestGetICCID = requestGetICCID,
+    .requestGetIMSI = requestGetIMSI,
+};
+
diff --git
a/wwan/app/quectel_cm_5G/src/atchannel.c b/wwan/app/quectel_cm_5G/src/atchannel.c new file mode 100644 index 0000000..90aa1c4 --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/atchannel.c @@ -0,0 +1,1037 @@ +/* //device/system/reference-ril/atchannel.c +** +** Copyright 2006, The Android Open Source Project +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +#include "atchannel.h" +#include "at_tok.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "QMIThread.h" +#define LOGE dbg_time +#define LOGD dbg_time + +#define NUM_ELEMS(x) (sizeof(x)/sizeof(x[0])) + +#define MAX_AT_RESPONSE sizeof(cm_recv_buf) +#define HANDSHAKE_RETRY_COUNT 8 +#define HANDSHAKE_TIMEOUT_MSEC 1000 + +static pthread_t s_tid_reader; +static int s_fd = -1; /* fd of the AT channel */ +static ATUnsolHandler s_unsolHandler; +static int s_atc_proxy = 0; /* fd of the AT channel */ + +/* for input buffering */ + +static char *s_ATBuffer = (char *)cm_recv_buf; +static char *s_ATBufferCur = (char *)cm_recv_buf; + +static int s_readCount = 0; + +#if AT_DEBUG +void AT_DUMP(const char* prefix, const char* buff, int len) +{ + if (len < 0) + len = strlen(buff); + LOGD("%.*s", len, buff); +} +#endif + +/* + * for current pending command + * these are protected by s_commandmutex + */ +static ATCommandType s_type; +static const char *s_responsePrefix = NULL; +static const char *s_smsPDU = NULL; +static const char *s_raw_data = NULL; +static size_t s_raw_len; +static ATResponse *sp_response = NULL; + +static void (*s_onTimeout)(void) = NULL; +static void (*s_onReaderClosed)(void) = NULL; +static int s_readerClosed; + +static void onReaderClosed(); +static int writeCtrlZ (const char *s); +static int writeline (const char *s); +static int writeraw (const char *s, size_t len); + +static void sleepMsec(long long msec) +{ + struct timespec ts; + int err; + + ts.tv_sec = (msec / 1000); + ts.tv_nsec = (msec % 1000) * 1000 * 1000; + + do { + err = nanosleep (&ts, &ts); + } while (err < 0 && errno == EINTR); +} + +/** returns 1 if line starts with prefix, 0 if it does not */ +int strStartsWith(const char *line, const char *prefix) +{ + for ( ; *line != '\0' && *prefix != '\0' ; line++, prefix++) { + if (*line != *prefix) { + return 0; + } + } + + return *prefix == '\0'; +} + +/** add an intermediate response to sp_response*/ +static void addIntermediate(const char *line) +{ + ATLine *p_new; + + p_new = (ATLine *) malloc(sizeof(ATLine)); + + p_new->line = strdup(line); + + /* note: this adds to the head of the list, so the list + will be in reverse order of lines received. 
the order is flipped + again before passing on to the command issuer */ + p_new->p_next = sp_response->p_intermediates; + sp_response->p_intermediates = p_new; +} + + +/** + * returns 1 if line is a final response indicating error + * See 27.007 annex B + * WARNING: NO CARRIER and others are sometimes unsolicited + */ +static const char * s_finalResponsesError[] = { + "ERROR", + "+CMS ERROR:", + "+CME ERROR:", + "NO CARRIER", /* sometimes! */ + "NO ANSWER", + "NO DIALTONE", + "COMMAND NOT SUPPORT", +}; +static int isFinalResponseError(const char *line) +{ + size_t i; + + for (i = 0 ; i < NUM_ELEMS(s_finalResponsesError) ; i++) { + if (strStartsWith(line, s_finalResponsesError[i])) { + return 1; + } + } + + return 0; +} + +/** + * returns 1 if line is a final response indicating success + * See 27.007 annex B + * WARNING: NO CARRIER and others are sometimes unsolicited + */ +static const char * s_finalResponsesSuccess[] = { + "OK", + "+QIND: \"FOTA\",\"END\",0", + "CONNECT" /* some stacks start up data on another channel */ +}; + +static int isFinalResponseSuccess(const char *line) +{ + size_t i; + + for (i = 0 ; i < NUM_ELEMS(s_finalResponsesSuccess) ; i++) { + if (strStartsWith(line, s_finalResponsesSuccess[i])) { + return 1; + } + } + + return 0; +} + +#if 0 +/** + * returns 1 if line is a final response, either error or success + * See 27.007 annex B + * WARNING: NO CARRIER and others are sometimes unsolicited + */ +static int isFinalResponse(const char *line) +{ + return isFinalResponseSuccess(line) || isFinalResponseError(line); +} +#endif + +/** + * returns 1 if line is the first line in (what will be) a two-line + * SMS unsolicited response + */ +static const char * s_smsUnsoliciteds[] = { + "+CMT:", + "+CDS:", + "+CBM:", + "+CMTI:" +}; +static int isSMSUnsolicited(const char *line) +{ + size_t i; + + for (i = 0 ; i < NUM_ELEMS(s_smsUnsoliciteds) ; i++) { + if (strStartsWith(line, s_smsUnsoliciteds[i])) { + return 1; + } + } + + return 0; +} + + +/** assumes s_commandmutex is held */ +static void handleFinalResponse(const char *line) +{ + sp_response->finalResponse = strdup(line); + + pthread_cond_signal(&cm_command_cond); +} + +static void handleUnsolicited(const char *line) +{ + if (s_unsolHandler != NULL) { + s_unsolHandler(line, NULL); + } +} + +static void processLine(const char *line) +{ + pthread_mutex_lock(&cm_command_mutex); + + if (sp_response == NULL) { + /* no command pending */ + handleUnsolicited(line); + } else if (s_raw_data != NULL && 0 == strcmp(line, "CONNECT")) { + usleep(500*1000); //for EC20 + writeraw(s_raw_data, s_raw_len); + s_raw_data = NULL; + } else if (isFinalResponseSuccess(line)) { + if(s_atc_proxy) + handleUnsolicited(line); + sp_response->success = 1; + handleFinalResponse(line); + } else if (isFinalResponseError(line)) { + if(s_atc_proxy) + handleUnsolicited(line); + sp_response->success = 0; + handleFinalResponse(line); + } else if (s_smsPDU != NULL && 0 == strcmp(line, "> ")) { + // See eg. 
TS 27.005 4.3 + // Commands like AT+CMGS have a "> " prompt + writeCtrlZ(s_smsPDU); + s_smsPDU = NULL; + } else switch (s_type) { + case NO_RESULT: + handleUnsolicited(line); + break; + case NUMERIC: + if (sp_response->p_intermediates == NULL + && isdigit(line[0]) + ) { + addIntermediate(line); + } else { + /* either we already have an intermediate response or + the line doesn't begin with a digit */ + handleUnsolicited(line); + } + break; + case SINGLELINE: + if (sp_response->p_intermediates == NULL + && strStartsWith (line, s_responsePrefix) + ) { + addIntermediate(line); + } else { + /* we already have an intermediate response */ + handleUnsolicited(line); + } + break; + case MULTILINE: + if (strStartsWith (line, s_responsePrefix)) { + addIntermediate(line); + } else { + handleUnsolicited(line); + } + break; + + default: /* this should never be reached */ + LOGE("Unsupported AT command type %d\n", s_type); + handleUnsolicited(line); + break; + } + + pthread_mutex_unlock(&cm_command_mutex); +} + + +/** + * Returns a pointer to the end of the next line + * special-cases the "> " SMS prompt + * + * returns NULL if there is no complete line + */ +static char * findNextEOL(char *cur) +{ + if (cur[0] == '>' && cur[1] == ' ' && cur[2] == '\0') { + /* SMS prompt character...not \r terminated */ + return cur+2; + } + + // Find next newline + while (*cur != '\0' && *cur != '\r' && *cur != '\n') cur++; + + return *cur == '\0' ? NULL : cur; +} + + +/** + * Reads a line from the AT channel, returns NULL on timeout. + * Assumes it has exclusive read access to the FD + * + * This line is valid only until the next call to readline + * + * This function exists because as of writing, android libc does not + * have buffered stdio. + */ + +static const char *readline() +{ + ssize_t count; + + char *p_read = NULL; + char *p_eol = NULL; + char *ret; + + /* this is a little odd. I use *s_ATBufferCur == 0 to + * mean "buffer consumed completely". If it points to a character, than + * the buffer continues until a \0 + */ + if (*s_ATBufferCur == '\0') { + /* empty buffer */ + s_ATBufferCur = s_ATBuffer; + *s_ATBufferCur = '\0'; + p_read = s_ATBuffer; + } else { /* *s_ATBufferCur != '\0' */ + /* there's data in the buffer from the last read */ + + // skip over leading newlines + while (*s_ATBufferCur == '\r' || *s_ATBufferCur == '\n') + s_ATBufferCur++; + + p_eol = findNextEOL(s_ATBufferCur); + + if (p_eol == NULL) { + /* a partial line. move it up and prepare to read more */ + size_t len; + + len = strlen(s_ATBufferCur); + + memmove(s_ATBuffer, s_ATBufferCur, len + 1); + p_read = s_ATBuffer + len; + s_ATBufferCur = s_ATBuffer; + } + /* Otherwise, (p_eol !- NULL) there is a complete line */ + /* that will be returned the while () loop below */ + } + + while (p_eol == NULL) { + if (0 == MAX_AT_RESPONSE - (p_read - s_ATBuffer)) { + LOGE("ERROR: Input line exceeded buffer\n"); + /* ditch buffer and start over again */ + s_ATBufferCur = s_ATBuffer; + *s_ATBufferCur = '\0'; + p_read = s_ATBuffer; + } + + do { + while (s_fd > 0) { + struct pollfd pollfds[1] = {{s_fd, POLLIN, 0}}; + int ret; + + do { + ret = poll(pollfds, 1, -1); + } while ((ret < 0) && (errno == EINTR)); + + if (pollfds[0].revents & (POLLERR | POLLHUP | POLLNVAL)) { + break; + } else if (pollfds[0].revents & (POLLIN)) { + break; + } + }; + + count = (s_fd == -1) ? 
0 : read(s_fd, p_read, + MAX_AT_RESPONSE - (p_read - s_ATBuffer)); + } while (count < 0 && errno == EINTR); + + if (count > 0) { + AT_DUMP( "<< ", p_read, count ); + s_readCount += count; + + p_read[count] = '\0'; + + // skip over leading newlines + while (*s_ATBufferCur == '\r' || *s_ATBufferCur == '\n') + s_ATBufferCur++; + + p_eol = findNextEOL(s_ATBufferCur); + p_read += count; + } else if (count <= 0) { + /* read error encountered or EOF reached */ + if(count == 0) { + LOGD("atchannel: EOF reached"); + } else { + LOGD("atchannel: read error %s", strerror(errno)); + } + return NULL; + } + } + + /* a full line in the buffer. Place a \0 over the \r and return */ + + ret = s_ATBufferCur; + *p_eol = '\0'; + s_ATBufferCur = p_eol + 1; /* this will always be <= p_read, */ + /* and there will be a \0 at *p_read */ + + LOGD("AT< %s", ret); + return ret; +} + + +static void onReaderClosed() +{ + LOGE("%s", __func__); + if (s_onReaderClosed != NULL && s_readerClosed == 0) { + + pthread_mutex_lock(&cm_command_mutex); + + s_readerClosed = 1; + + pthread_cond_signal(&cm_command_cond); + + pthread_mutex_unlock(&cm_command_mutex); + + s_onReaderClosed(); + } +} + + +static void *readerLoop(void *arg) +{ + (void)arg; + + for (;;) { + const char * line; + + line = readline(); + + if (line == NULL) { + break; + } + + if(isSMSUnsolicited(line)) { + char *line1; + const char *line2; + + // The scope of string returned by 'readline()' is valid only + // till next call to 'readline()' hence making a copy of line + // before calling readline again. + line1 = strdup(line); + line2 = readline(); + + if (line2 == NULL) { + break; + } + + if (s_unsolHandler != NULL) { + s_unsolHandler (line1, line2); + } + free(line1); + } else { + processLine(line); + } + } + + onReaderClosed(); + + return NULL; +} + +/** + * Sends string s to the radio with a \r appended. + * Returns AT_ERROR_* on error, 0 on success + * + * This function exists because as of writing, android libc does not + * have buffered stdio. 
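+ * (for short commands it also buffers the trailing '\r' into the same
+ * write, since writing the CR separately can fail on some USB xHCI host
+ * controllers; see the workaround below)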
+ */ +static int writeline (const char *s) +{ + size_t cur = 0; + size_t len = strlen(s); + ssize_t written; + static char at_command[64]; + + if (s_fd < 0 || s_readerClosed > 0) { + return AT_ERROR_CHANNEL_CLOSED; + } + + LOGD("AT> %s", s); + + AT_DUMP( ">> ", s, strlen(s) ); + +#if 1 //send '\r' maybe fail via USB controller: Intel Corporation 7 Series/C210 Series Chipset Family USB xHCI Host Controller (rev 04) + if (len < (sizeof(at_command) - 1)) { + strcpy(at_command, s); + at_command[len++] = '\r'; + s = (const char *)at_command; + } +#endif + + /* the main string */ + while (cur < len) { + do { + written = write (s_fd, s + cur, len - cur); + } while (written < 0 && errno == EINTR); + + if (written < 0) { + return AT_ERROR_GENERIC; + } + + cur += written; + } + +#if 1 //Quectel send '\r' maybe fail via USB controller: Intel Corporation 7 Series/C210 Series Chipset Family USB xHCI Host Controller (rev 04) + if (s == (const char *)at_command) { + return 0; + } +#endif + + /* the \r */ + + do { + written = write (s_fd, "\r" , 1); + } while ((written < 0 && errno == EINTR) || (written == 0)); + + if (written < 0) { + return AT_ERROR_GENERIC; + } + + return 0; +} +static int writeCtrlZ (const char *s) +{ + size_t cur = 0; + size_t len = strlen(s); + ssize_t written; + + if (s_fd < 0 || s_readerClosed > 0) { + return AT_ERROR_CHANNEL_CLOSED; + } + + LOGD("AT> %s^Z", s); + + AT_DUMP( ">* ", s, strlen(s) ); + + /* the main string */ + while (cur < len) { + do { + written = write (s_fd, s + cur, len - cur); + } while (written < 0 && errno == EINTR); + + if (written < 0) { + return AT_ERROR_GENERIC; + } + + cur += written; + } + + /* the ^Z */ + + do { + written = write (s_fd, "\032" , 1); + } while ((written < 0 && errno == EINTR) || (written == 0)); + + if (written < 0) { + return AT_ERROR_GENERIC; + } + + return 0; +} + +static int writeraw (const char *s, size_t len) { + size_t cur = 0; + ssize_t written; + + if (s_fd < 0 || s_readerClosed > 0) { + return AT_ERROR_CHANNEL_CLOSED; + } + + /* the main string */ + while (cur < len) { + struct pollfd pollfds[1] = {{s_fd, POLLOUT, 0}}; + int ret; + + ret = poll(pollfds, 1, -1); + if (ret <= 0) + break; + + do { + written = write (s_fd, s + cur, len - cur); + } while (written < 0 && errno == EINTR); + + if (written < 0) { + return AT_ERROR_GENERIC; + } + + cur += written; + } + + if (written < 0) { + return AT_ERROR_GENERIC; + } + + return cur; +} + +static void clearPendingCommand() +{ + if (sp_response != NULL) { + at_response_free(sp_response); + } + + sp_response = NULL; + s_responsePrefix = NULL; + s_smsPDU = NULL; +} + + +/** + * Starts AT handler on stream "fd' + * returns 0 on success, -1 on error + */ +int at_open(int fd, ATUnsolHandler h, int proxy) +{ + int ret; + pthread_attr_t attr; + + s_fd = fd; + s_unsolHandler = h; + s_readerClosed = 0; + s_atc_proxy = proxy; + + s_responsePrefix = NULL; + s_smsPDU = NULL; + sp_response = NULL; + + pthread_attr_init (&attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); + + ret = pthread_create(&s_tid_reader, &attr, readerLoop, NULL); + + if (ret < 0) { + LOGE("readerLoop create fail!"); + perror ("pthread_create\n"); + return -1; + } + + return 0; +} + +/* FIXME is it ok to call this from the reader and the command thread? 
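+   (as implemented, at_close() only closes the fd and signals cm_command_cond;
+   the reader thread then exits on its own once its read() fails)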
*/ +void at_close() +{ + dbg_time("at_close"); + if (s_fd >= 0) { + close(s_fd); + } + s_fd = -1; + + pthread_mutex_lock(&cm_command_mutex); + + s_readerClosed = 1; + + pthread_cond_signal(&cm_command_cond); + + pthread_mutex_unlock(&cm_command_mutex); + + /* the reader thread should eventually die */ +} + +static ATResponse * at_response_new() +{ + return (ATResponse *) calloc(1, sizeof(ATResponse)); +} + +void at_response_free(ATResponse *p_response) +{ + ATLine *p_line; + + if (p_response == NULL) return; + + p_line = p_response->p_intermediates; + + while (p_line != NULL) { + ATLine *p_toFree; + + p_toFree = p_line; + p_line = p_line->p_next; + + free(p_toFree->line); + free(p_toFree); + } + + free (p_response->finalResponse); + free (p_response); +} + +/** + * The line reader places the intermediate responses in reverse order + * here we flip them back + */ +static void reverseIntermediates(ATResponse *p_response) +{ + ATLine *pcur,*pnext; + + pcur = p_response->p_intermediates; + p_response->p_intermediates = NULL; + + while (pcur != NULL) { + pnext = pcur->p_next; + pcur->p_next = p_response->p_intermediates; + p_response->p_intermediates = pcur; + pcur = pnext; + } +} + +/** + * Internal send_command implementation + * Doesn't lock or call the timeout callback + * + * timeoutMsec == 0 means infinite timeout + */ +static int at_send_command_full_nolock (const char *command, ATCommandType type, + const char *responsePrefix, const char *smspdu, + long long timeoutMsec, ATResponse **pp_outResponse) +{ + int err = 0; + + if (!timeoutMsec) + timeoutMsec = 15000; + + if(sp_response != NULL) { + err = AT_ERROR_COMMAND_PENDING; + goto error; + } + + if (command != NULL) + err = writeline (command); + + if (err < 0) { + printf("%s errno: %d (%s)\n", __func__, errno, strerror(errno)); + goto error; + } + + s_type = type; + s_responsePrefix = responsePrefix; + s_smsPDU = smspdu; + sp_response = at_response_new(); + + while (sp_response->finalResponse == NULL && s_readerClosed == 0) { + err = pthread_cond_timeout_np(&cm_command_cond, &cm_command_mutex, timeoutMsec); + + if (err == ETIMEDOUT) { + err = AT_ERROR_TIMEOUT; + goto error; + } + } + + if (pp_outResponse == NULL) { + at_response_free(sp_response); + } else { + /* line reader stores intermediate responses in reverse order */ + reverseIntermediates(sp_response); + *pp_outResponse = sp_response; + } + + sp_response = NULL; + + if(s_readerClosed > 0) { + err = AT_ERROR_CHANNEL_CLOSED; + goto error; + } + + err = 0; +error: + clearPendingCommand(); + + return err; +} + +/** + * Internal send_command implementation + * + * timeoutMsec == 0 means infinite timeout + */ +static int at_send_command_full (const char *command, ATCommandType type, + const char *responsePrefix, const char *smspdu, + long long timeoutMsec, ATResponse **pp_outResponse) +{ + int err; + + if (0 != pthread_equal(s_tid_reader, pthread_self())) { + /* cannot be called from reader thread */ + return AT_ERROR_INVALID_THREAD; + } + + pthread_mutex_lock(&cm_command_mutex); + + err = at_send_command_full_nolock(command, type, + responsePrefix, smspdu, + timeoutMsec, pp_outResponse); + + pthread_mutex_unlock(&cm_command_mutex); + + if (err == AT_ERROR_TIMEOUT && s_onTimeout != NULL) { + s_onTimeout(); + } + + return err; +} + + +/** + * Issue a single normal AT command with no intermediate response expected + * + * "command" should not include \r + * pp_outResponse can be NULL + * + * if non-NULL, the resulting ATResponse * must be eventually freed with + * at_response_free + */ 
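+/* usage sketch (illustrative, using only names from this file):
+ *
+ *     ATResponse *p_response = NULL;
+ *     int err = at_send_command("AT+CFUN?", &p_response);
+ *     if (!err && p_response && p_response->success)
+ *         LOGD("final: %s", p_response->finalResponse);
+ *     at_response_free(p_response);   // safe to call on NULL
+ */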
+int at_send_command (const char *command, ATResponse **pp_outResponse) +{ + int err; + + err = at_send_command_full (command, NO_RESULT, NULL, + NULL, 0, pp_outResponse); + + return err; +} + + +int at_send_command_singleline (const char *command, + const char *responsePrefix, + ATResponse **pp_outResponse) +{ + int err; + + err = at_send_command_full (command, SINGLELINE, responsePrefix, + NULL, 0, pp_outResponse); + + if (err == 0 && pp_outResponse != NULL + && (*pp_outResponse)->success > 0 + && (*pp_outResponse)->p_intermediates == NULL + ) { + /* successful command must have an intermediate response */ + at_response_free(*pp_outResponse); + *pp_outResponse = NULL; + return AT_ERROR_INVALID_RESPONSE; + } + + return err; +} + + +int at_send_command_numeric (const char *command, + ATResponse **pp_outResponse) +{ + int err; + + err = at_send_command_full (command, NUMERIC, NULL, + NULL, 0, pp_outResponse); + + if (err == 0 && pp_outResponse != NULL + && (*pp_outResponse)->success > 0 + && (*pp_outResponse)->p_intermediates == NULL + ) { + /* successful command must have an intermediate response */ + at_response_free(*pp_outResponse); + *pp_outResponse = NULL; + return AT_ERROR_INVALID_RESPONSE; + } + + return err; +} + + +int at_send_command_sms (const char *command, + const char *pdu, + const char *responsePrefix, + ATResponse **pp_outResponse) +{ + int err; + + err = at_send_command_full (command, SINGLELINE, responsePrefix, + pdu, 0, pp_outResponse); + + if (err == 0 && pp_outResponse != NULL + && (*pp_outResponse)->success > 0 + && (*pp_outResponse)->p_intermediates == NULL + ) { + /* successful command must have an intermediate response */ + at_response_free(*pp_outResponse); + *pp_outResponse = NULL; + return AT_ERROR_INVALID_RESPONSE; + } + + return err; +} + +int at_send_command_multiline (const char *command, + const char *responsePrefix, + ATResponse **pp_outResponse) +{ + int err; + + err = at_send_command_full (command, MULTILINE, responsePrefix, + NULL, 0, pp_outResponse); + + return err; +} + +int at_send_command_raw (const char *command, + const char *raw_data, unsigned int raw_len, + const char *responsePrefix, + ATResponse **pp_outResponse) +{ + int err; + + s_raw_data = raw_data; + s_raw_len = raw_len; + err = at_send_command_full (command, SINGLELINE, responsePrefix, + NULL, 0, pp_outResponse); + + return err; +} + +/** + * Periodically issue an AT command and wait for a response. 
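+ * (the probe command ATE0Q0V1 also normalizes the channel: echo off,
+ * result codes enabled, verbose response format)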
+ * Used to ensure channel has start up and is active + */ + +int at_handshake() +{ + int i; + int err = 0; + + if (0 != pthread_equal(s_tid_reader, pthread_self())) { + /* cannot be called from reader thread */ + return AT_ERROR_INVALID_THREAD; + } + + pthread_mutex_lock(&cm_command_mutex); + + for (i = 0 ; i < HANDSHAKE_RETRY_COUNT ; i++) { + /* some stacks start with verbose off */ + err = at_send_command_full_nolock ("ATE0Q0V1", NO_RESULT, + NULL, NULL, HANDSHAKE_TIMEOUT_MSEC, NULL); + + if (err == 0) { + break; + } + } + + pthread_mutex_unlock(&cm_command_mutex); + + if (err == 0) { + /* pause for a bit to let the input buffer drain any unmatched OK's + (they will appear as extraneous unsolicited responses) */ + + sleepMsec(HANDSHAKE_TIMEOUT_MSEC); + } + + return err; +} + +AT_CME_Error at_get_cme_error(const ATResponse *p_response) +{ + int ret; + int err; + char *p_cur; + + if (p_response == NULL) + return CME_ERROR_NON_CME; + + if (p_response->success > 0) { + return CME_SUCCESS; + } + + if (p_response->finalResponse == NULL + || !strStartsWith(p_response->finalResponse, "+CME ERROR:") + ) { + return CME_ERROR_NON_CME; + } + + p_cur = p_response->finalResponse; + err = at_tok_start(&p_cur); + + if (err < 0) { + return CME_ERROR_NON_CME; + } + + err = at_tok_nextint(&p_cur, &ret); + + if (err < 0) { + return CME_ERROR_NON_CME; + } + + return (AT_CME_Error) ret; +} + +/** This callback is invoked on the command thread */ +void at_set_on_timeout(void (*onTimeout)(void)) +{ + s_onTimeout = onTimeout; +} + +/** + * This callback is invoked on the reader thread (like ATUnsolHandler) + * when the input stream closes before you call at_close + * (not when you call at_close()) + * You should still call at_close() + */ +void at_set_on_reader_closed(void (*onClose)(void)) +{ + s_onReaderClosed = onClose; +} diff --git a/wwan/app/quectel_cm_5G/src/atchannel.h b/wwan/app/quectel_cm_5G/src/atchannel.h new file mode 100644 index 0000000..cce28b1 --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/atchannel.h @@ -0,0 +1,152 @@ +/* //device/system/reference-ril/atchannel.h +** +** Copyright 2006, The Android Open Source Project +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. 
+*/ + +#ifndef ATCHANNEL_H +#define ATCHANNEL_H 1 + +#ifdef __cplusplus +extern "C" { +#endif + +/* define AT_DEBUG to send AT traffic to /tmp/radio-at.log" */ +#define AT_DEBUG 0 + +#if AT_DEBUG +extern void AT_DUMP(const char* prefix, const char* buff, int len); +#else +#define AT_DUMP(prefix,buff,len) do{}while(0) +#endif + +#define AT_ERROR_GENERIC -1 +#define AT_ERROR_COMMAND_PENDING -2 +#define AT_ERROR_CHANNEL_CLOSED -3 +#define AT_ERROR_TIMEOUT -4 +#define AT_ERROR_INVALID_THREAD -5 /* AT commands may not be issued from + reader thread (or unsolicited response + callback */ +#define AT_ERROR_INVALID_RESPONSE -6 /* eg an at_send_command_singleline that + did not get back an intermediate + response */ + + +typedef enum { + NO_RESULT, /* no intermediate response expected */ + NUMERIC, /* a single intermediate response starting with a 0-9 */ + SINGLELINE, /* a single intermediate response starting with a prefix */ + MULTILINE /* multiple line intermediate response + starting with a prefix */ +} ATCommandType; + +/** a singly-lined list of intermediate responses */ +typedef struct ATLine { + struct ATLine *p_next; + char *line; +} ATLine; + +/** Free this with at_response_free() */ +typedef struct { + int success; /* true if final response indicates + success (eg "OK") */ + char *finalResponse; /* eg OK, ERROR */ + ATLine *p_intermediates; /* any intermediate responses */ +} ATResponse; + +/** + * a user-provided unsolicited response handler function + * this will be called from the reader thread, so do not block + * "s" is the line, and "sms_pdu" is either NULL or the PDU response + * for multi-line TS 27.005 SMS PDU responses (eg +CMT:) + */ +typedef void (*ATUnsolHandler)(const char *s, const char *sms_pdu); + +int at_open(int fd, ATUnsolHandler h, int proxy); +void at_close(); + +/* This callback is invoked on the command thread. 
+ You should reset or handshake here to avoid getting out of sync */ +void at_set_on_timeout(void (*onTimeout)(void)); +/* This callback is invoked on the reader thread (like ATUnsolHandler) + when the input stream closes before you call at_close + (not when you call at_close()) + You should still call at_close() + It may also be invoked immediately from the current thread if the read + channel is already closed */ +void at_set_on_reader_closed(void (*onClose)(void)); + +int at_send_command_singleline (const char *command, + const char *responsePrefix, + ATResponse **pp_outResponse); + +int at_send_command_numeric (const char *command, + ATResponse **pp_outResponse); + +int at_send_command_multiline (const char *command, + const char *responsePrefix, + ATResponse **pp_outResponse); + +int at_send_command_raw (const char *command, + const char *raw_data, unsigned int raw_len, + const char *responsePrefix, + ATResponse **pp_outResponse); + +int at_handshake(); + +int at_send_command (const char *command, ATResponse **pp_outResponse); + +int at_send_command_sms (const char *command, const char *pdu, + const char *responsePrefix, + ATResponse **pp_outResponse); + +void at_response_free(ATResponse *p_response); + +int strStartsWith(const char *line, const char *prefix); + +typedef enum { + CME_ERROR_NON_CME = -1, + CME_SUCCESS = 0, + + CME_OPERATION_NOT_ALLOWED = 3, + CME_OPERATION_NOT_SUPPORTED = 4, + CME_PH_SIM_PIN= 5, + CME_PH_FSIM_PIN = 6, + CME_PH_FSIM_PUK = 7, + CME_SIM_NOT_INSERTED =10, + CME_SIM_PIN_REQUIRED = 11, + CME_SIM_PUK_REQUIRED = 12, + CME_FAILURE = 13, + CME_SIM_BUSY = 14, + CME_SIM_WRONG = 15, + CME_INCORRECT_PASSWORD = 16, + CME_SIM_PIN2_REQUIRED = 17, + CME_SIM_PUK2_REQUIRED = 18, + CME_MEMORY_FULL = 20, + CME_INVALID_INDEX = 21, + CME_NOT_FOUND = 22, + CME_MEMORY_FAILURE = 23, + CME_STRING_TO_LONG = 24, + CME_INVALID_CHAR = 25, + CME_DIALSTR_TO_LONG = 26, + CME_INVALID_DIALCHAR = 27, +} AT_CME_Error; + +AT_CME_Error at_get_cme_error(const ATResponse *p_response); + +#ifdef __cplusplus +} +#endif + +#endif /*ATCHANNEL_H*/ diff --git a/wwan/app/quectel_cm_5G/src/configure.ac b/wwan/app/quectel_cm_5G/src/configure.ac new file mode 100644 index 0000000..f4c60ea --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/configure.ac @@ -0,0 +1,48 @@ +# -*- Autoconf -*- +# Process this file with autoconf to produce a configure script. + +AC_PREREQ([2.61]) +AC_INIT([quectel-CM], [1.0], [fae-support@quectel.com]) +AC_CONFIG_HEADERS([config.h]) + +# Checks for programs. +AC_PROG_CC + +# Checks for libraries. + +# Checks for header files. + +# Checks for typedefs, structures, and compiler characteristics. +AC_ARG_WITH(sanitized-headers, + AS_HELP_STRING([--with-sanitized-headers=DIR], + [Specify the location of the sanitized Linux headers]), + [CPPFLAGS="$CPPFLAGS -idirafter $withval"]) + +AC_ARG_WITH([qrtr], + AC_HELP_STRING([--with-qrtr], + [enable qrtr, building which use qrtr])) + +if (test "x${with_qrtr}" = "xyes"); then + #AC_DEFINE(ENABLE_USEQTRT, 1, [Define if uses qrtr]) + AC_CHECK_HEADERS([linux/qrtr.h linux/rmnet_data.h]) +fi + +AM_CONDITIONAL(USE_QRTR, test "x${with_qrtr}" = "xyes") + +AC_ARG_WITH([msm-ipc], + AC_HELP_STRING([--with-msm-ipc], + [enable msm-ipc, building which use qrtr])) + +if (test "x${with_msm_ipc}" = "xyes"); then + #AC_DEFINE(ENABLE_USEQTRT, 1, [Define if uses qrtr]) + AC_CHECK_HEADERS([linux/msm_ipc.h linux/rmnet_data.h]) +fi + +AM_CONDITIONAL(USE_MSM_IPC, test "x${with_msm_ipc}" = "xyes") + +# Checks for library functions. 
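+# note: AC_HELP_STRING (used for --with-qrtr / --with-msm-ipc above) is the
+# legacy spelling; autoconf >= 2.62 prefers AS_HELP_STRING, as already used
+# for --with-sanitized-headers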
+ +# Does not strictly follow GNU Coding standards +AM_INIT_AUTOMAKE([foreign subdir-objects]) +AC_CONFIG_FILES([Makefile]) +AC_OUTPUT diff --git a/wwan/app/quectel_cm_5G/src/default.script b/wwan/app/quectel_cm_5G/src/default.script new file mode 100644 index 0000000..26b95c1 --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/default.script @@ -0,0 +1,63 @@ +#!/bin/sh +# Busybox udhcpc dispatcher script. Copyright (C) 2009 by Axel Beckert. +# +# Based on the busybox example scripts and the old udhcp source +# package default.* scripts. + +RESOLV_CONF="/etc/resolv.conf" + +case $1 in + bound|renew) + [ -n "$broadcast" ] && BROADCAST="broadcast $broadcast" + [ -n "$subnet" ] && NETMASK="netmask $subnet" + + /sbin/ifconfig $interface $ip $BROADCAST $NETMASK + + if [ -n "$router" ]; then + echo "$0: Resetting default routes" + while /sbin/route del default gw 0.0.0.0 dev $interface; do :; done + + metric=0 + for i in $router; do + /sbin/route add default gw $i dev $interface metric $metric + metric=$(($metric + 1)) + done + fi + + # Update resolver configuration file + R="" + [ -n "$domain" ] && R="domain $domain +" + for i in $dns; do + echo "$0: Adding DNS $i" + R="${R}nameserver $i +" + done + + if [ -x /sbin/resolvconf ]; then + echo -n "$R" | resolvconf -a "${interface}.udhcpc" + else + echo -n "$R" > "$RESOLV_CONF" + fi + ;; + + deconfig) + if [ -x /sbin/resolvconf ]; then + resolvconf -d "${interface}.udhcpc" + fi + /sbin/ifconfig $interface 0.0.0.0 + ;; + + leasefail) + echo "$0: Lease failed: $message" + ;; + + nak) + echo "$0: Received a NAK: $message" + ;; + + *) + echo "$0: Unknown udhcpc command: $1"; + exit 1; + ;; +esac diff --git a/wwan/app/quectel_cm_5G/src/default.script_ip b/wwan/app/quectel_cm_5G/src/default.script_ip new file mode 100644 index 0000000..24f8e59 --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/default.script_ip @@ -0,0 +1,61 @@ +#!/bin/sh +# Busybox udhcpc dispatcher script. Copyright (C) 2009 by Axel Beckert. +# +# Based on the busybox example scripts and the old udhcp source +# package default.* scripts. + +RESOLV_CONF="/etc/resolv.conf" +IPCMD=`which ip` + +case $1 in + bound|renew) + $IPCMD address add broadcast $broadcast $ip/$subnet dev $interface + + if [ -n "$router" ]; then + echo "$0: Resetting default routes" + while $IPCMD route del default dev $interface; do :; done + + metric=0 + for i in $router; do + $IPCMD route add default dev $interface via $router metric $metric + metric=$(($metric + 1)) + done + fi + + # Update resolver configuration file + R="" + [ -n "$domain" ] && R="domain $domain +" + for i in $dns; do + echo "$0: Adding DNS $i" + R="${R}nameserver $i +" + done + + if [ -x /sbin/resolvconf ]; then + echo -n "$R" | resolvconf -a "${interface}.udhcpc" + else + echo -n "$R" > "$RESOLV_CONF" + fi + ;; + + deconfig) + if [ -x /sbin/resolvconf ]; then + resolvconf -d "${interface}.udhcpc" + fi + $IPCMD address flush dev $interface + ;; + + leasefail) + echo "$0: Lease failed: $message" + ;; + + nak) + echo "$0: Received a NAK: $message" + ;; + + *) + echo "$0: Unknown udhcpc command: $1"; + exit 1; + ;; +esac diff --git a/wwan/app/quectel_cm_5G/src/device.c b/wwan/app/quectel_cm_5G/src/device.c new file mode 100644 index 0000000..fba46c6 --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/device.c @@ -0,0 +1,746 @@ +/****************************************************************************** + @file device.c + @brief QMI device dirver. + + DESCRIPTION + Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules. 
+
+  INITIALIZATION AND SEQUENCING REQUIREMENTS
+  None.
+
+  ---------------------------------------------------------------------------
+  Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved.
+  Quectel Wireless Solution Proprietary and Confidential.
+  ---------------------------------------------------------------------------
+******************************************************************************/
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+#include <limits.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <net/if.h>
+#include <linux/usbdevice_fs.h>
+
+#include "QMIThread.h"
+#include "ethtool-copy.h"
+
+#define USB_CLASS_VENDOR_SPEC 0xff
+#define USB_CLASS_COMM 2
+#define USB_CDC_SUBCLASS_ACM 0x02
+#define USB_CDC_SUBCLASS_ETHERNET 0x06
+#define USB_CDC_SUBCLASS_NCM 0x0d
+#define USB_CDC_SUBCLASS_MBIM 0x0e
+#define USB_CLASS_WIRELESS_CONTROLLER 0xe0
+
+#define CM_MAX_PATHLEN 256
+
+#define CM_INVALID_VAL (~((int)0))
+/* Read the first line of file 'fname' and convert it to a number in the
+ * given base; return that number, or CM_INVALID_VAL on failure. */
+static int file_get_value(const char *fname, int base)
+{
+    FILE *fp = NULL;
+    long num;
+    char buff[32 + 1] = {'\0'};
+    char *endptr = NULL;
+
+    fp = fopen(fname, "r");
+    if (!fp) goto error;
+    if (fgets(buff, sizeof(buff), fp) == NULL)
+        goto error;
+    fclose(fp);
+    fp = NULL;
+
+    num = strtol(buff, &endptr, base);
+    if (errno == ERANGE && (num == LONG_MAX || num == LONG_MIN))
+        goto error;
+    /* if there is no digit in buff */
+    if (endptr == buff)
+        goto error;
+
+    if (debug_qmi)
+        dbg_time("(%s) = %lx", fname, num);
+    return (int)num;
+
+error:
+    if (fp) fclose(fp);
+    return CM_INVALID_VAL;
+}
+
+/*
+ * Search the directory 'dirname' and return its first child in 'buff'.
+ * '.' and '..' are ignored; if 'prefix' is non-empty, only names
+ * starting with it match.
+ */
+static int dir_get_child(const char *dirname, char *buff, unsigned bufsize, const char *prefix)
+{
+    struct dirent *entptr = NULL;
+    DIR *dirptr;
+
+    buff[0] = 0;
+
+    dirptr = opendir(dirname);
+    if (!dirptr)
+        return -1;
+
+    while ((entptr = readdir(dirptr))) {
+        if (entptr->d_name[0] == '.')
+            continue;
+        if (prefix && strlen(prefix) && strncmp(entptr->d_name, prefix, strlen(prefix)))
+            continue;
+        snprintf(buff, bufsize, "%.31s", entptr->d_name);
+        break;
+    }
+    closedir(dirptr);
+
+    return 0;
+}
+
+static int conf_get_val(const char *fname, const char *key)
+{
+    char buff[128] = {'\0'};
+    FILE *fp = fopen(fname, "r");
+    if (!fp)
+        return CM_INVALID_VAL;
+
+    while (fgets(buff, sizeof(buff)-1, fp)) {
+        char prefix[128] = {'\0'};
+        char tail[128] = {'\0'};
+        /* To eliminate a cppcheck warning: assume string length is no more than 15 */
+        sscanf(buff, "%15[^=]=%15s", prefix, tail);
+        if (!strncasecmp(prefix, key, strlen(key))) {
+            fclose(fp);
+            return atoi(tail);
+        }
+    }
+
+    fclose(fp);
+    return CM_INVALID_VAL;
+}
+
+static void query_usb_device_info(char *path, struct usb_device_info *p) {
+    size_t offset = strlen(path);
+
+    memset(p, 0, sizeof(*p));
+
+    path[offset] = '\0';
+    strcat(path, "/idVendor");
+    p->idVendor = file_get_value(path, 16);
+
+    if (p->idVendor == CM_INVALID_VAL)
+        return;
+
+    path[offset] = '\0';
+    strcat(path, "/idProduct");
+    p->idProduct = file_get_value(path, 16);
+
+    path[offset] = '\0';
+    strcat(path, "/busnum");
+    p->busnum = file_get_value(path, 10);
+
+    path[offset] = '\0';
+    strcat(path, "/devnum");
+    p->devnum = file_get_value(path, 10);
+
+    path[offset] = '\0';
+    strcat(path, "/bNumInterfaces");
+    p->bNumInterfaces = file_get_value(path, 10);
+
+    path[offset] = '\0';
+}
+
+static void query_usb_interface_info(char *path, struct usb_interface_info *p) {
+    char driver[128];
+    size_t offset = strlen(path);
+    int n;
+
+    memset(p, 0, sizeof(*p));
+
+    path[offset] = '\0';
+    strcat(path, "/bNumEndpoints");
+    p->bNumEndpoints = file_get_value(path, 16);
+
+    path[offset] = '\0';
+    strcat(path, "/bInterfaceClass");
+    p->bInterfaceClass = file_get_value(path, 16);
+
+    path[offset] = '\0';
+    strcat(path, "/bInterfaceSubClass");
+    p->bInterfaceSubClass = file_get_value(path, 16);
+
+    path[offset] = '\0';
+    strcat(path, "/bInterfaceProtocol");
+    p->bInterfaceProtocol = file_get_value(path, 16);
+
+    path[offset] = '\0';
+    strcat(path, "/driver");
+    n = readlink(path, driver, sizeof(driver) - 1);
+    if (n > 0) {
+        driver[n] = 0;
+        if (debug_qmi) dbg_time("driver -> %s", driver);
+        n = strlen(driver);
+        while (n > 0) {
+            if (driver[n] == '/')
+                break;
+            n--;
+        }
+        strncpy(p->driver, &driver[n+1], sizeof(p->driver) - 1);
+    }
+
+    path[offset] = '\0';
+}
+
+static int detect_path_cdc_wdm_or_qcqmi(char *path, char *devname, size_t bufsize)
+{
+    size_t offset = strlen(path);
+    char tmp[32];
+
+    devname[0] = 0;
+
+    if (access(path, R_OK))
+        return -1;
+
+    path[offset] = '\0';
+    strcat(path, "/GobiQMI");
+    if (!access(path, R_OK))
+        goto step_1;
+
+    path[offset] = '\0';
+    strcat(path, "/usbmisc");
+    if (!access(path, R_OK))
+        goto step_1;
+
+    path[offset] = '\0';
+    strcat(path, "/usb");
+    if (!access(path, R_OK))
+        goto step_1;
+
+    return -1;
+
+step_1:
+    /* get device (qcqmiX|cdc-wdmX) */
+    if (debug_qmi) dbg_time("%s", path);
+    dir_get_child(path, tmp, sizeof(tmp), NULL);
+    if (tmp[0] == '\0')
+        return -1;
+
+    /* There is a chance that no device node (qcqmiX|cdc-wdmX) was generated; warn the user about it! */
+    snprintf(devname, bufsize, "/dev/%s", tmp);
+    if (access(devname, R_OK | F_OK) && errno == ENOENT)
+    {
+        int major, minor;
+
+        dbg_time("access %s failed, errno: %d (%s)", devname, errno, strerror(errno));
+        strcat(path, "/");
+        strcat(path, tmp);
+        strcat(path, "/uevent");
+        major = conf_get_val(path, "MAJOR");
+        minor = conf_get_val(path, "MINOR");
+
+        if (major == CM_INVALID_VAL || minor == CM_INVALID_VAL)
+            dbg_time("get major and minor failed");
+        else if (mknod(devname, S_IFCHR|0666, (((major & 0xfff) << 8) | (minor & 0xff) | ((minor & 0xfff00) << 12))))
+            dbg_time("please mknod %s c %d %d", devname, major, minor);
+    }
+
+    return 0;
+}
+
+/* To detect the device info of the modem.
+ * return:
+ *  FALSE -> fail
+ *  TRUE -> ok
+ */
+BOOL qmidevice_detect(char *qmichannel, char *usbnet_adapter, unsigned bufsize, PROFILE_T *profile) {
+    struct dirent* ent = NULL;
+    DIR *pDir;
+    const char *rootdir = "/sys/bus/usb/devices";
+    struct {
+        char path[255*2];
+    } *pl;
+    pl = (typeof(pl)) malloc(sizeof(*pl));
+    memset(pl, 0x00, sizeof(*pl));
+
+    pDir = opendir(rootdir);
+    if (!pDir) {
+        dbg_time("opendir %s failed: %s", rootdir, strerror(errno));
+        goto error;
+    }
+
+    while ((ent = readdir(pDir)) != NULL) {
+        char netcard[32+1] = {'\0'};
+        char devname[32+5] = {'\0'}; //+strlen("/dev/")
+        int netIntf;
+        int driver_type;
+
+        if (ent->d_name[0] == 'u') /* skip usbX root-hub entries */
+            continue;
+
+        snprintf(pl->path, sizeof(pl->path), "%s/%s", rootdir, ent->d_name);
+        query_usb_device_info(pl->path, &profile->usb_dev);
+        if (profile->usb_dev.idVendor == CM_INVALID_VAL)
+            continue;
+
+        if (profile->usb_dev.idVendor == 0x2c7c || profile->usb_dev.idVendor == 0x05c6) {
+            dbg_time("Find %s/%s idVendor=0x%x idProduct=0x%x, bus=0x%03x, dev=0x%03x",
+                rootdir, ent->d_name, profile->usb_dev.idVendor, profile->usb_dev.idProduct,
+                profile->usb_dev.busnum, profile->usb_dev.devnum);
+        }
+
+        /* get network interface */
+        /* NOTICE: there is a case where bNumInterfaces=6 but the net interface is 8 */
+        /* toolchain-mips_24kc_gcc-5.4.0_musl does not support GLOB_BRACE */
+        /* RG500U's MBIM is at interface 0 */
+        for (netIntf = 0; netIntf < (profile->usb_dev.bNumInterfaces + 8); netIntf++) {
+            snprintf(pl->path, sizeof(pl->path), "%s/%s:1.%d/net", rootdir, ent->d_name, netIntf);
+            dir_get_child(pl->path, netcard, sizeof(netcard), NULL);
+            if (netcard[0])
+                break;
+        }
+
+        if (netcard[0] == '\0') { //for centos 2.6.x
+            const char *n = "usb0";
+            const char *c = "qcqmi0";
+
+            snprintf(pl->path, sizeof(pl->path), "%s/%s:1.4/net:%s", rootdir, ent->d_name, n);
+            if (!access(pl->path, F_OK)) {
+                snprintf(pl->path, sizeof(pl->path), "%s/%s:1.4/GobiQMI:%s", rootdir, ent->d_name, c);
+                if (!access(pl->path, F_OK)) {
+                    snprintf(qmichannel, bufsize, "/dev/%s", c);
+                    snprintf(usbnet_adapter, bufsize, "%s", n);
+                    snprintf(pl->path, sizeof(pl->path), "%s/%s:1.4", rootdir, ent->d_name);
+                    query_usb_interface_info(pl->path, &profile->usb_intf);
+                    break;
+                }
+            }
+        }
+
+        if (netcard[0] == '\0')
+            continue;
+
+        /* not '-i iface' */
+        if (usbnet_adapter[0] && strcmp(usbnet_adapter, netcard))
+            continue;
+
+        snprintf(pl->path, sizeof(pl->path), "%s/%s:1.%d", rootdir, ent->d_name, netIntf);
+        query_usb_interface_info(pl->path, &profile->usb_intf);
+        driver_type = get_driver_type(profile);
+
+        if (driver_type == SOFTWARE_QMI || driver_type == SOFTWARE_MBIM) {
+            detect_path_cdc_wdm_or_qcqmi(pl->path, devname, sizeof(devname));
+        }
+        else if (driver_type == SOFTWARE_ECM_RNDIS_NCM)
+        {
+            int atIntf = -1;
+
+            if (profile->usb_dev.idVendor == 0x2c7c) { //Quectel
+                switch (profile->usb_dev.idProduct) {
+                case 0x0901: //EC200U
+                case 0x8101: //RG801H
+                    atIntf = 2;
+                    break;
+                case 0x0900: //RG500U
+                    atIntf = 4;
+                    break;
+                case 0x6026: //EC200T
+                case 0x6005: //EC200A
+                case 0x6002: //EC200S
+                case 0x6001: //EC100Y
+                    atIntf = 3;
+                    break;
+                default:
+                    dbg_time("unknown AT interface for USB idProduct:%04x\n", profile->usb_dev.idProduct);
+                    break;
+                }
+            }
+
+            if (atIntf != -1) {
+                snprintf(pl->path, sizeof(pl->path), "%s/%s:1.%d", rootdir, ent->d_name, atIntf);
+                dir_get_child(pl->path, devname, sizeof(devname), "tty");
+                if (devname[0] && !strcmp(devname, "tty")) {
+                    snprintf(pl->path, sizeof(pl->path), "%s/%s:1.%d/tty", rootdir, ent->d_name, atIntf);
                    dir_get_child(pl->path, devname, sizeof(devname), "tty");
+                }
+            }
+        }
+
+        if (netcard[0] && devname[0]) {
+            if (devname[0] == '/')
+                snprintf(qmichannel, bufsize, "%s", devname);
+            else
+                snprintf(qmichannel, bufsize, "/dev/%s", devname);
+            snprintf(usbnet_adapter, bufsize, "%s", netcard);
+            dbg_time("Auto find qmichannel = %s", qmichannel);
+            dbg_time("Auto find usbnet_adapter = %s", usbnet_adapter);
+            break;
+        }
+    }
+    closedir(pDir);
+
+    if (qmichannel[0] == '\0' || usbnet_adapter[0] == '\0') {
+        dbg_time("network interface '%s' or qmidev '%s' does not exist", usbnet_adapter, qmichannel);
+        goto error;
+    }
+    free(pl);
+    return TRUE;
+error:
+    free(pl);
+    return FALSE;
+}
+
+int mhidevice_detect(char *qmichannel, char *usbnet_adapter, PROFILE_T *profile) {
+    struct dirent* ent = NULL;
+    DIR *pDir;
+    const char *rootdir_mhi[] = {"/sys/bus/mhi_q/devices", "/sys/bus/mhi/devices", NULL};
+    int i = 0;
+    char path[256];
+    int find = 0;
+
+    while (rootdir_mhi[i]) {
+        const char *rootdir = rootdir_mhi[i++];
+
+        pDir = opendir(rootdir);
+        if (!pDir) {
+            if (errno != ENOENT)
+                dbg_time("opendir %s failed: %s", rootdir, strerror(errno));
+            continue;
+        }
+
+        while ((ent = readdir(pDir)) != NULL) {
+            char netcard[32] = {'\0'};
+            char devname[32] = {'\0'};
+            int software_interface = SOFTWARE_QMI;
+            char *pNode = NULL;
+
+            pNode = strstr(ent->d_name, "_IP_HW0"); //0306_00.01.00_IP_HW0
+            if (!pNode)
+                continue;
+
+            snprintf(path, sizeof(path), "%s/%.32s/net", rootdir, ent->d_name);
+            dir_get_child(path, netcard, sizeof(netcard), NULL);
+            if (!netcard[0])
+                continue;
+
+            if (usbnet_adapter[0] && strcmp(netcard, usbnet_adapter)) //not '-i x'
+                continue;
+
+            if (!strcmp(rootdir, "/sys/bus/mhi/devices")) {
+                snprintf(path, sizeof(path), "%s/%.13s_IPCR", rootdir, ent->d_name); // 13 is strlen("0306_00.01.00")
+                if (!access(path, F_OK)) {
+                    /* we also need 'cat /dev/mhi_0306_00.01.00_pipe_14' to enable rmnet, much like USB's DTR,
+                       or we will get the error 'requestSetEthMode QMUXResult = 0x1, QMUXError = 0x46' */
+                    sprintf(usbnet_adapter, "%s", netcard);
+                    sprintf(qmichannel, "qrtr-%d", 3); // 3 is the sdx modem's node id
+                    profile->software_interface = SOFTWARE_QRTR;
+                    find = 1;
+                    break;
+                }
+                continue;
+            }
+
+            snprintf(path, sizeof(path), "%s/%.13s_IPCR", rootdir, ent->d_name);
+            if (access(path, F_OK)) {
+                snprintf(path, sizeof(path), "%s/%.13s_QMI0", rootdir, ent->d_name);
+                if (access(path, F_OK)) {
+                    snprintf(path, sizeof(path), "%s/%.13s_MBIM", rootdir, ent->d_name);
+                    if (!access(path, F_OK))
+                        software_interface = SOFTWARE_MBIM;
+                }
+            }
+            if (access(path, F_OK))
+                continue;
+
+            strncat(path, "/mhi_uci_q", sizeof(path) - strlen(path) - 1);
+            dir_get_child(path, devname, sizeof(devname), NULL);
+            if (!devname[0])
+                continue;
+
+            sprintf(usbnet_adapter, "%s", netcard);
+            sprintf(qmichannel, "/dev/%s", devname);
+            profile->software_interface = software_interface;
+            find = 1;
+            break;
+        }
+
+        closedir(pDir);
+    }
+
+    return find;
+}
+
+int atdevice_detect(char *atchannel, char *usbnet_adapter, PROFILE_T *profile) {
+    if (!access("/sys/class/net/sipa_dummy0", F_OK)) {
+        strcpy(usbnet_adapter, "sipa_dummy0");
+        snprintf(profile->qmapnet_adapter, sizeof(profile->qmapnet_adapter), "%s%d", "pcie", profile->pdp - 1);
+    }
+    else {
+        dbg_time("atdevice_detect failed");
+        goto error;
+    }
+
+    if (!access("/dev/stty_nr31", F_OK)) {
+        strcpy(atchannel, "/dev/stty_nr31");
+        profile->software_interface = SOFTWARE_ECM_RNDIS_NCM;
+    }
+    else {
+        goto error;
+    }
+
+    return 1;
+error:
+    return 0;
+}
+
+
+int get_driver_type(PROFILE_T *profile)
+{
+    /* QMI_WWAN */
+    if (profile->usb_intf.bInterfaceClass == USB_CLASS_VENDOR_SPEC) {
+        return SOFTWARE_QMI;
+    }
+    else if (profile->usb_intf.bInterfaceClass == USB_CLASS_COMM) {
+        switch (profile->usb_intf.bInterfaceSubClass) {
+        case USB_CDC_SUBCLASS_MBIM:
+            return SOFTWARE_MBIM;
+        case USB_CDC_SUBCLASS_ETHERNET:
+        case USB_CDC_SUBCLASS_NCM:
+            return SOFTWARE_ECM_RNDIS_NCM;
+        default:
+            break;
+        }
+    }
+    else if (profile->usb_intf.bInterfaceClass == USB_CLASS_WIRELESS_CONTROLLER) {
+        if (profile->usb_intf.bInterfaceSubClass == 1 && profile->usb_intf.bInterfaceProtocol == 3)
+            return SOFTWARE_ECM_RNDIS_NCM;
+    }
+
+    dbg_time("%s unknown bInterfaceClass=%d, bInterfaceSubClass=%d", __func__,
+        profile->usb_intf.bInterfaceClass, profile->usb_intf.bInterfaceSubClass);
+    return DRV_INVALID;
+}
+
+struct usbfs_getdriver
+{
+    unsigned int interface;
+    char driver[255 + 1];
+};
+
+struct usbfs_ioctl
+{
+    int ifno;       /* interface 0..N ; negative numbers reserved */
+    int ioctl_code; /* MUST encode size + direction of data so the
+                     * macros in <asm/ioctl.h> give correct values */
+    void *data;     /* param buffer (in, or out) */
+};
+
+#define IOCTL_USBFS_DISCONNECT _IO('U', 22)
+#define IOCTL_USBFS_CONNECT _IO('U', 23)
+
+int usbfs_is_kernel_driver_alive(int fd, int ifnum)
+{
+    struct usbfs_getdriver getdrv;
+    getdrv.interface = ifnum;
+    if (ioctl(fd, USBDEVFS_GETDRIVER, &getdrv) < 0) {
+        dbg_time("%s ioctl USBDEVFS_GETDRIVER failed, kernel driver may be inactive", __func__);
+        return 0;
+    }
+    dbg_time("%s interface %d is bound to kernel driver %s", __func__, ifnum, getdrv.driver);
+    return 1;
+}
+
+void usbfs_detach_kernel_driver(int fd, int ifnum)
+{
+    struct usbfs_ioctl operate;
+    operate.data = NULL;
+    operate.ifno = ifnum;
+    operate.ioctl_code = IOCTL_USBFS_DISCONNECT;
+    if (ioctl(fd, USBDEVFS_IOCTL, &operate) < 0) {
+        dbg_time("%s detach kernel driver failed", __func__);
+    } else {
+        dbg_time("%s detach kernel driver success", __func__);
+    }
+}
+
+void usbfs_attach_kernel_driver(int fd, int ifnum)
+{
+    struct usbfs_ioctl operate;
+    operate.data = NULL;
+    operate.ifno = ifnum;
+    operate.ioctl_code = IOCTL_USBFS_CONNECT;
+    if (ioctl(fd, USBDEVFS_IOCTL, &operate) < 0) {
+        dbg_time("%s attach kernel driver failed", __func__);
+    } else {
+        dbg_time("%s attach kernel driver success", __func__);
+    }
+}
+
+int reattach_driver(PROFILE_T *profile)
+{
+    int ifnum = 4;
+    int fd;
+    char devpath[128] = {'\0'};
+    snprintf(devpath, sizeof(devpath), "/dev/bus/usb/%03d/%03d", profile->usb_dev.busnum, profile->usb_dev.devnum);
+    fd = open(devpath, O_RDWR | O_NOCTTY);
+    if (fd < 0)
+    {
+        dbg_time("%s fail to open %s", __func__, devpath);
+        return -1;
+    }
+    usbfs_detach_kernel_driver(fd, ifnum);
+    usbfs_attach_kernel_driver(fd, ifnum);
+    close(fd);
+    return 0;
+}
+
+#define SIOCETHTOOL 0x8946
+int ql_get_netcard_driver_info(const char *devname)
+{
+    int fd = -1;
+    struct ethtool_drvinfo drvinfo;
+    struct ifreq ifr; /* ifreq suitable for ethtool ioctl */
+
+    memset(&ifr, 0, sizeof(ifr));
+    strcpy(ifr.ifr_name, devname);
+
+    fd = socket(AF_INET, SOCK_DGRAM, 0);
+    if (fd < 0) {
+        dbg_time("Cannot get control socket: errno(%d)(%s)", errno, strerror(errno));
+        return -1;
+    }
+
+    drvinfo.cmd = ETHTOOL_GDRVINFO;
+    ifr.ifr_data = (void *)&drvinfo;
+
+    if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
+        dbg_time("ioctl() error: errno(%d)(%s)", errno, strerror(errno));
+        close(fd);
+        return -1;
+    }
+
+    dbg_time("netcard driver = %s, driver version = %s", drvinfo.driver, drvinfo.version);
+
+    close(fd);
+
+    return 0;
+}
+
+int ql_get_netcard_carrier_state(const char *devname)
+{
+    int fd = -1;
+    struct ethtool_value edata;
+    struct ifreq ifr; /* ifreq suitable for ethtool ioctl */
+
+    memset(&ifr, 0, sizeof(ifr));
+    strcpy(ifr.ifr_name, devname);
+
+    fd = socket(AF_INET, SOCK_DGRAM, 0);
+    if (fd < 0) {
+        dbg_time("Cannot get control socket: errno(%d)(%s)", errno, strerror(errno));
+        return -1;
+    }
+
+    edata.cmd = ETHTOOL_GLINK;
+    edata.data = 0;
+    ifr.ifr_data = (void *)&edata;
+
+    if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
+        dbg_time("ioctl('%s') error: errno(%d)(%s)", devname, errno, strerror(errno));
+        close(fd);
+        return -1;
+    }
+
+    if (!edata.data)
+        dbg_time("netcard carrier = %d", edata.data);
+
+    close(fd);
+
+    return edata.data;
+}
+
+static void *catch_log(void *arg)
+{
+    PROFILE_T *profile = (PROFILE_T *)arg;
+    int nreads = 0;
+    char tbuff[256+32];
+    char filter[32];
+    size_t tsize = strlen(get_time()) + 1;
+
+    snprintf(filter, sizeof(filter), ":%d:%03d:", profile->usb_dev.busnum, profile->usb_dev.devnum);
+
+    while(1) {
+        nreads = read(profile->usbmon_fd, tbuff + tsize, sizeof(tbuff) - tsize - 1);
+        if (nreads <= 0) {
+            if (nreads == -1 && errno == EINTR)
+                continue;
+            break;
+        }
+
+        tbuff[tsize+nreads] = '\0'; // printf("%s", buff);
+
+        if (!strstr(tbuff+tsize, filter))
+            continue;
+
+        snprintf(tbuff, sizeof(tbuff), "%s", get_time());
+        tbuff[tsize-1] = ' ';
+
+        fwrite(tbuff, strlen(tbuff), 1, profile->usbmon_logfile_fp);
+    }
+
+    return NULL;
+}
+
+int ql_capture_usbmon_log(PROFILE_T *profile, const char *log_path)
+{
+    char usbmon_path[256];
+    pthread_t pt;
+    pthread_attr_t attr;
+
+    if (access("/sys/module/usbmon", F_OK)) {
+        dbg_time("usbmon is not loaded, please execute \"modprobe usbmon\" or \"insmod usbmon.ko\"");
+        return -1;
+    }
+
+    if (access("/sys/kernel/debug/usb", F_OK)) {
+        dbg_time("debugfs is not mounted, please execute \"mount -t debugfs none_debugs /sys/kernel/debug\"");
+        return -1;
+    }
+
+    snprintf(usbmon_path, sizeof(usbmon_path), "/sys/kernel/debug/usb/usbmon/%du", profile->usb_dev.busnum);
+    profile->usbmon_fd = open(usbmon_path, O_RDONLY);
+    if (profile->usbmon_fd < 0) {
+        dbg_time("open %s error(%d) (%s)", usbmon_path, errno, strerror(errno));
+        return -1;
+    }
+
+    snprintf(usbmon_path, sizeof(usbmon_path), "cat /sys/kernel/debug/usb/devices >> %s", log_path);
+    if (system(usbmon_path) == -1) {};
+
+    profile->usbmon_logfile_fp = fopen(log_path, "ab"); /* append; do not truncate the device dump written above */
+    if (!profile->usbmon_logfile_fp) {
+        dbg_time("open %s error(%d) (%s)", log_path, errno, strerror(errno));
+        close(profile->usbmon_fd);
+        profile->usbmon_fd = -1;
+        return -1;
+    }
+
+    pthread_attr_init(&attr);
+    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
+
+    pthread_create(&pt, &attr, catch_log, (void *)profile);
+
+    return 0;
+}
+
+void ql_stop_usbmon_log(PROFILE_T *profile) {
+    if (profile->usbmon_fd > 0)
+        close(profile->usbmon_fd);
+    if (profile->usbmon_logfile_fp)
+        fclose(profile->usbmon_logfile_fp);
+}
diff --git a/wwan/app/quectel_cm_5G/src/ethtool-copy.h b/wwan/app/quectel_cm_5G/src/ethtool-copy.h
new file mode 100644
index 0000000..b5515c2
--- /dev/null
+++ b/wwan/app/quectel_cm_5G/src/ethtool-copy.h
@@ -0,0 +1,1100 @@
+/*
+ * ethtool.h: Defines for Linux ethtool.
+ *
+ * Copyright (C) 1998 David S. Miller (davem@redhat.com)
+ * Copyright 2001 Jeff Garzik
+ * Portions Copyright 2001 Sun Microsystems (thockin@sun.com)
+ * Portions Copyright 2002 Intel (eli.kupermann@intel.com,
+ *                                christopher.leech@intel.com,
+ *                                scott.feldman@intel.com)
+ * Portions Copyright (C) Sun Microsystems 2008
+ */
+
+#ifndef _LINUX_ETHTOOL_H
+#define _LINUX_ETHTOOL_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+/* This should work for both 32 and 64 bit userland. */
+struct ethtool_cmd {
+	__u32	cmd;
+	__u32	supported;	/* Features this interface supports */
+	__u32	advertising;	/* Features this interface advertises */
+	__u16	speed;		/* The forced speed (lower bits) in
+				 * Mbps. Please use
+				 * ethtool_cmd_speed()/_set() to
+				 * access it */
+	__u8	duplex;		/* Duplex, half or full */
+	__u8	port;		/* Which connector port */
+	__u8	phy_address;	/* MDIO PHY address (PRTAD for clause 45).
+				 * May be read-only or read-write
+				 * depending on the driver.
+				 */
+	__u8	transceiver;	/* Which transceiver to use */
+	__u8	autoneg;	/* Enable or disable autonegotiation */
+	__u8	mdio_support;	/* MDIO protocols supported.  Read-only.
+				 * Not set by all drivers.
+				 */
+	__u32	maxtxpkt;	/* Tx pkts before generating tx int */
+	__u32	maxrxpkt;	/* Rx pkts before generating rx int */
+	__u16	speed_hi;	/* The forced speed (upper
+				 * bits) in Mbps. Please use
+				 * ethtool_cmd_speed()/_set() to
+				 * access it */
+	__u8	eth_tp_mdix;	/* twisted pair MDI-X status */
+	__u8	eth_tp_mdix_ctrl; /* twisted pair MDI-X control, when set,
+				   * link should be renegotiated if necessary
+				   */
+	__u32	lp_advertising;	/* Features the link partner advertises */
+	__u32	reserved[2];
+};
+
+static __inline__ void ethtool_cmd_speed_set(struct ethtool_cmd *ep,
+					     __u32 speed)
+{
+
+	ep->speed = (__u16)speed;
+	ep->speed_hi = (__u16)(speed >> 16);
+}
+
+static __inline__ __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep)
+{
+	return (ep->speed_hi << 16) | ep->speed;
+}
+
+/* Device supports clause 22 register access to PHY or peripherals
+ * using the interface defined in <linux/mii.h>.  This should not be
+ * set if there are known to be no such peripherals present or if
+ * the driver only emulates clause 22 registers for compatibility.
+ */
+#define ETH_MDIO_SUPPORTS_C22	1
+
+/* Device supports clause 45 register access to PHY or peripherals
+ * using the interface defined in <linux/mii.h> and <linux/mdio.h>.
+ * This should not be set if there are known to be no such peripherals
+ * present.
+ */
+#define ETH_MDIO_SUPPORTS_C45	2
+
+#define ETHTOOL_FWVERS_LEN	32
+#define ETHTOOL_BUSINFO_LEN	32
+/* these strings are set to whatever the driver author decides... */
+struct ethtool_drvinfo {
+	__u32	cmd;
+	char	driver[32];	/* driver short name, "tulip", "eepro100" */
+	char	version[32];	/* driver version string */
+	char	fw_version[ETHTOOL_FWVERS_LEN];	/* firmware version string */
+	char	bus_info[ETHTOOL_BUSINFO_LEN];	/* Bus info for this IF. */
+				/* For PCI devices, use pci_name(pci_dev). */
+	char	reserved1[32];
+	char	reserved2[12];
+	/*
+	 * Some struct members below are filled in
+	 * using ops->get_sset_count().  Obtaining
+	 * this info from ethtool_drvinfo is now
+	 * deprecated; Use ETHTOOL_GSSET_INFO
+	 * instead.
+ */ + __u32 n_priv_flags; /* number of flags valid in ETHTOOL_GPFLAGS */ + __u32 n_stats; /* number of u64's from ETHTOOL_GSTATS */ + __u32 testinfo_len; + __u32 eedump_len; /* Size of data from ETHTOOL_GEEPROM (bytes) */ + __u32 regdump_len; /* Size of data from ETHTOOL_GREGS (bytes) */ +}; + +#define SOPASS_MAX 6 +/* wake-on-lan settings */ +struct ethtool_wolinfo { + __u32 cmd; + __u32 supported; + __u32 wolopts; + __u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ +}; + +/* for passing single values */ +struct ethtool_value { + __u32 cmd; + __u32 data; +}; + +/* for passing big chunks of data */ +struct ethtool_regs { + __u32 cmd; + __u32 version; /* driver-specific, indicates different chips/revs */ + __u32 len; /* bytes */ + __u8 data[0]; +}; + +/* for passing EEPROM chunks */ +struct ethtool_eeprom { + __u32 cmd; + __u32 magic; + __u32 offset; /* in bytes */ + __u32 len; /* in bytes */ + __u8 data[0]; +}; + +/** + * struct ethtool_eee - Energy Efficient Ethernet information + * @cmd: ETHTOOL_{G,S}EEE + * @supported: Mask of %SUPPORTED_* flags for the speed/duplex combinations + * for which there is EEE support. + * @advertised: Mask of %ADVERTISED_* flags for the speed/duplex combinations + * advertised as eee capable. + * @lp_advertised: Mask of %ADVERTISED_* flags for the speed/duplex + * combinations advertised by the link partner as eee capable. + * @eee_active: Result of the eee auto negotiation. + * @eee_enabled: EEE configured mode (enabled/disabled). + * @tx_lpi_enabled: Whether the interface should assert its tx lpi, given + * that eee was negotiated. + * @tx_lpi_timer: Time in microseconds the interface delays prior to asserting + * its tx lpi (after reaching 'idle' state). Effective only when eee + * was negotiated and tx_lpi_enabled was set. + */ +struct ethtool_eee { + __u32 cmd; + __u32 supported; + __u32 advertised; + __u32 lp_advertised; + __u32 eee_active; + __u32 eee_enabled; + __u32 tx_lpi_enabled; + __u32 tx_lpi_timer; + __u32 reserved[2]; +}; + +/** + * struct ethtool_modinfo - plugin module eeprom information + * @cmd: %ETHTOOL_GMODULEINFO + * @type: Standard the module information conforms to %ETH_MODULE_SFF_xxxx + * @eeprom_len: Length of the eeprom + * + * This structure is used to return the information to + * properly size memory for a subsequent call to %ETHTOOL_GMODULEEEPROM. + * The type code indicates the eeprom data format + */ +struct ethtool_modinfo { + __u32 cmd; + __u32 type; + __u32 eeprom_len; + __u32 reserved[8]; +}; + +/** + * struct ethtool_coalesce - coalescing parameters for IRQs and stats updates + * @cmd: ETHTOOL_{G,S}COALESCE + * @rx_coalesce_usecs: How many usecs to delay an RX interrupt after + * a packet arrives. + * @rx_max_coalesced_frames: Maximum number of packets to receive + * before an RX interrupt. + * @rx_coalesce_usecs_irq: Same as @rx_coalesce_usecs, except that + * this value applies while an IRQ is being serviced by the host. + * @rx_max_coalesced_frames_irq: Same as @rx_max_coalesced_frames, + * except that this value applies while an IRQ is being serviced + * by the host. + * @tx_coalesce_usecs: How many usecs to delay a TX interrupt after + * a packet is sent. + * @tx_max_coalesced_frames: Maximum number of packets to be sent + * before a TX interrupt. + * @tx_coalesce_usecs_irq: Same as @tx_coalesce_usecs, except that + * this value applies while an IRQ is being serviced by the host. 
+ * @tx_max_coalesced_frames_irq: Same as @tx_max_coalesced_frames, + * except that this value applies while an IRQ is being serviced + * by the host. + * @stats_block_coalesce_usecs: How many usecs to delay in-memory + * statistics block updates. Some drivers do not have an + * in-memory statistic block, and in such cases this value is + * ignored. This value must not be zero. + * @use_adaptive_rx_coalesce: Enable adaptive RX coalescing. + * @use_adaptive_tx_coalesce: Enable adaptive TX coalescing. + * @pkt_rate_low: Threshold for low packet rate (packets per second). + * @rx_coalesce_usecs_low: How many usecs to delay an RX interrupt after + * a packet arrives, when the packet rate is below @pkt_rate_low. + * @rx_max_coalesced_frames_low: Maximum number of packets to be received + * before an RX interrupt, when the packet rate is below @pkt_rate_low. + * @tx_coalesce_usecs_low: How many usecs to delay a TX interrupt after + * a packet is sent, when the packet rate is below @pkt_rate_low. + * @tx_max_coalesced_frames_low: Maximum nuumber of packets to be sent before + * a TX interrupt, when the packet rate is below @pkt_rate_low. + * @pkt_rate_high: Threshold for high packet rate (packets per second). + * @rx_coalesce_usecs_high: How many usecs to delay an RX interrupt after + * a packet arrives, when the packet rate is above @pkt_rate_high. + * @rx_max_coalesced_frames_high: Maximum number of packets to be received + * before an RX interrupt, when the packet rate is above @pkt_rate_high. + * @tx_coalesce_usecs_high: How many usecs to delay a TX interrupt after + * a packet is sent, when the packet rate is above @pkt_rate_high. + * @tx_max_coalesced_frames_high: Maximum number of packets to be sent before + * a TX interrupt, when the packet rate is above @pkt_rate_high. + * @rate_sample_interval: How often to do adaptive coalescing packet rate + * sampling, measured in seconds. Must not be zero. + * + * Each pair of (usecs, max_frames) fields specifies this exit + * condition for interrupt coalescing: + * (usecs > 0 && time_since_first_completion >= usecs) || + * (max_frames > 0 && completed_frames >= max_frames) + * It is illegal to set both usecs and max_frames to zero as this + * would cause interrupts to never be generated. To disable + * coalescing, set usecs = 0 and max_frames = 1. + * + * Some implementations ignore the value of max_frames and use the + * condition: + * time_since_first_completion >= usecs + * This is deprecated. Drivers for hardware that does not support + * counting completions should validate that max_frames == !rx_usecs. + * + * Adaptive RX/TX coalescing is an algorithm implemented by some + * drivers to improve latency under low packet rates and improve + * throughput under high packet rates. Some drivers only implement + * one of RX or TX adaptive coalescing. Anything not implemented by + * the driver causes these values to be silently ignored. + * + * When the packet rate is below @pkt_rate_high but above + * @pkt_rate_low (both measured in packets per second) the + * normal {rx,tx}_* coalescing parameters are used. 
+ */ +struct ethtool_coalesce { + __u32 cmd; + __u32 rx_coalesce_usecs; + __u32 rx_max_coalesced_frames; + __u32 rx_coalesce_usecs_irq; + __u32 rx_max_coalesced_frames_irq; + __u32 tx_coalesce_usecs; + __u32 tx_max_coalesced_frames; + __u32 tx_coalesce_usecs_irq; + __u32 tx_max_coalesced_frames_irq; + __u32 stats_block_coalesce_usecs; + __u32 use_adaptive_rx_coalesce; + __u32 use_adaptive_tx_coalesce; + __u32 pkt_rate_low; + __u32 rx_coalesce_usecs_low; + __u32 rx_max_coalesced_frames_low; + __u32 tx_coalesce_usecs_low; + __u32 tx_max_coalesced_frames_low; + __u32 pkt_rate_high; + __u32 rx_coalesce_usecs_high; + __u32 rx_max_coalesced_frames_high; + __u32 tx_coalesce_usecs_high; + __u32 tx_max_coalesced_frames_high; + __u32 rate_sample_interval; +}; + +/* for configuring RX/TX ring parameters */ +struct ethtool_ringparam { + __u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ + + /* Read only attributes. These indicate the maximum number + * of pending RX/TX ring entries the driver will allow the + * user to set. + */ + __u32 rx_max_pending; + __u32 rx_mini_max_pending; + __u32 rx_jumbo_max_pending; + __u32 tx_max_pending; + + /* Values changeable by the user. The valid values are + * in the range 1 to the "*_max_pending" counterpart above. + */ + __u32 rx_pending; + __u32 rx_mini_pending; + __u32 rx_jumbo_pending; + __u32 tx_pending; +}; + +/** + * struct ethtool_channels - configuring number of network channel + * @cmd: ETHTOOL_{G,S}CHANNELS + * @max_rx: Read only. Maximum number of receive channel the driver support. + * @max_tx: Read only. Maximum number of transmit channel the driver support. + * @max_other: Read only. Maximum number of other channel the driver support. + * @max_combined: Read only. Maximum number of combined channel the driver + * support. Set of queues RX, TX or other. + * @rx_count: Valid values are in the range 1 to the max_rx. + * @tx_count: Valid values are in the range 1 to the max_tx. + * @other_count: Valid values are in the range 1 to the max_other. + * @combined_count: Valid values are in the range 1 to the max_combined. + * + * This can be used to configure RX, TX and other channels. + */ + +struct ethtool_channels { + __u32 cmd; + __u32 max_rx; + __u32 max_tx; + __u32 max_other; + __u32 max_combined; + __u32 rx_count; + __u32 tx_count; + __u32 other_count; + __u32 combined_count; +}; + +/* for configuring link flow control parameters */ +struct ethtool_pauseparam { + __u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ + + /* If the link is being auto-negotiated (via ethtool_cmd.autoneg + * being true) the user may set 'autoneg' here non-zero to have the + * pause parameters be auto-negotiated too. In such a case, the + * {rx,tx}_pause values below determine what capabilities are + * advertised. + * + * If 'autoneg' is zero or the link is not being auto-negotiated, + * then {rx,tx}_pause force the driver to use/not-use pause + * flow control. + */ + __u32 autoneg; + __u32 rx_pause; + __u32 tx_pause; +}; + +#define ETH_GSTRING_LEN 32 +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS, + ETH_SS_PRIV_FLAGS, + ETH_SS_NTUPLE_FILTERS, /* Do not use, GRXNTUPLE is now deprecated */ + ETH_SS_FEATURES, +}; + +/* for passing string sets for data tagging */ +struct ethtool_gstrings { + __u32 cmd; /* ETHTOOL_GSTRINGS */ + __u32 string_set; /* string set id e.c. 
ETH_SS_TEST, etc*/ + __u32 len; /* number of strings in the string set */ + __u8 data[0]; +}; + +struct ethtool_sset_info { + __u32 cmd; /* ETHTOOL_GSSET_INFO */ + __u32 reserved; + __u64 sset_mask; /* input: each bit selects an sset to query */ + /* output: each bit a returned sset */ + __u32 data[0]; /* ETH_SS_xxx count, in order, based on bits + in sset_mask. One bit implies one + __u32, two bits implies two + __u32's, etc. */ +}; + +/** + * enum ethtool_test_flags - flags definition of ethtool_test + * @ETH_TEST_FL_OFFLINE: if set perform online and offline tests, otherwise + * only online tests. + * @ETH_TEST_FL_FAILED: Driver set this flag if test fails. + * @ETH_TEST_FL_EXTERNAL_LB: Application request to perform external loopback + * test. + * @ETH_TEST_FL_EXTERNAL_LB_DONE: Driver performed the external loopback test + */ + +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = (1 << 0), + ETH_TEST_FL_FAILED = (1 << 1), + ETH_TEST_FL_EXTERNAL_LB = (1 << 2), + ETH_TEST_FL_EXTERNAL_LB_DONE = (1 << 3), +}; + +/* for requesting NIC test and getting results*/ +struct ethtool_test { + __u32 cmd; /* ETHTOOL_TEST */ + __u32 flags; /* ETH_TEST_FL_xxx */ + __u32 reserved; + __u32 len; /* result length, in number of u64 elements */ + __u64 data[0]; +}; + +/* for dumping NIC-specific statistics */ +struct ethtool_stats { + __u32 cmd; /* ETHTOOL_GSTATS */ + __u32 n_stats; /* number of u64's being returned */ + __u64 data[0]; +}; + +struct ethtool_perm_addr { + __u32 cmd; /* ETHTOOL_GPERMADDR */ + __u32 size; + __u8 data[0]; +}; + +/* boolean flags controlling per-interface behavior characteristics. + * When reading, the flag indicates whether or not a certain behavior + * is enabled/present. When writing, the flag indicates whether + * or not the driver should turn on (set) or off (clear) a behavior. + * + * Some behaviors may read-only (unconditionally absent or present). + * If such is the case, return EINVAL in the set-flags operation if the + * flag differs from the read-only value. + */ +enum ethtool_flags { + ETH_FLAG_TXVLAN = (1 << 7), /* TX VLAN offload enabled */ + ETH_FLAG_RXVLAN = (1 << 8), /* RX VLAN offload enabled */ + ETH_FLAG_LRO = (1 << 15), /* LRO is enabled */ + ETH_FLAG_NTUPLE = (1 << 27), /* N-tuple filters enabled */ + ETH_FLAG_RXHASH = (1 << 28), +}; + +/* The following structures are for supporting RX network flow + * classification and RX n-tuple configuration. Note, all multibyte + * fields, e.g., ip4src, ip4dst, psrc, pdst, spi, etc. are expected to + * be in network byte order. + */ + +/** + * struct ethtool_tcpip4_spec - flow specification for TCP/IPv4 etc. + * @ip4src: Source host + * @ip4dst: Destination host + * @psrc: Source port + * @pdst: Destination port + * @tos: Type-of-service + * + * This can be used to specify a TCP/IPv4, UDP/IPv4 or SCTP/IPv4 flow. + */ +struct ethtool_tcpip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be16 psrc; + __be16 pdst; + __u8 tos; +}; + +/** + * struct ethtool_ah_espip4_spec - flow specification for IPsec/IPv4 + * @ip4src: Source host + * @ip4dst: Destination host + * @spi: Security parameters index + * @tos: Type-of-service + * + * This can be used to specify an IPsec transport or tunnel over IPv4. 
+ */ +struct ethtool_ah_espip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 spi; + __u8 tos; +}; + +#define ETH_RX_NFC_IP4 1 + +/** + * struct ethtool_usrip4_spec - general flow specification for IPv4 + * @ip4src: Source host + * @ip4dst: Destination host + * @l4_4_bytes: First 4 bytes of transport (layer 4) header + * @tos: Type-of-service + * @ip_ver: Value must be %ETH_RX_NFC_IP4; mask must be 0 + * @proto: Transport protocol number; mask must be 0 + */ +struct ethtool_usrip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 l4_4_bytes; + __u8 tos; + __u8 ip_ver; + __u8 proto; +}; + +union ethtool_flow_union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_tcpip4_spec udp_ip4_spec; + struct ethtool_tcpip4_spec sctp_ip4_spec; + struct ethtool_ah_espip4_spec ah_ip4_spec; + struct ethtool_ah_espip4_spec esp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + struct ethhdr ether_spec; + __u8 hdata[52]; +}; + +/** + * struct ethtool_flow_ext - additional RX flow fields + * @h_dest: destination MAC address + * @vlan_etype: VLAN EtherType + * @vlan_tci: VLAN tag control information + * @data: user defined data + * + * Note, @vlan_etype, @vlan_tci, and @data are only valid if %FLOW_EXT + * is set in &struct ethtool_rx_flow_spec @flow_type. + * @h_dest is valid if %FLOW_MAC_EXT is set. + */ +struct ethtool_flow_ext { + __u8 padding[2]; + unsigned char h_dest[ETH_ALEN]; + __be16 vlan_etype; + __be16 vlan_tci; + __be32 data[2]; +}; + +/** + * struct ethtool_rx_flow_spec - classification rule for RX flows + * @flow_type: Type of match to perform, e.g. %TCP_V4_FLOW + * @h_u: Flow fields to match (dependent on @flow_type) + * @h_ext: Additional fields to match + * @m_u: Masks for flow field bits to be matched + * @m_ext: Masks for additional field bits to be matched + * Note, all additional fields must be ignored unless @flow_type + * includes the %FLOW_EXT or %FLOW_MAC_EXT flag + * (see &struct ethtool_flow_ext description). + * @ring_cookie: RX ring/queue index to deliver to, or %RX_CLS_FLOW_DISC + * if packets should be discarded + * @location: Location of rule in the table. Locations must be + * numbered such that a flow matching multiple rules will be + * classified according to the first (lowest numbered) rule. + */ +struct ethtool_rx_flow_spec { + __u32 flow_type; + union ethtool_flow_union h_u; + struct ethtool_flow_ext h_ext; + union ethtool_flow_union m_u; + struct ethtool_flow_ext m_ext; + __u64 ring_cookie; + __u32 location; +}; + +/** + * struct ethtool_rxnfc - command to get or set RX flow classification rules + * @cmd: Specific command number - %ETHTOOL_GRXFH, %ETHTOOL_SRXFH, + * %ETHTOOL_GRXRINGS, %ETHTOOL_GRXCLSRLCNT, %ETHTOOL_GRXCLSRULE, + * %ETHTOOL_GRXCLSRLALL, %ETHTOOL_SRXCLSRLDEL or %ETHTOOL_SRXCLSRLINS + * @flow_type: Type of flow to be affected, e.g. %TCP_V4_FLOW + * @data: Command-dependent value + * @fs: Flow classification rule + * @rule_cnt: Number of rules to be affected + * @rule_locs: Array of used rule locations + * + * For %ETHTOOL_GRXFH and %ETHTOOL_SRXFH, @data is a bitmask indicating + * the fields included in the flow hash, e.g. %RXH_IP_SRC. The following + * structure fields must not be used. + * + * For %ETHTOOL_GRXRINGS, @data is set to the number of RX rings/queues + * on return. + * + * For %ETHTOOL_GRXCLSRLCNT, @rule_cnt is set to the number of defined + * rules on return. If @data is non-zero on return then it is the + * size of the rule table, plus the flag %RX_CLS_LOC_SPECIAL if the + * driver supports any special location values. 
If that flag is not + * set in @data then special location values should not be used. + * + * For %ETHTOOL_GRXCLSRULE, @fs.@location specifies the location of an + * existing rule on entry and @fs contains the rule on return. + * + * For %ETHTOOL_GRXCLSRLALL, @rule_cnt specifies the array size of the + * user buffer for @rule_locs on entry. On return, @data is the size + * of the rule table, @rule_cnt is the number of defined rules, and + * @rule_locs contains the locations of the defined rules. Drivers + * must use the second parameter to get_rxnfc() instead of @rule_locs. + * + * For %ETHTOOL_SRXCLSRLINS, @fs specifies the rule to add or update. + * @fs.@location either specifies the location to use or is a special + * location value with %RX_CLS_LOC_SPECIAL flag set. On return, + * @fs.@location is the actual rule location. + * + * For %ETHTOOL_SRXCLSRLDEL, @fs.@location specifies the location of an + * existing rule on entry. + * + * A driver supporting the special location values for + * %ETHTOOL_SRXCLSRLINS may add the rule at any suitable unused + * location, and may remove a rule at a later location (lower + * priority) that matches exactly the same set of flows. The special + * values are: %RX_CLS_LOC_ANY, selecting any location; + * %RX_CLS_LOC_FIRST, selecting the first suitable location (maximum + * priority); and %RX_CLS_LOC_LAST, selecting the last suitable + * location (minimum priority). Additional special values may be + * defined in future and drivers must return -%EINVAL for any + * unrecognised value. + */ +struct ethtool_rxnfc { + __u32 cmd; + __u32 flow_type; + __u64 data; + struct ethtool_rx_flow_spec fs; + __u32 rule_cnt; + __u32 rule_locs[0]; +}; + + +/** + * struct ethtool_rxfh_indir - command to get or set RX flow hash indirection + * @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR + * @size: On entry, the array size of the user buffer, which may be zero. + * On return from %ETHTOOL_GRXFHINDIR, the array size of the hardware + * indirection table. + * @ring_index: RX ring/queue index for each hash value + * + * For %ETHTOOL_GRXFHINDIR, a @size of zero means that only the size + * should be returned. For %ETHTOOL_SRXFHINDIR, a @size of zero means + * the table should be reset to default values. This last feature + * is not supported by the original implementations. + */ +struct ethtool_rxfh_indir { + __u32 cmd; + __u32 size; + __u32 ring_index[0]; +}; + +/** + * struct ethtool_rx_ntuple_flow_spec - specification for RX flow filter + * @flow_type: Type of match to perform, e.g. %TCP_V4_FLOW + * @h_u: Flow field values to match (dependent on @flow_type) + * @m_u: Masks for flow field value bits to be ignored + * @vlan_tag: VLAN tag to match + * @vlan_tag_mask: Mask for VLAN tag bits to be ignored + * @data: Driver-dependent data to match + * @data_mask: Mask for driver-dependent data bits to be ignored + * @action: RX ring/queue index to deliver to (non-negative) or other action + * (negative, e.g. %ETHTOOL_RXNTUPLE_ACTION_DROP) + * + * For flow types %TCP_V4_FLOW, %UDP_V4_FLOW and %SCTP_V4_FLOW, where + * a field value and mask are both zero this is treated as if all mask + * bits are set i.e. the field is ignored. 
+ */ +struct ethtool_rx_ntuple_flow_spec { + __u32 flow_type; + union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_tcpip4_spec udp_ip4_spec; + struct ethtool_tcpip4_spec sctp_ip4_spec; + struct ethtool_ah_espip4_spec ah_ip4_spec; + struct ethtool_ah_espip4_spec esp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + struct ethhdr ether_spec; + __u8 hdata[72]; + } h_u, m_u; + + __u16 vlan_tag; + __u16 vlan_tag_mask; + __u64 data; + __u64 data_mask; + + __s32 action; +#define ETHTOOL_RXNTUPLE_ACTION_DROP (-1) /* drop packet */ +#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) /* clear filter */ +}; + +/** + * struct ethtool_rx_ntuple - command to set or clear RX flow filter + * @cmd: Command number - %ETHTOOL_SRXNTUPLE + * @fs: Flow filter specification + */ +struct ethtool_rx_ntuple { + __u32 cmd; + struct ethtool_rx_ntuple_flow_spec fs; +}; + +#define ETHTOOL_FLASH_MAX_FILENAME 128 +enum ethtool_flash_op_type { + ETHTOOL_FLASH_ALL_REGIONS = 0, +}; + +/* for passing firmware flashing related parameters */ +struct ethtool_flash { + __u32 cmd; + __u32 region; + char data[ETHTOOL_FLASH_MAX_FILENAME]; +}; + +/** + * struct ethtool_dump - used for retrieving, setting device dump + * @cmd: Command number - %ETHTOOL_GET_DUMP_FLAG, %ETHTOOL_GET_DUMP_DATA, or + * %ETHTOOL_SET_DUMP + * @version: FW version of the dump, filled in by driver + * @flag: driver dependent flag for dump setting, filled in by driver during + * get and filled in by ethtool for set operation. + * flag must be initialized by macro ETH_FW_DUMP_DISABLE value when + * firmware dump is disabled. + * @len: length of dump data, used as the length of the user buffer on entry to + * %ETHTOOL_GET_DUMP_DATA and this is returned as dump length by driver + * for %ETHTOOL_GET_DUMP_FLAG command + * @data: data collected for get dump data operation + */ + +#define ETH_FW_DUMP_DISABLE 0 + +struct ethtool_dump { + __u32 cmd; + __u32 version; + __u32 flag; + __u32 len; + __u8 data[0]; +}; + +/* for returning and changing feature sets */ + +/** + * struct ethtool_get_features_block - block with state of 32 features + * @available: mask of changeable features + * @requested: mask of features requested to be enabled if possible + * @active: mask of currently enabled features + * @never_changed: mask of features not changeable for any device + */ +struct ethtool_get_features_block { + __u32 available; + __u32 requested; + __u32 active; + __u32 never_changed; +}; + +/** + * struct ethtool_gfeatures - command to get state of device's features + * @cmd: command number = %ETHTOOL_GFEATURES + * @size: in: number of elements in the features[] array; + * out: number of elements in features[] needed to hold all features + * @features: state of features + */ +struct ethtool_gfeatures { + __u32 cmd; + __u32 size; + struct ethtool_get_features_block features[0]; +}; + +/** + * struct ethtool_set_features_block - block with request for 32 features + * @valid: mask of features to be changed + * @requested: values of features to be changed + */ +struct ethtool_set_features_block { + __u32 valid; + __u32 requested; +}; + +/** + * struct ethtool_sfeatures - command to request change in device's features + * @cmd: command number = %ETHTOOL_SFEATURES + * @size: array size of the features[] array + * @features: feature change masks + */ +struct ethtool_sfeatures { + __u32 cmd; + __u32 size; + struct ethtool_set_features_block features[0]; +}; + +/** + * struct ethtool_ts_info - holds a device's timestamping and PHC association + * @cmd: command number = 
%ETHTOOL_GET_TS_INFO + * @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags + * @phc_index: device index of the associated PHC, or -1 if there is none + * @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values + * @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values + * + * The bits in the 'tx_types' and 'rx_filters' fields correspond to + * the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values, + * respectively. For example, if the device supports HWTSTAMP_TX_ON, + * then (1 << HWTSTAMP_TX_ON) in 'tx_types' will be set. + */ +struct ethtool_ts_info { + __u32 cmd; + __u32 so_timestamping; + __s32 phc_index; + __u32 tx_types; + __u32 tx_reserved[3]; + __u32 rx_filters; + __u32 rx_reserved[3]; +}; + +/* + * %ETHTOOL_SFEATURES changes features present in features[].valid to the + * values of corresponding bits in features[].requested. Bits in .requested + * not set in .valid or not changeable are ignored. + * + * Returns %EINVAL when .valid contains undefined or never-changeable bits + * or size is not equal to required number of features words (32-bit blocks). + * Returns >= 0 if request was completed; bits set in the value mean: + * %ETHTOOL_F_UNSUPPORTED - there were bits set in .valid that are not + * changeable (not present in %ETHTOOL_GFEATURES' features[].available) + * those bits were ignored. + * %ETHTOOL_F_WISH - some or all changes requested were recorded but the + * resulting state of bits masked by .valid is not equal to .requested. + * Probably there are other device-specific constraints on some features + * in the set. When %ETHTOOL_F_UNSUPPORTED is set, .valid is considered + * here as though ignored bits were cleared. + * %ETHTOOL_F_COMPAT - some or all changes requested were made by calling + * compatibility functions. Requested offload state cannot be properly + * managed by kernel. + * + * Meaning of bits in the masks are obtained by %ETHTOOL_GSSET_INFO (number of + * bits in the arrays - always multiple of 32) and %ETHTOOL_GSTRINGS commands + * for ETH_SS_FEATURES string set. First entry in the table corresponds to least + * significant bit in features[0] fields. Empty strings mark undefined features. + */ +enum ethtool_sfeatures_retval_bits { + ETHTOOL_F_UNSUPPORTED__BIT, + ETHTOOL_F_WISH__BIT, + ETHTOOL_F_COMPAT__BIT, +}; + +#define ETHTOOL_F_UNSUPPORTED (1 << ETHTOOL_F_UNSUPPORTED__BIT) +#define ETHTOOL_F_WISH (1 << ETHTOOL_F_WISH__BIT) +#define ETHTOOL_F_COMPAT (1 << ETHTOOL_F_COMPAT__BIT) + + +/* CMDs currently supported */ +#define ETHTOOL_GSET 0x00000001 /* Get settings. */ +#define ETHTOOL_SSET 0x00000002 /* Set settings. */ +#define ETHTOOL_GDRVINFO 0x00000003 /* Get driver info. */ +#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers. */ +#define ETHTOOL_GWOL 0x00000005 /* Get wake-on-lan options. */ +#define ETHTOOL_SWOL 0x00000006 /* Set wake-on-lan options. */ +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level. */ +#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation. */ +/* Get link status for host, i.e. whether the interface *and* the + * physical port (if there is one) are up (ethtool_value). */ +#define ETHTOOL_GLINK 0x0000000a +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data. */ +#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ +#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. 
*/ +#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ +#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters. */ +#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ +#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */ +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ +#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable + * (ethtool_value) */ +#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable + * (ethtool_value). */ +#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test. */ +#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ +#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ +#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ +#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ +#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ +#define ETHTOOL_GPERMADDR 0x00000020 /* Get permanent hardware address */ +#define ETHTOOL_GUFO 0x00000021 /* Get UFO enable (ethtool_value) */ +#define ETHTOOL_SUFO 0x00000022 /* Set UFO enable (ethtool_value) */ +#define ETHTOOL_GGSO 0x00000023 /* Get GSO enable (ethtool_value) */ +#define ETHTOOL_SGSO 0x00000024 /* Set GSO enable (ethtool_value) */ +#define ETHTOOL_GFLAGS 0x00000025 /* Get flags bitmap(ethtool_value) */ +#define ETHTOOL_SFLAGS 0x00000026 /* Set flags bitmap(ethtool_value) */ +#define ETHTOOL_GPFLAGS 0x00000027 /* Get driver-private flags bitmap */ +#define ETHTOOL_SPFLAGS 0x00000028 /* Set driver-private flags bitmap */ + +#define ETHTOOL_GRXFH 0x00000029 /* Get RX flow hash configuration */ +#define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */ +#define ETHTOOL_GGRO 0x0000002b /* Get GRO enable (ethtool_value) */ +#define ETHTOOL_SGRO 0x0000002c /* Set GRO enable (ethtool_value) */ +#define ETHTOOL_GRXRINGS 0x0000002d /* Get RX rings available for LB */ +#define ETHTOOL_GRXCLSRLCNT 0x0000002e /* Get RX class rule count */ +#define ETHTOOL_GRXCLSRULE 0x0000002f /* Get RX classification rule */ +#define ETHTOOL_GRXCLSRLALL 0x00000030 /* Get all RX classification rule */ +#define ETHTOOL_SRXCLSRLDEL 0x00000031 /* Delete RX classification rule */ +#define ETHTOOL_SRXCLSRLINS 0x00000032 /* Insert RX classification rule */ +#define ETHTOOL_FLASHDEV 0x00000033 /* Flash firmware to device */ +#define ETHTOOL_RESET 0x00000034 /* Reset hardware */ +#define ETHTOOL_SRXNTUPLE 0x00000035 /* Add an n-tuple filter to device */ +#define ETHTOOL_GRXNTUPLE 0x00000036 /* deprecated */ +#define ETHTOOL_GSSET_INFO 0x00000037 /* Get string set info */ +#define ETHTOOL_GRXFHINDIR 0x00000038 /* Get RX flow hash indir'n table */ +#define ETHTOOL_SRXFHINDIR 0x00000039 /* Set RX flow hash indir'n table */ + +#define ETHTOOL_GFEATURES 0x0000003a /* Get device offload settings */ +#define ETHTOOL_SFEATURES 0x0000003b /* Change device offload settings */ +#define ETHTOOL_GCHANNELS 0x0000003c /* Get no of channels */ +#define ETHTOOL_SCHANNELS 0x0000003d /* Set no of channels */ +#define ETHTOOL_SET_DUMP 0x0000003e /* Set dump settings */ +#define ETHTOOL_GET_DUMP_FLAG 0x0000003f /* Get dump settings */ +#define ETHTOOL_GET_DUMP_DATA 0x00000040 /* Get dump data */ +#define ETHTOOL_GET_TS_INFO 0x00000041 /* Get time stamping and PHC info */ +#define 
ETHTOOL_GMODULEINFO 0x00000042 /* Get plug-in module information */ +#define ETHTOOL_GMODULEEEPROM 0x00000043 /* Get plug-in module eeprom */ +#define ETHTOOL_GEEE 0x00000044 /* Get EEE settings */ +#define ETHTOOL_SEEE 0x00000045 /* Set EEE settings */ + +/* compatibility with older code */ +#define SPARC_ETH_GSET ETHTOOL_GSET +#define SPARC_ETH_SSET ETHTOOL_SSET + +/* Indicates what features are supported by the interface. */ +#define SUPPORTED_10baseT_Half (1 << 0) +#define SUPPORTED_10baseT_Full (1 << 1) +#define SUPPORTED_100baseT_Half (1 << 2) +#define SUPPORTED_100baseT_Full (1 << 3) +#define SUPPORTED_1000baseT_Half (1 << 4) +#define SUPPORTED_1000baseT_Full (1 << 5) +#define SUPPORTED_Autoneg (1 << 6) +#define SUPPORTED_TP (1 << 7) +#define SUPPORTED_AUI (1 << 8) +#define SUPPORTED_MII (1 << 9) +#define SUPPORTED_FIBRE (1 << 10) +#define SUPPORTED_BNC (1 << 11) +#define SUPPORTED_10000baseT_Full (1 << 12) +#define SUPPORTED_Pause (1 << 13) +#define SUPPORTED_Asym_Pause (1 << 14) +#define SUPPORTED_2500baseX_Full (1 << 15) +#define SUPPORTED_Backplane (1 << 16) +#define SUPPORTED_1000baseKX_Full (1 << 17) +#define SUPPORTED_10000baseKX4_Full (1 << 18) +#define SUPPORTED_10000baseKR_Full (1 << 19) +#define SUPPORTED_10000baseR_FEC (1 << 20) +#define SUPPORTED_20000baseMLD2_Full (1 << 21) +#define SUPPORTED_20000baseKR2_Full (1 << 22) +#define SUPPORTED_40000baseKR4_Full (1 << 23) +#define SUPPORTED_40000baseCR4_Full (1 << 24) +#define SUPPORTED_40000baseSR4_Full (1 << 25) +#define SUPPORTED_40000baseLR4_Full (1 << 26) + +/* Indicates what features are advertised by the interface. */ +#define ADVERTISED_10baseT_Half (1 << 0) +#define ADVERTISED_10baseT_Full (1 << 1) +#define ADVERTISED_100baseT_Half (1 << 2) +#define ADVERTISED_100baseT_Full (1 << 3) +#define ADVERTISED_1000baseT_Half (1 << 4) +#define ADVERTISED_1000baseT_Full (1 << 5) +#define ADVERTISED_Autoneg (1 << 6) +#define ADVERTISED_TP (1 << 7) +#define ADVERTISED_AUI (1 << 8) +#define ADVERTISED_MII (1 << 9) +#define ADVERTISED_FIBRE (1 << 10) +#define ADVERTISED_BNC (1 << 11) +#define ADVERTISED_10000baseT_Full (1 << 12) +#define ADVERTISED_Pause (1 << 13) +#define ADVERTISED_Asym_Pause (1 << 14) +#define ADVERTISED_2500baseX_Full (1 << 15) +#define ADVERTISED_Backplane (1 << 16) +#define ADVERTISED_1000baseKX_Full (1 << 17) +#define ADVERTISED_10000baseKX4_Full (1 << 18) +#define ADVERTISED_10000baseKR_Full (1 << 19) +#define ADVERTISED_10000baseR_FEC (1 << 20) +#define ADVERTISED_20000baseMLD2_Full (1 << 21) +#define ADVERTISED_20000baseKR2_Full (1 << 22) +#define ADVERTISED_40000baseKR4_Full (1 << 23) +#define ADVERTISED_40000baseCR4_Full (1 << 24) +#define ADVERTISED_40000baseSR4_Full (1 << 25) +#define ADVERTISED_40000baseLR4_Full (1 << 26) + +/* The following are all involved in forcing a particular link + * mode for the device for setting things. When getting the + * devices settings, these indicate the current mode and whether + * it was forced up into this mode or autonegotiated. + */ + +/* The forced speed, 10Mb, 100Mb, gigabit, 2.5Gb, 10GbE. */ +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define SPEED_10000 10000 +#define SPEED_UNKNOWN -1 + +/* Duplex, half or full. */ +#define DUPLEX_HALF 0x00 +#define DUPLEX_FULL 0x01 +#define DUPLEX_UNKNOWN 0xff + +/* Which connector port. 
*/ +#define PORT_TP 0x00 +#define PORT_AUI 0x01 +#define PORT_MII 0x02 +#define PORT_FIBRE 0x03 +#define PORT_BNC 0x04 +#define PORT_DA 0x05 +#define PORT_NONE 0xef +#define PORT_OTHER 0xff + +/* Which transceiver to use. */ +#define XCVR_INTERNAL 0x00 +#define XCVR_EXTERNAL 0x01 +#define XCVR_DUMMY1 0x02 +#define XCVR_DUMMY2 0x03 +#define XCVR_DUMMY3 0x04 + +/* Enable or disable autonegotiation. If this is set to enable, + * the forced link modes above are completely ignored. + */ +#define AUTONEG_DISABLE 0x00 +#define AUTONEG_ENABLE 0x01 + +/* MDI or MDI-X status/control - if MDI/MDI_X/AUTO is set then + * the driver is required to renegotiate link + */ +#define ETH_TP_MDI_INVALID 0x00 /* status: unknown; control: unsupported */ +#define ETH_TP_MDI 0x01 /* status: MDI; control: force MDI */ +#define ETH_TP_MDI_X 0x02 /* status: MDI-X; control: force MDI-X */ +#define ETH_TP_MDI_AUTO 0x03 /* control: auto-select */ + +/* Wake-On-Lan options. */ +#define WAKE_PHY (1 << 0) +#define WAKE_UCAST (1 << 1) +#define WAKE_MCAST (1 << 2) +#define WAKE_BCAST (1 << 3) +#define WAKE_ARP (1 << 4) +#define WAKE_MAGIC (1 << 5) +#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ + +/* L2-L4 network traffic flow types */ +#define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */ +#define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */ +#define SCTP_V4_FLOW 0x03 /* hash or spec (sctp_ip4_spec) */ +#define AH_ESP_V4_FLOW 0x04 /* hash only */ +#define TCP_V6_FLOW 0x05 /* hash only */ +#define UDP_V6_FLOW 0x06 /* hash only */ +#define SCTP_V6_FLOW 0x07 /* hash only */ +#define AH_ESP_V6_FLOW 0x08 /* hash only */ +#define AH_V4_FLOW 0x09 /* hash or spec (ah_ip4_spec) */ +#define ESP_V4_FLOW 0x0a /* hash or spec (esp_ip4_spec) */ +#define AH_V6_FLOW 0x0b /* hash only */ +#define ESP_V6_FLOW 0x0c /* hash only */ +#define IP_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */ +#define IPV4_FLOW 0x10 /* hash only */ +#define IPV6_FLOW 0x11 /* hash only */ +#define ETHER_FLOW 0x12 /* spec only (ether_spec) */ +/* Flag to enable additional fields in struct ethtool_rx_flow_spec */ +#define FLOW_EXT 0x80000000 +#define FLOW_MAC_EXT 0x40000000 + +/* L3-L4 network traffic flow hash options */ +#define RXH_L2DA (1 << 1) +#define RXH_VLAN (1 << 2) +#define RXH_L3_PROTO (1 << 3) +#define RXH_IP_SRC (1 << 4) +#define RXH_IP_DST (1 << 5) +#define RXH_L4_B_0_1 (1 << 6) /* src port in case of TCP/UDP/SCTP */ +#define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */ +#define RXH_DISCARD (1 << 31) + +#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL + +/* Special RX classification rule insert location values */ +#define RX_CLS_LOC_SPECIAL 0x80000000 /* flag */ +#define RX_CLS_LOC_ANY 0xffffffff +#define RX_CLS_LOC_FIRST 0xfffffffe +#define RX_CLS_LOC_LAST 0xfffffffd + +/* EEPROM Standards for plug in modules */ +#define ETH_MODULE_SFF_8079 0x1 +#define ETH_MODULE_SFF_8079_LEN 256 +#define ETH_MODULE_SFF_8472 0x2 +#define ETH_MODULE_SFF_8472_LEN 512 + +/* Reset flags */ +/* The reset() operation must clear the flags for the components which + * were actually reset. On successful return, the flags indicate the + * components which were not reset, either because they do not exist + * in the hardware or because they cannot be reset independently. The + * driver must never reset any components that were not requested. + */ +enum ethtool_reset_flags { + /* These flags represent components dedicated to the interface + * the command is addressed to. 
Shift any flag left by
+ * ETH_RESET_SHARED_SHIFT to reset a shared component of the
+ * same type.
+ */
+	ETH_RESET_MGMT		= 1 << 0,	/* Management processor */
+	ETH_RESET_IRQ		= 1 << 1,	/* Interrupt requester */
+	ETH_RESET_DMA		= 1 << 2,	/* DMA engine */
+	ETH_RESET_FILTER	= 1 << 3,	/* Filtering/flow direction */
+	ETH_RESET_OFFLOAD	= 1 << 4,	/* Protocol offload */
+	ETH_RESET_MAC		= 1 << 5,	/* Media access controller */
+	ETH_RESET_PHY		= 1 << 6,	/* Transceiver/PHY */
+	ETH_RESET_RAM		= 1 << 7,	/* RAM shared between
+					 * multiple components */
+
+	ETH_RESET_DEDICATED	= 0x0000ffff,	/* All components dedicated to
+						 * this interface */
+	ETH_RESET_ALL		= 0xffffffff,	/* All components used by this
+						 * interface, even if shared */
+};
+#define ETH_RESET_SHARED_SHIFT	16
+
+#endif /* _LINUX_ETHTOOL_H */
diff --git a/wwan/app/quectel_cm_5G/src/main.c b/wwan/app/quectel_cm_5G/src/main.c
new file mode 100644
index 0000000..579b4f1
--- /dev/null
+++ b/wwan/app/quectel_cm_5G/src/main.c
@@ -0,0 +1,956 @@
+/******************************************************************************
+  @file    main.c
+  @brief   The entry program.
+
+  DESCRIPTION
+  Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules.
+
+  INITIALIZATION AND SEQUENCING REQUIREMENTS
+  None.
+
+  ---------------------------------------------------------------------------
+  Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved.
+  Quectel Wireless Solution Proprietary and Confidential.
+  ---------------------------------------------------------------------------
+******************************************************************************/
+
+#include "QMIThread.h"
+/* NOTE (editor): the four angle-bracket header names below were lost in
+ * extraction; these are reconstructions matching the calls used in this
+ * file (socketpair, poll, readdir, isdigit). */
+#include <sys/socket.h>
+#include <poll.h>
+#include <dirent.h>
+#include <ctype.h>
+
+#include "util.h"
+//#define CONFIG_PID_FILE_FORMAT "/var/run/quectel-CM-%s.pid" //for example /var/run/quectel-CM-wwan0.pid
+
+static PROFILE_T s_profile;
+int debug_qmi = 0;
+int qmidevice_control_fd[2];
+static int signal_control_fd[2];
+int g_donot_exit_when_modem_hangup = 0;
+extern int ql_ifconfig(int argc, char *argv[]);
+extern int ql_get_netcard_driver_info(const char*);
+extern int ql_capture_usbmon_log(PROFILE_T *profile, const char *log_path);
+extern void ql_stop_usbmon_log(PROFILE_T *profile);
+//UINT ifc_get_addr(const char *ifname);
+static int s_link = -1;
+static void usbnet_link_change(int link, PROFILE_T *profile) {
+    if (s_link == link)
+        return;
+
+    s_link = link;
+
+    if (!(link & (1<<IpFamilyV4)))
+        memset(&profile->ipv4, 0, sizeof(IPV4_T));
+
+    if (!(link & (1<<IpFamilyV6)))
+        memset(&profile->ipv6, 0, sizeof(IPV6_T));
+
+    if (link) {
+        udhcpc_start(profile);
+    } else {
+        udhcpc_stop(profile);
+    }
+}
+
+static int check_ipv4_address(PROFILE_T *profile) {
+    uint32_t oldAddress = profile->ipv4.Address;
+
+    if (profile->request_ops == &mbim_request_ops)
+        return 1; //we will get a new ipv4 address via requestGetIPAddress()
+    if (profile->request_ops == &atc_request_ops) {
+        if (!profile->udhcpc_ip) return 1;
+        oldAddress = profile->udhcpc_ip;
+    }
+
+    if (profile->request_ops->requestGetIPAddress(profile, IpFamilyV4) == 0) {
+        if (profile->ipv4.Address != oldAddress || debug_qmi) {
+            unsigned char *l = (unsigned char *)&oldAddress;
+            unsigned char *r = (unsigned char *)&profile->ipv4.Address;
+            dbg_time("localIP: %d.%d.%d.%d VS remoteIP: %d.%d.%d.%d",
+                l[3], l[2], l[1], l[0], r[3], r[2], r[1], r[0]);
+        }
+        return (profile->ipv4.Address == oldAddress);
+    }
+
+    return 0;
+}
+
+static void main_send_event_to_qmidevice(int triger_event) {
+    if (write(qmidevice_control_fd[0], &triger_event, sizeof(triger_event)) == -1) {};
+}
+
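+/* [Editor's note] usbnet_link_change() above treats `link` as a bitmask with
+ * one bit per IP family: (1<<IpFamilyV4) set means the IPv4 call is up, and
+ * (1<<IpFamilyV6) likewise for IPv6. A minimal standalone sketch of the same
+ * convention (the enum values below are assumptions for illustration; the
+ * real values come from QMIThread.h):
+ *
+ *   #include <stdio.h>
+ *   enum { IpFamilyV4 = 4, IpFamilyV6 = 6 };        // assumed values
+ *   int main(void) {
+ *       int link = 0;
+ *       link |= (1 << IpFamilyV4);                  // IPv4 call connected
+ *       if (!(link & (1 << IpFamilyV6)))
+ *           printf("IPv6 context stays cleared\n"); // mirrors the memset above
+ *       printf("link mask = 0x%x\n", link);
+ *       return 0;
+ *   }
+ */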
+static void send_signo_to_main(int signo) { + if (write(signal_control_fd[0], &signo, sizeof(signo)) == -1) {}; +} + +void qmidevice_send_event_to_main(int triger_event) { + if (write(qmidevice_control_fd[1], &triger_event, sizeof(triger_event)) == -1) {}; +} + +void qmidevice_send_event_to_main_ext(int triger_event, void *data, unsigned len) { + if (write(qmidevice_control_fd[1], &triger_event, sizeof(triger_event)) == -1) {}; + if (write(qmidevice_control_fd[1], data, len) == -1) {}; +} + +#define MAX_PATH 256 + +static int ls_dir(const char *dir, int (*match)(const char *dir, const char *file, void *argv[]), void *argv[]) +{ + DIR *pDir; + struct dirent* ent = NULL; + int match_times = 0; + + pDir = opendir(dir); + if (pDir == NULL) { + dbg_time("Cannot open directory: %s, errno: %d (%s)", dir, errno, strerror(errno)); + return 0; + } + + while ((ent = readdir(pDir)) != NULL) { + match_times += match(dir, ent->d_name, argv); + } + closedir(pDir); + + return match_times; +} + +static int is_same_linkfile(const char *dir, const char *file, void *argv[]) +{ + const char *qmichannel = (const char *)argv[1]; + char linkname[MAX_PATH*2+6]; + char filename[MAX_PATH]; + int linksize; + + snprintf(linkname, sizeof(linkname), "%.256s/%s", dir, file); + linksize = readlink(linkname, filename, sizeof(filename)); + if (linksize <= 0) + return 0; + + filename[linksize] = 0; + if (strcmp(filename, qmichannel)) + return 0; + + dbg_time("%s -> %s", linkname, filename); + return 1; +} + +static int is_brother_process(const char *dir, const char *file, void *argv[]) +{ + //const char *myself = (const char *)argv[0]; + char linkname[MAX_PATH*2+6]; + char filename[MAX_PATH]; + int linksize; + int i = 0, kill_timeout = 15; + pid_t pid; + + //dbg_time("%s", file); + while (file[i]) { + if (!isdigit(file[i])) + break; + i++; + } + + if (file[i]) { + //dbg_time("%s not digit", file); + return 0; + } + + snprintf(linkname, sizeof(linkname), "%s/%s/exe", dir, file); + linksize = readlink(linkname, filename, sizeof(filename)); + if (linksize <= 0) + return 0; + + filename[linksize] = 0; + + pid = atoi(file); + if (pid >= getpid()) + return 0; + + snprintf(linkname, sizeof(linkname), "%s/%s/fd", dir, file); + if (!ls_dir(linkname, is_same_linkfile, argv)) + return 0; + + dbg_time("%s/%s/exe -> %s", dir, file, filename); + while (kill_timeout-- && !kill(pid, 0)) + { + kill(pid, SIGTERM); + sleep(1); + } + if (!kill(pid, 0)) + { + dbg_time("force kill %s/%s/exe -> %s", dir, file, filename); + kill(pid, SIGKILL); + sleep(1); + } + + return 1; +} + +static int kill_brothers(const char *qmichannel) +{ + char myself[MAX_PATH]; + int filenamesize; + void *argv[2] = {myself, (void *)qmichannel}; + + filenamesize = readlink("/proc/self/exe", myself, sizeof(myself)); + if (filenamesize <= 0) + return 0; + myself[filenamesize] = 0; + + if (ls_dir("/proc", is_brother_process, argv)) + sleep(1); + + return 0; +} + +static int kill_data_call_pdp(int pdp, char *self) { + int pid; + char *p = NULL; + + p = self; + while (*self) { + if (*self == '/') + p = self+1; + self++; + } + + pid = getpid_by_pdp(pdp, p); + if (pid > 0) { + dbg_time("send SIGINT to process %d", pid); + return kill(pid, SIGINT); + } + + return -1; +} + +static void ql_sigaction(int signo) { + if (SIGALRM == signo) + send_signo_to_main(SIG_EVENT_START); + else + { + g_donot_exit_when_modem_hangup = 0; + send_signo_to_main(SIG_EVENT_STOP); + main_send_event_to_qmidevice(SIG_EVENT_STOP); //main may be wating qmi response + } +} + +static int usage(const char 
*progname) {
+    dbg_time("Usage: %s [options]", progname);
+    dbg_time("-s [apn [user password auth]]    Set the apn/user/password/auth provided by your network provider. auth: 1~pap, 2~chap, 3~MsChapV2");
+    dbg_time("-p pincode                       Verify SIM PIN if the SIM card is locked");
+    dbg_time("-p [quectel-][qmi|mbim]-proxy    Request to use proxy");
+    dbg_time("-f logfilename                   Save this program's log messages to a file");
+    dbg_time("-u usbmonlog filename            Save usbmon log to a file");
+    dbg_time("-i interface                     Specify which network interface to set up a data call on when multiple modems exist");
+    dbg_time("-4                               Set up an IPv4 data call (default)");
+    dbg_time("-6                               Set up an IPv6 data call");
+    dbg_time("-n pdn                           Specify which pdn to set up a data call on (default 1 for QMI, 0 for MBIM)");
+    dbg_time("-k pdn                           Specify which pdn to hang up a data call on (by sending SIGINT to 'quectel-CM -n pdn')");
+    dbg_time("-m iface-idx                     Bind QMI data call to wwan0_<iface-idx> when QMAP is used. E.g. '-n 7 -m 1' binds the pdn-7 data call to wwan0_1");
+    dbg_time("-b                               Enable the network interface bridge function (default 0)");
+    dbg_time("-v                               Verbose log mode, for debugging purposes.");
+    dbg_time("-d                               Obtain the IP address and DNS through QMI");
+    dbg_time("[Examples]");
+    dbg_time("Example 1: %s ", progname);
+    dbg_time("Example 2: %s -s 3gnet ", progname);
+    dbg_time("Example 3: %s -s 3gnet carl 1234 1 -p 1234 -f gobinet_log.txt", progname);
+    return 0;
+}
+
+static int qmi_main(PROFILE_T *profile)
+{
+    int triger_event = 0;
+    int signo;
+#ifdef CONFIG_SIM
+    SIM_Status SIMStatus = SIM_ABSENT;
+#endif
+    UCHAR PSAttachedState = 0;
+    UCHAR IPv4ConnectionStatus = QWDS_PKT_DATA_UNKNOW;
+    UCHAR IPv6ConnectionStatus = QWDS_PKT_DATA_UNKNOW;
+    unsigned SetupCallFail = 0;
+    unsigned long SetupCallAllowTime = clock_msec();
+#ifdef REBOOT_SIM_CARD_WHEN_LONG_TIME_NO_PS
+    unsigned PsAttachFail = 0;
+    unsigned long PsAttachTime = clock_msec();
+#endif
+    int qmierr = 0;
+    const struct request_ops *request_ops = profile->request_ops;
+    pthread_t gQmiThreadID = 0;
+
+//sudo apt-get install udhcpc
+//sudo apt-get remove ModemManager
+    if (profile->reattach_flag) {
+        if (!reattach_driver(profile))
+            sleep(2);
+    }
+
+    /* try to recreate FDs */
+    if (socketpair(AF_LOCAL, SOCK_STREAM, 0, signal_control_fd) < 0) {
+        dbg_time("%s Failed to create main_control_fd: %d (%s)", __func__, errno, strerror(errno));
+        return -1;
+    }
+
+    if (socketpair(AF_LOCAL, SOCK_STREAM, 0, qmidevice_control_fd) < 0) {
+        dbg_time("%s Failed to create thread control socket pair: %d (%s)", __func__, errno, strerror(errno));
+        return 0;
+    }
+
+    if ((profile->qmap_mode == 0 || profile->qmap_mode == 1)
+        && (!profile->proxy[0] || strstr(profile->qmichannel, "_IPCR"))) {
+        kill_brothers(profile->qmichannel);
+    }
+
+    if (pthread_create(&gQmiThreadID, 0, profile->qmi_ops->read, (void *)profile) != 0) {
+        dbg_time("%s Failed to create QMIThread: %d (%s)", __func__, errno, strerror(errno));
+        return 0;
+    }
+
+    if ((read(qmidevice_control_fd[0], &triger_event, sizeof(triger_event)) != sizeof(triger_event))
+        || (triger_event != RIL_INDICATE_DEVICE_CONNECTED)) {
+        dbg_time("%s Failed to init QMIThread: %d (%s)", __func__, errno, strerror(errno));
+        return 0;
+    }
+
+    if (profile->qmi_ops->init && profile->qmi_ops->init(profile)) {
+        dbg_time("%s Failed to init QMI: %d (%s)", __func__, errno, strerror(errno));
+        return 0;
+    }
+
+    if (request_ops->requestBaseBandVersion)
+        request_ops->requestBaseBandVersion(profile);
+
+    if (request_ops->requestSetEthMode)
+        request_ops->requestSetEthMode(profile);
+
+    if (request_ops->requestSetLoopBackState &&
profile->loopback_state) { + qmierr = request_ops->requestSetLoopBackState(profile->loopback_state, profile->replication_factor); + if (qmierr != QMI_ERR_INVALID_QMI_CMD) //X20 return this error + profile->loopback_state = 0; //wait for RIL_UNSOL_LOOPBACK_CONFIG_IND + } + + if (request_ops->requestGetSIMStatus) { + qmierr = request_ops->requestGetSIMStatus(&SIMStatus); + + while (qmierr == QMI_ERR_OP_DEVICE_UNSUPPORTED) { + sleep(1); + qmierr = request_ops->requestGetSIMStatus(&SIMStatus); + } + + if ((SIMStatus == SIM_PIN) && profile->pincode && request_ops->requestEnterSimPin) { + request_ops->requestEnterSimPin(profile->pincode); + } + } + + if (SIMStatus == SIM_READY) { + if (request_ops->requestGetICCID) + request_ops->requestGetICCID(); + + if (request_ops->requestGetIMSI) + request_ops->requestGetIMSI(); + } + + if (request_ops->requestGetProfile) + request_ops->requestGetProfile(profile); + + if (request_ops->requestSetProfile && (profile->apn || profile->user || profile->password)) { + if (request_ops->requestSetProfile(profile) == 1) { +#ifdef REBOOT_SIM_CARD_WHEN_APN_CHANGE //enable at only when customer asked + if (request_ops->requestRadioPower) { + request_ops->requestRadioPower(0); + request_ops->requestRadioPower(1); + } +#endif + } + } + + request_ops->requestRegistrationState(&PSAttachedState); + +#ifdef CONFIG_ENABLE_QOS + request_ops->requestRegisterQos(profile); +#endif + +#if 1 //USB disconnnect and re-connect, but not reboot modem, will get this bug + if (profile->enable_ipv4 + && profile->request_ops == &atc_request_ops + && !request_ops->requestQueryDataCall(&IPv4ConnectionStatus, IpFamilyV4) + && IPv4ConnectionStatus == QWDS_PKT_DATA_CONNECTED) { + request_ops->requestDeactivateDefaultPDP(profile, IpFamilyV4); + } +#endif + + send_signo_to_main(SIG_EVENT_CHECK); + + while (1) + { + struct pollfd pollfds[] = {{signal_control_fd[1], POLLIN, 0}, {qmidevice_control_fd[0], POLLIN, 0}}; + int ne, ret, nevents = sizeof(pollfds)/sizeof(pollfds[0]); + + do { + ret = poll(pollfds, nevents, 15*1000); + } while ((ret < 0) && (errno == EINTR)); + + if (ret == 0) + { + send_signo_to_main(SIG_EVENT_CHECK); + continue; + } + + if (ret <= 0) { + dbg_time("%s poll=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno)); + goto __main_quit; + } + + for (ne = 0; ne < nevents; ne++) { + int fd = pollfds[ne].fd; + short revents = pollfds[ne].revents; + + if (revents & (POLLERR | POLLHUP | POLLNVAL)) { + dbg_time("%s poll err/hup", __func__); + dbg_time("epoll fd = %d, events = 0x%04x", fd, revents); + main_send_event_to_qmidevice(RIL_REQUEST_QUIT); + if (revents & POLLHUP) + goto __main_quit; + } + + if ((revents & POLLIN) == 0) + continue; + + if (fd == signal_control_fd[1]) + { + if (read(fd, &signo, sizeof(signo)) == sizeof(signo)) + { + alarm(0); + switch (signo) + { + case SIG_EVENT_START: + if (PSAttachedState != 1 && profile->loopback_state == 0) + break; + + if (SetupCallAllowTime > clock_msec()) { + alarm((SetupCallAllowTime - clock_msec()+999)/1000); + break; + } + + if (profile->enable_ipv4 && IPv4ConnectionStatus != QWDS_PKT_DATA_CONNECTED) { + qmierr = request_ops->requestSetupDataCall(profile, IpFamilyV4); + + if ((qmierr > 0) && profile->user && profile->user[0] && profile->password && profile->password[0]) { + int old_auto = profile->auth; + + //may be fail because wrong auth mode, try pap->chap, or chap->pap + profile->auth = (profile->auth == 1) ? 
2 : 1; + qmierr = request_ops->requestSetupDataCall(profile, IpFamilyV4); + + if (qmierr) + profile->auth = old_auto; //still fail, restore old auth moe + } + + if (!qmierr) { + qmierr = request_ops->requestGetIPAddress(profile, IpFamilyV4); + if (!qmierr) + IPv4ConnectionStatus = QWDS_PKT_DATA_CONNECTED; + } + + } + + if (profile->enable_ipv6 && IPv6ConnectionStatus != QWDS_PKT_DATA_CONNECTED) { + if (profile->enable_ipv4 && profile->request_ops != &qmi_request_ops) { + IPv6ConnectionStatus = IPv4ConnectionStatus; + } + else { + qmierr = request_ops->requestSetupDataCall(profile, IpFamilyV6); + + if (!qmierr) { + qmierr = request_ops->requestGetIPAddress(profile, IpFamilyV6); + if (!qmierr) + IPv6ConnectionStatus = QWDS_PKT_DATA_CONNECTED; + } + } + } + + if ((profile->enable_ipv4 && IPv4ConnectionStatus == QWDS_PKT_DATA_DISCONNECTED) + || (profile->enable_ipv6 && IPv6ConnectionStatus == QWDS_PKT_DATA_DISCONNECTED)) { + const unsigned allow_time[] = {5, 10, 20, 40, 60}; + + if (SetupCallFail < (sizeof(allow_time)/sizeof(unsigned))) + SetupCallAllowTime = allow_time[SetupCallFail]; + else + SetupCallAllowTime = 60; + SetupCallFail++; + dbg_time("try to requestSetupDataCall %ld second later", SetupCallAllowTime); + alarm(SetupCallAllowTime); + SetupCallAllowTime = SetupCallAllowTime*1000 + clock_msec(); + } + else if (IPv4ConnectionStatus == QWDS_PKT_DATA_CONNECTED || IPv6ConnectionStatus == QWDS_PKT_DATA_CONNECTED) { + SetupCallFail = 0; + SetupCallAllowTime = clock_msec(); + } + break; + + case SIG_EVENT_CHECK: + if (request_ops->requestGetSignalInfo) + request_ops->requestGetSignalInfo(); + + if (request_ops->requestGetCellInfoList) + request_ops->requestGetCellInfoList(); + + if (request_ops->requestGetCoexWWANState) + request_ops->requestGetCoexWWANState(); + + if (PSAttachedState != 1) + request_ops->requestRegistrationState(&PSAttachedState); + +#ifdef REBOOT_SIM_CARD_WHEN_LONG_TIME_NO_PS + if (PSAttachedState) { + PsAttachTime = clock_msec(); + PsAttachFail = 0; + } + else { + unsigned long diff = (clock_msec() - PsAttachTime) / 1000; + unsigned long threshold = REBOOT_SIM_CARD_WHEN_LONG_TIME_NO_PS << PsAttachFail; + + if (diff > threshold || diff > 960) { + //interval time is 60 -> 120 - > 240 - > 480 -> 960 + PsAttachTime = clock_msec(); + PsAttachFail++; + + if (request_ops->requestRadioPower) { + request_ops->requestRadioPower(0); + request_ops->requestRadioPower(1); + } + } + } +#endif + + if (profile->enable_ipv4 && IPv4ConnectionStatus != QWDS_PKT_DATA_DISCONNECTED + && !request_ops->requestQueryDataCall(&IPv4ConnectionStatus, IpFamilyV4)) + { + if (QWDS_PKT_DATA_CONNECTED == IPv4ConnectionStatus && profile->ipv4.Address == 0) { + //killall -9 quectel-CM for MBIM and ATC call + qmierr = request_ops->requestGetIPAddress(profile, IpFamilyV4); + if (qmierr) + IPv4ConnectionStatus = QWDS_PKT_DATA_DISCONNECTED; + } + + //local ip is different with remote ip + if (QWDS_PKT_DATA_CONNECTED == IPv4ConnectionStatus && check_ipv4_address(profile) == 0) { + request_ops->requestDeactivateDefaultPDP(profile, IpFamilyV4); + IPv4ConnectionStatus = QWDS_PKT_DATA_DISCONNECTED; + } + } + else { + IPv4ConnectionStatus = QWDS_PKT_DATA_DISCONNECTED; + } + + if (profile->enable_ipv6 && IPv6ConnectionStatus != QWDS_PKT_DATA_DISCONNECTED) { + if (profile->enable_ipv4 && profile->request_ops != &qmi_request_ops) { + IPv6ConnectionStatus = IPv4ConnectionStatus; + } + else { + request_ops->requestQueryDataCall(&IPv6ConnectionStatus, IpFamilyV6); + } + } + else { + IPv6ConnectionStatus = 
QWDS_PKT_DATA_DISCONNECTED;
+                    }
+
+                    if (IPv4ConnectionStatus == QWDS_PKT_DATA_DISCONNECTED && IPv6ConnectionStatus == QWDS_PKT_DATA_DISCONNECTED) {
+                        usbnet_link_change(0, profile);
+                    }
+                    else if (IPv4ConnectionStatus == QWDS_PKT_DATA_CONNECTED || IPv6ConnectionStatus == QWDS_PKT_DATA_CONNECTED) {
+                        int link = 0;
+                        if (IPv4ConnectionStatus == QWDS_PKT_DATA_CONNECTED)
+                            link |= (1<<IpFamilyV4);
+                        if (IPv6ConnectionStatus == QWDS_PKT_DATA_CONNECTED)
+                            link |= (1<<IpFamilyV6);
+                        usbnet_link_change(link, profile);
+                    }
+
+                    if ((profile->enable_ipv4 && IPv4ConnectionStatus == QWDS_PKT_DATA_DISCONNECTED)
+                        || (profile->enable_ipv6 && IPv6ConnectionStatus == QWDS_PKT_DATA_DISCONNECTED)) {
+                        send_signo_to_main(SIG_EVENT_START);
+                    }
+                    break;
+
+                case SIG_EVENT_STOP:
+                    if (profile->enable_ipv4 && IPv4ConnectionStatus == QWDS_PKT_DATA_CONNECTED) {
+                        request_ops->requestDeactivateDefaultPDP(profile, IpFamilyV4);
+                    }
+                    if (profile->enable_ipv6 && IPv6ConnectionStatus == QWDS_PKT_DATA_CONNECTED) {
+                        if (profile->enable_ipv4 && profile->request_ops != &qmi_request_ops) {
+                            /* IPv6 shares the IPv4 call here; nothing extra to deactivate */
+                        }
+                        else {
+                            request_ops->requestDeactivateDefaultPDP(profile, IpFamilyV6);
+                        }
+                    }
+                    usbnet_link_change(0, profile);
+                    if (profile->qmi_ops->deinit)
+                        profile->qmi_ops->deinit();
+                    main_send_event_to_qmidevice(RIL_REQUEST_QUIT);
+                    goto __main_quit;
+                    break;
+
+                default:
+                    break;
+                }
+            }
+        }
+
+        if (fd == qmidevice_control_fd[0]) {
+            if (read(fd, &triger_event, sizeof(triger_event)) == sizeof(triger_event)) {
+                switch (triger_event) {
+                case RIL_INDICATE_DEVICE_DISCONNECTED:
+                    usbnet_link_change(0, profile);
+                    goto __main_quit;
+                    break;
+
+                case RIL_UNSOL_RESPONSE_VOICE_NETWORK_STATE_CHANGED:
+                    request_ops->requestRegistrationState(&PSAttachedState);
+                    if (PSAttachedState == 1) {
+                        if ((profile->enable_ipv4 && IPv4ConnectionStatus == QWDS_PKT_DATA_DISCONNECTED)
+                            || (profile->enable_ipv6 && IPv6ConnectionStatus == QWDS_PKT_DATA_DISCONNECTED)) {
+                            send_signo_to_main(SIG_EVENT_START);
+                        }
+                    } else {
+                        SetupCallAllowTime = clock_msec();
+                    }
+                    break;
+
+                case RIL_UNSOL_DATA_CALL_LIST_CHANGED:
+                    if (IPv4ConnectionStatus == QWDS_PKT_DATA_CONNECTED || IPv6ConnectionStatus == QWDS_PKT_DATA_CONNECTED) {
+                        SetupCallAllowTime = clock_msec() + 1000; //from connect -> disconnect, do not re-dial immediately, wait for the network to stabilize
+                    }
+                    send_signo_to_main(SIG_EVENT_CHECK);
+                    break;
+
+                case MODEM_REPORT_RESET_EVENT:
+                {
+                    dbg_time("main recv MODEM RESET SIGNAL");
+                    main_send_event_to_qmidevice(RIL_REQUEST_QUIT);
+                    g_donot_exit_when_modem_hangup = 1;
+                    goto __main_quit;
+                }
+                break;
+
+                case RIL_UNSOL_LOOPBACK_CONFIG_IND:
+                {
+                    QMI_WDA_SET_LOOPBACK_CONFIG_IND_MSG SetLoopBackInd;
+                    if (read(fd, &SetLoopBackInd, sizeof(SetLoopBackInd)) == sizeof(SetLoopBackInd)) {
+                        profile->loopback_state = SetLoopBackInd.loopback_state.TLVVaule;
+                        profile->replication_factor = le32_to_cpu(SetLoopBackInd.replication_factor.TLVVaule);
+                        dbg_time("SetLoopBackInd: loopback_state=%d, replication_factor=%u",
+                            profile->loopback_state, profile->replication_factor);
+                        if (profile->loopback_state)
+                            send_signo_to_main(SIG_EVENT_START);
+                    }
+                }
+                break;
+#ifdef CONFIG_REG_QOS_IND
+                case RIL_UNSOL_GLOBAL_QOS_FLOW_IND_QOS_ID:
+                {
+                    UINT qos_id = 0;
+                    if (read(fd, &qos_id, sizeof(qos_id)) == sizeof(qos_id)) {
+                        profile->qos_id = qos_id;
+                    }
+                }
+                break;
+#endif
+                default:
+                    break;
+                }
+            }
+        }
+    }
+}
+
+__main_quit:
+    usbnet_link_change(0, profile);
+    if (gQmiThreadID && pthread_join(gQmiThreadID, NULL)) {
+        dbg_time("%s Error joining to listener thread (%s)", __func__, strerror(errno));
+    }
+
+    close(signal_control_fd[0]);
+    close(signal_control_fd[1]);
+    close(qmidevice_control_fd[0]);
+    close(qmidevice_control_fd[1]);
+
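+    /* [Editor's note] The loop above multiplexes two AF_LOCAL socketpairs via
+     * poll(): signal_control_fd carries signal numbers from the async handler
+     * into the main loop (the classic self-pipe trick), and
+     * qmidevice_control_fd carries events from the QMI reader thread. A
+     * minimal standalone sketch of the pattern:
+     *
+     *   #include <sys/socket.h>
+     *   #include <poll.h>
+     *   #include <unistd.h>
+     *   #include <stdio.h>
+     *   int main(void) {
+     *       int sk[2];
+     *       if (socketpair(AF_LOCAL, SOCK_STREAM, 0, sk) < 0)
+     *           return 1;
+     *       int ev = 42;
+     *       write(sk[0], &ev, sizeof(ev));            // producer side
+     *       struct pollfd pfd = { sk[1], POLLIN, 0 };
+     *       if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN)) {
+     *           read(sk[1], &ev, sizeof(ev));         // consumer side
+     *           printf("got event %d\n", ev);
+     *       }
+     *       return 0;
+     *   }
+     */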
dbg_time("%s exit", __func__); + + return 0; +} + +static int quectel_CM(PROFILE_T *profile) +{ + int ret = 0; + char qmichannel[32] = {'\0'}; + char usbnet_adapter[32] = {'\0'}; + + if (profile->expect_adapter[0]) + strncpy(usbnet_adapter, profile->expect_adapter, sizeof(usbnet_adapter)); + + if (qmidevice_detect(qmichannel, usbnet_adapter, sizeof(qmichannel), profile)) { + profile->hardware_interface = HARDWARE_USB; + } + else if (mhidevice_detect(qmichannel, usbnet_adapter, profile)) { + profile->hardware_interface = HARDWARE_PCIE; + } + else if (atdevice_detect(qmichannel, usbnet_adapter, profile)) { + profile->hardware_interface = HARDWARE_PCIE; + } +#ifdef CONFIG_QRTR + else if (1) { + strcpy(qmichannel, "qrtr"); + strcpy(usbnet_adapter, "rmnet_mhi0"); + profile->hardware_interface = HARDWARE_PCIE; + profile->software_interface = SOFTWARE_QRTR; + } +#endif + else { + dbg_time("qmidevice_detect failed"); + goto error; + } + + strncpy(profile->qmichannel, qmichannel, sizeof(profile->qmichannel)); + strncpy(profile->usbnet_adapter, usbnet_adapter, sizeof(profile->usbnet_adapter)); + ql_get_netcard_driver_info(profile->usbnet_adapter); + + if ((profile->hardware_interface == HARDWARE_USB) && profile->usblogfile) + ql_capture_usbmon_log(profile, profile->usblogfile); + + if (profile->hardware_interface == HARDWARE_USB) { + profile->software_interface = get_driver_type(profile); + } + + ql_qmap_mode_detect(profile); + + if (profile->software_interface == SOFTWARE_MBIM) { + dbg_time("Modem works in MBIM mode"); + profile->request_ops = &mbim_request_ops; + profile->qmi_ops = &mbim_dev_ops; + if (!profile->apn || !profile->apn[0]) { + //see FAE-51804 FAE-59811 + dbg_time("When MBIM mode, must specify APN with '-s', or setup data call may fail!"); + exit(-404); //if no such issue on your side, please comment this + } + ret = qmi_main(profile); + } + else if (profile->software_interface == SOFTWARE_QMI) { + dbg_time("Modem works in QMI mode"); + profile->request_ops = &qmi_request_ops; + if (qmidev_is_gobinet(profile->qmichannel)) + profile->qmi_ops = &gobi_qmidev_ops; + else + profile->qmi_ops = &qmiwwan_qmidev_ops; + qmidev_send = profile->qmi_ops->send; + ret = qmi_main(profile); + } + else if (profile->software_interface == SOFTWARE_ECM_RNDIS_NCM) { + dbg_time("Modem works in ECM_RNDIS_NCM mode"); + profile->request_ops = &atc_request_ops; + profile->qmi_ops = &atc_dev_ops; + ret = qmi_main(profile); + } +#ifdef CONFIG_QRTR + else if (profile->software_interface == SOFTWARE_QRTR) { + dbg_time("Modem works in QRTR mode"); + profile->request_ops = &qmi_request_ops; + profile->qmi_ops = &qrtr_qmidev_ops; + qmidev_send = profile->qmi_ops->send; + ret = qmi_main(profile); + } +#endif + else { + dbg_time("unsupport software_interface %d", profile->software_interface); + } + + ql_stop_usbmon_log(profile); + +error: + return ret; +} + +static int parse_user_input(int argc, char **argv, PROFILE_T *profile) { + int opt = 1; + + profile->pdp = CONFIG_DEFAULT_PDP; + profile->profile_index = CONFIG_DEFAULT_PDP; + + if (!strcmp(argv[argc-1], "&")) + argc--; + +#define has_more_argv() ((opt < argc) && (argv[opt][0] != '-')) + while (opt < argc) { + if (argv[opt][0] != '-') { + return usage(argv[0]); + } + + switch (argv[opt++][1]) + { + case 's': + profile->apn = profile->user = profile->password = ""; + if (has_more_argv()) { + profile->apn = argv[opt++]; + } + if (has_more_argv()) { + profile->user = argv[opt++]; + } + if (has_more_argv()) { + profile->password = argv[opt++]; + if (profile->password && 
profile->password[0]) + profile->auth = 2; //default chap, customers may miss auth + } + if (has_more_argv()) { + const char *auth = argv[opt++]; + + if (!strcmp(auth, "0") || !strcasecmp(auth, "none")) { + profile->auth = 0; + } + else if (!strcmp(auth, "1") || !strcasecmp(auth, "pap")) { + profile->auth = 1; + } + else if (!strcmp(auth, "2") || !strcasecmp(auth, "chap")) { + profile->auth = 2; + } + else if (!strcmp(auth, "3") || !strcasecmp(auth, "MsChapV2")) { + profile->auth = 3; + } + else { + dbg_time("unknow auth '%s'", auth); + return usage(argv[0]); + } + } + break; + + case 'p': + if (has_more_argv()) { + const char *arg = argv[opt++]; + + if (!strcmp(arg, QUECTEL_QMI_PROXY) || !strcmp(arg, QUECTEL_MBIM_PROXY) + || !strcmp(arg, LIBQMI_PROXY) || !strcmp(arg, LIBMBIM_PROXY) || !strcmp(arg, QUECTEL_ATC_PROXY)) { + strncpy(profile->proxy, arg, sizeof(profile->proxy) - 1); + } + else if ((999 < atoi(arg)) && (atoi(arg) < 10000)) { + profile->pincode = arg; + } + else { + dbg_time("unknow -p '%s'", arg); + return usage(argv[0]); + } + } + break; + + case 'm': + if (has_more_argv()) + profile->muxid = argv[opt++][0] - '0' + 0x80; + break; + + case 'n': + if (has_more_argv()) + profile->pdp = argv[opt++][0] - '0'; + break; + + case 'f': + if (has_more_argv()) { + profile->logfile = argv[opt++]; + } + break; + + case 'i': + if (has_more_argv()) { + strncpy(profile->expect_adapter, argv[opt++], sizeof(profile->expect_adapter) - 1); + } + break; + + case 'v': + debug_qmi = 1; + break; + + case 'l': + if (has_more_argv()) { + profile->replication_factor = atoi(argv[opt++]); + if (profile->replication_factor > 0) { + profile->loopback_state = 1; + } + } + break; + + case '4': + profile->enable_ipv4 = 1; + break; + + case '6': + profile->enable_ipv6 = 1; + break; + + case 'd': + profile->no_dhcp = 1; + break; + + case 'u': + if (has_more_argv()) { + profile->usblogfile = argv[opt++]; + } + break; + + case 'b': + profile->enable_bridge = 1; + break; + + case 'k': + if (has_more_argv()) { + profile->kill_pdp = argv[opt++][0] - '0'; + } + break; + + default: + return usage(argv[0]); + break; + } + } + + if (profile->enable_ipv4 != 1 && profile->enable_ipv6 != 1) { // default enable IPv4 + profile->enable_ipv4 = 1; + } + + return 1; +} + +int main(int argc, char *argv[]) +{ + int ret; + PROFILE_T *ctx = &s_profile; + + dbg_time("QConnectManager_Linux_V1.6.5"); + + ret = parse_user_input(argc, argv, ctx); + if (!ret) + return ret; + + if (ctx->kill_pdp) { + return kill_data_call_pdp(ctx->kill_pdp, argv[0]); + } + + if (ctx->logfile) { + logfilefp = fopen(ctx->logfile, "a+"); + if (!logfilefp) { + dbg_time("Fail to open %s, errno: %d(%s)", ctx->logfile, errno, strerror(errno)); + } + } + + signal(SIGINT, ql_sigaction); + signal(SIGTERM, ql_sigaction); + signal(SIGALRM, ql_sigaction); + + do { + ret = quectel_CM(ctx); + if (g_donot_exit_when_modem_hangup > 0) + sleep(3); + } while (g_donot_exit_when_modem_hangup > 0); + + if (logfilefp) { + fclose(logfilefp); + } + + return ret; +} diff --git a/wwan/app/quectel_cm_5G/src/mbim-cm.c b/wwan/app/quectel_cm_5G/src/mbim-cm.c new file mode 100644 index 0000000..15ae772 --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/mbim-cm.c @@ -0,0 +1,2426 @@ +/****************************************************************************** + @file mbim-cm.c + @brief MIBIM drivers. + + DESCRIPTION + Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. 
+
+  ---------------------------------------------------------------------------
+  Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved.
+  Quectel Wireless Solution Proprietary and Confidential.
+  ---------------------------------------------------------------------------
+******************************************************************************/
+/* NOTE (editor): the angle-bracket header names below were lost in extraction;
+ * this is a plausible reconstruction of the system headers this file needs. */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <ctype.h>
+#include <signal.h>
+#include <pthread.h>
+#include <poll.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <endian.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include "QMIThread.h"
+
+#define mbim_debug dbg_time
+
+#define UUID_BASIC_CONNECT "a289cc33-bcbb-8b4f-b6b0-133ec2aae6df"
+//https://docs.microsoft.com/en-us/windows-hardware/drivers/network/mb-5g-data-class-support
+#define UUID_BASIC_CONNECT_EXT "3d01dcc5-fef5-4d05-0d3a-bef7058e9aaf"
+#define UUID_SMS "533fbeeb-14fe-4467-9f90-33a223e56c3f"
+#define UUID_USSD "e550a0c8-5e82-479e-82f7-10abf4c3351f"
+#define UUID_PHONEBOOK "4bf38476-1e6a-41db-b1d8-bed289c25bdb"
+#define UUID_STK "d8f20131-fcb5-4e17-8602-d6ed3816164c"
+#define UUID_AUTH "1d2b5ff7-0aa1-48b2-aa52-50f15767174e"
+#define UUID_DSS "c08a26dd-7718-4382-8482-6e0d583c4d0e"
+#define uuid_ext_qmux "d1a30bc2-f97a-6e43-bf65-c7e24fb0f0d3"
+#define uuid_mshsd "883b7c26-985f-43fa-9804-27d7fb80959c"
+#define uuid_qmbe "2d0c12c9-0e6a-495a-915c-8d174fe5d63c"
+#define UUID_MSFWID "e9f7dea2-feaf-4009-93ce-90a3694103b6"
+#define uuid_atds "5967bdcc-7fd2-49a2-9f5c-b2e70e527db3"
+#define uuid_qdu "6427015f-579d-48f5-8c54-f43ed1e76f83"
+#define UUID_MS_UICC_LOW_LEVEL "c2f6588e-f037-4bc9-8665-f4d44bd09367"
+#define UUID_MS_SARControl "68223D04-9F6C-4E0F-822D-28441FB72340"
+#define UUID_VOICEEXTENSIONS "8d8b9eba-37be-449b-8f1e-61cb034a702e"
+#define UUID_LIBMBIM_PROXY "838cf7fb-8d0d-4d7f-871e-d71dbefbb39b"
+
+#define UUID_MBIMContextTypeInternet "7E5E2A7E-4E6F-7272-736B-656E7E5E2A7E"
+
+typedef unsigned char UINT8;
+typedef unsigned short UINT16;
+typedef unsigned int UINT32;
+typedef unsigned long long UINT64;
+
+#pragma pack(4)
+typedef enum {
+    MBIM_CID_CMD_TYPE_QUERY = 0,
+    MBIM_CID_CMD_TYPE_SET = 1,
+} MBIM_CID_CMD_TYPE_E;
+
+typedef enum {
+    MBIM_CID_DEVICE_CAPS = 1,
+    MBIM_CID_SUBSCRIBER_READY_STATUS = 2,
+    MBIM_CID_RADIO_STATE = 3,
+    MBIM_CID_PIN = 4,
+    MBIM_CID_PIN_LIS = 5,
+    MBIM_CID_HOME_PROVIDER = 6,
+    MBIM_CID_PREFERRED_PROVIDERS = 7,
+    MBIM_CID_VISIBLE_PROVIDERS = 8,
+    MBIM_CID_REGISTER_STATE = 9,
+    MBIM_CID_PACKET_SERVICE = 10,
+    MBIM_CID_SIGNAL_STATE = 11,
+    MBIM_CID_CONNECT = 12,
+    MBIM_CID_PROVISIONED_CONTEXTS = 13,
+    MBIM_CID_SERVICE_ACTIVATION = 14,
+    MBIM_CID_IP_CONFIGURATION = 15,
+    MBIM_CID_DEVICE_SERVICES = 16,
+    MBIM_CID_DEVICE_SERVICE_SUBSCRIBE_LIST = 19,
+    MBIM_CID_PACKET_STATISTICS = 20,
+    MBIM_CID_NETWORK_IDLE_HINT = 21,
+    MBIM_CID_EMERGENCY_MODE = 22,
+    MBIM_CID_IP_PACKET_FILTERS = 23,
+    MBIM_CID_MULTICARRIER_PROVIDERS = 24,
+} UUID_BASIC_CONNECT_CID_E;
+
+typedef enum{
+    MBIM_CID_MS_PROVISIONED_CONTEXT_V2 = 1,
+    MBIM_CID_MS_NETWORK_BLACKLIST = 2,
+    MBIM_CID_MS_LTE_ATTACH_CONFIG = 3,
+    MBIM_CID_MS_LTE_ATTACH_STATUS = 4,
+    MBIM_CID_MS_SYS_CAPS = 5,
+    MBIM_CID_MS_DEVICE_CAPS_V2 = 6,
+    MBIM_CID_MS_DEVICE_SLOT_MAPPING = 7,
+    MBIM_CID_MS_SLOT_INFO_STATUS = 8,
+    MBIM_CID_MS_PCO = 9,
+    MBIM_CID_MS_DEVICE_RESET = 10,
+    MBIM_CID_MS_BASE_STATIONS_INFO = 11,
+    MBIM_CID_MS_LOCATION_INFO_STATUS = 12,
+    MBIM_CID_NOT_DEFINED = 13,
+    MBIM_CID_MS_PIN_EX = 14,
+    MBIM_CID_MS_VERSION = 15,
+} UUID_BASIC_CONNECT_EXT_CID_E;
+
+typedef enum {
+    MBIM_CID_SMS_CONFIGURATION = 1, // Y Y Y
MBIM_CID_SMS_READ = 2, // N Y Y + MBIM_CID_SMS_SEND = 3, // Y N N + MBIM_CID_SMS_DELETE = 4, // Y N N + MBIM_CID_SMS_MESSAGE_STORE_STATUS = 5, // N Y Y +} UUID_SMS_CID_E; + +typedef enum { + MBIM_CID_DSS_CONNECT = 1, // Y N N +} UUID_DSS_CID_E; + +typedef enum{ + MBIM_OPEN_MSG = 1, + MBIM_CLOSE_MSG = 2, + MBIM_COMMAND_MSG = 3, + MBIM_HOST_ERROR_MSG = 4, + MBIM_OPEN_DONE = 0x80000001, + MBIM_CLOSE_DONE = 0x80000002, + MBIM_COMMAND_DONE = 0x80000003, + MBIM_FUNCTION_ERROR_MSG = 0x80000004, + MBIM_INDICATE_STATUS_MSG = 0x80000007, +} MBIM_MSG_Type_E; + +typedef enum { /*< since=1.10 >*/ + MBIM_CID_PROXY_CONTROL_UNKNOWN = 0, + MBIM_CID_PROXY_CONTROL_CONFIGURATION = 1 +} UUID_LIBMBIM_PROXY_CID_E; + +typedef enum { + MBIM_CID_MS_UICC_ATR = 1, + MBIM_CID_MS_UICC_OPEN_CHANNEL = 2, + MBIM_CID_MS_UICC_CLOSE_CHANNEL = 3, + MBIM_CID_MS_UICC_APDU = 4, + MBIM_CID_MS_UICC_TERMINAL_CAPABILITY = 5, + MBIM_CID_MS_UICC_RESET = 6, + MBIM_CID_MS_APP_LIST = 7, +} UUID_MS_UICC_CID_E; + +typedef enum { + MBIM_ERROR_TIMEOUT_FRAGMENT = 1, + MBIM_ERROR_FRAGMENT_OUT_OF_SEQUENCE = 2, + MBIM_ERROR_LENGTH_MISMATCH = 3, + MBIM_ERROR_DUPLICATED_TID = 4, + MBIM_ERROR_NOT_OPENED = 5, + MBIM_ERROR_UNKNOWN = 6, + MBIM_ERROR_CANCEL = 7, + MBIM_ERROR_MAX_TRANSFER = 8, +} MBIM_ERROR_E; + +typedef enum { + MBIM_STATUS_SUCCESS = 0, + MBIM_STATUS_BUSY = 1, + MBIM_STATUS_FAILURE = 2, + MBIM_STATUS_SIM_NOT_INSERTED = 3, + MBIM_STATUS_BAD_SIM = 4, + MBIM_STATUS_PIN_REQUIRED = 5, + MBIM_STATUS_PIN_DISABLED = 6, + MBIM_STATUS_NOT_REGISTERED = 7, + MBIM_STATUS_PROVIDERS_NOT_FOUND = 8, + MBIM_STATUS_NO_DEVICE_SUPPORT = 9, + MBIM_STATUS_PROVIDER_NOT_VISIBLE = 10, + MBIM_STATUS_DATA_CLASS_NOT_AVAILABL = 11, + MBIM_STATUS_PACKET_SERVICE_DETACHED = 12, +} MBIM_STATUS_CODES_E; + +typedef enum { + MBIMPacketServiceActionAttach = 0, + MBIMPacketServiceActionDetach = 1, +} MBIM_PACKET_SERVICE_ACTION_E; + +typedef enum { + MBIMPacketServiceStateUnknown = 0, + MBIMPacketServiceStateAttaching = 1, + MBIMPacketServiceStateAttached = 2, + MBIMPacketServiceStateDetaching = 3, + MBIMPacketServiceStateDetached = 4, +} MBIM_PACKET_SERVICE_STATE_E; + +static const char *MBIMPacketServiceStateStr(int _val) { + struct { int val;char *name;} _enumstr[] = { + {MBIMPacketServiceStateUnknown, "Unknown"}, + {MBIMPacketServiceStateAttaching, "Attaching"}, + {MBIMPacketServiceStateAttached, "Attached"}, + {MBIMPacketServiceStateDetaching, "Detaching"}, + {MBIMPacketServiceStateDetached, "Detached"}, + }; + int idx; + + for (idx = 0; idx < (int)(sizeof(_enumstr)/sizeof(_enumstr[0])); idx++) { + if (_val == _enumstr[idx].val) + return _enumstr[idx].name; + } + + return "Undefined"; +}; + +typedef enum { + MBIMDataClassNone = 0x0, + MBIMDataClassGPRS = 0x1, + MBIMDataClassEDGE = 0x2, + MBIMDataClassUMTS = 0x4, + MBIMDataClassHSDPA = 0x8, + MBIMDataClassHSUPA = 0x10, + MBIMDataClassLTE = 0x20, + MBIMDataClass5G_NSA = 0x40, + MBIMDataClass5G_SA = 0x80, + MBIMDataClass1XRTT = 0x10000, + MBIMDataClass1XEVDO = 0x20000, + MBIMDataClass1XEVDORevA = 0x40000, + MBIMDataClass1XEVDV = 0x80000, + MBIMDataClass3XRTT = 0x100000, + MBIMDataClass1XEVDORevB = 0x200000, + MBIMDataClassUMB = 0x400000, + MBIMDataClassCustom = 0x80000000, +} MBIM_DATA_CLASS_E; + +static const char *MBIMDataClassStr(int _val) { + struct { int val;char *name;} _enumstr[] = { + {MBIMDataClassNone, "None"}, + {MBIMDataClassGPRS, "GPRS"}, + {MBIMDataClassEDGE, "EDGE"}, + {MBIMDataClassUMTS, "UMTS"}, + {MBIMDataClassHSDPA, "HSDPA"}, + {MBIMDataClassHSUPA, "HSUPA"}, + {MBIMDataClassLTE, "LTE"}, + 
{MBIMDataClass5G_NSA, "5G_NSA"}, + {MBIMDataClass5G_SA, "5G_SA"}, + {MBIMDataClass1XRTT, "1XRTT"}, + {MBIMDataClass1XEVDO, "1XEVDO"}, + {MBIMDataClass1XEVDORevA, "1XEVDORevA"}, + {MBIMDataClass1XEVDV, "1XEVDV"}, + {MBIMDataClass3XRTT, "3XRTT"}, + {MBIMDataClass1XEVDORevB, "1XEVDORevB"}, + {MBIMDataClassUMB, "UMB"}, + {MBIMDataClassCustom, "Custom"}, + }; + int idx; + + for (idx = 0; idx < (int)(sizeof(_enumstr)/sizeof(_enumstr[0])); idx++) { + if (_val == _enumstr[idx].val) + return _enumstr[idx].name; + } + + return "Unknow"; +}; + +typedef struct { + UINT32 NwError; + UINT32 PacketServiceState; //MBIM_PACKET_SERVICE_STATE_E + UINT32 HighestAvailableDataClass; //MBIM_DATA_CLASS_E + UINT64 UplinkSpeed; + UINT64 DownlinkSpeed; +} MBIM_PACKET_SERVICE_INFO_T; + +typedef struct { + UINT32 NwError; + UINT32 PacketServiceState; //MBIM_PACKET_SERVICE_STATE_E + UINT32 CurrentDataClass; //MBIM_DATA_CLASS_E + UINT64 UplinkSpeed; + UINT64 DownlinkSpeed; + UINT32 FrequencyRange; +} MBIM_PACKET_SERVICE_INFO_V2_T; + +typedef enum { + MBIMSubscriberReadyStateNotInitialized = 0, + MBIMSubscriberReadyStateInitialized = 1, + MBIMSubscriberReadyStateSimNotInserted = 2, + MBIMSubscriberReadyStateBadSim = 3, + MBIMSubscriberReadyStateFailure = 4, + MBIMSubscriberReadyStateNotActivated = 5, + MBIMSubscriberReadyStateDeviceLocked = 6, +}MBIM_SUBSCRIBER_READY_STATE_E; + +static const char *MBIMSubscriberReadyStateStr(int _val) { + struct { int val;char *name;} _enumstr[] = { + {MBIMSubscriberReadyStateNotInitialized, "NotInitialized"}, + {MBIMSubscriberReadyStateInitialized, "Initialized"}, + {MBIMSubscriberReadyStateSimNotInserted, "NotInserted"}, + {MBIMSubscriberReadyStateBadSim, "BadSim"}, + {MBIMSubscriberReadyStateFailure, "Failure"}, + {MBIMSubscriberReadyStateNotActivated, "NotActivated"}, + {MBIMSubscriberReadyStateDeviceLocked, "DeviceLocked"}, + }; + int idx; + + for (idx = 0; idx < (int)(sizeof(_enumstr)/sizeof(_enumstr[0])); idx++) { + if (_val == _enumstr[idx].val) + return _enumstr[idx].name; + } + + return "Undefined"; +}; + +typedef struct { + UINT32 DeviceType; //MBIM_DEVICE_TYPE + UINT32 CellularClass; //MBIM_CELLULAR_CLASS + UINT32 VoiceClass; //MBIM_VOICE_CLASS + UINT32 SimClass; //MBIM_SIM_CLASS + UINT32 DataClass; //MBIM_DATA_CLASS + UINT32 SmsCaps; //MBIM_SMS_CAPS + UINT32 ControlCaps; //MBIM_CTRL_CAPS + UINT32 MaxSessions; + UINT32 CustomDataClassOffset; + UINT32 CustomDataClassSize; + UINT32 DeviceIdOffset; + UINT32 DeviceIdSize; + UINT32 FirmwareInfoOffset; + UINT32 FirmwareInfoSize; + UINT32 HardwareInfoOffset; + UINT32 HardwareInfoSize; + UINT8 DataBuffer[0]; //DeviceId FirmwareInfo HardwareInfo +} MBIM_DEVICE_CAPS_INFO_T; + +typedef enum { + MBIMRadioOff = 0, + MBIMRadioOn = 1, +} MBIM_RADIO_SWITCH_STATE_E; + +typedef struct { + MBIM_RADIO_SWITCH_STATE_E RadioState; +} MBIM_SET_RADIO_STATE_T; + +typedef struct { + MBIM_RADIO_SWITCH_STATE_E HwRadioState; + MBIM_RADIO_SWITCH_STATE_E SwRadioState; +} MBIM_RADIO_STATE_INFO_T; + +typedef enum { + MBIMReadyInfoFlagsNone, + MBIMReadyInfoFlagsProtectUniqueID, +}MBIM_UNIQUE_ID_FLAGS; + +typedef struct { + UINT32 ReadyState; + UINT32 SubscriberIdOffset; + UINT32 SubscriberIdSize; + UINT32 SimIccIdOffset; + UINT32 SimIccIdSize; + UINT32 ReadyInfo; + UINT32 ElementCount; + UINT8 *TelephoneNumbersRefList; + UINT8 *DataBuffer; +} MBIM_SUBSCRIBER_READY_STATUS_T; + +typedef enum { + MBIMRegisterActionAutomatic, + MBIMRegisterActionManual, +}MBIM_REGISTER_ACTION_E; + +typedef enum { + MBIMRegisterStateUnknown = 0, + MBIMRegisterStateDeregistered = 1, 
+ MBIMRegisterStateSearching = 2, + MBIMRegisterStateHome = 3, + MBIMRegisterStateRoaming = 4, + MBIMRegisterStatePartner = 5, + MBIMRegisterStateDenied = 6, +}MBIM_REGISTER_STATE_E; + +typedef enum { + MBIMRegisterModeUnknown = 0, + MBIMRegisterModeAutomatic = 1, + MBIMRegisterModeManual = 2, +}MBIM_REGISTER_MODE_E; + +static const char *MBIMRegisterStateStr(int _val) { + struct { int val;char *name;} _enumstr[] ={ + {MBIMRegisterStateUnknown, "Unknown"}, + {MBIMRegisterStateDeregistered, "Deregistered"}, + {MBIMRegisterStateSearching, "Searching"}, + {MBIMRegisterStateHome, "Home"}, + {MBIMRegisterStateRoaming, "Roaming"}, + {MBIMRegisterStatePartner, "Partner"}, + {MBIMRegisterStateDenied, "Denied"}, + }; + int idx; + + for (idx = 0; idx < (int)(sizeof(_enumstr)/sizeof(_enumstr[0])); idx++) { + if (_val == _enumstr[idx].val) + return _enumstr[idx].name; + } + + return "Undefined"; +}; + +static const char *MBIMRegisterModeStr(int _val) { + struct { int val;char *name;} _enumstr[] = { + {MBIMRegisterModeUnknown, "Unknown"}, + {MBIMRegisterModeAutomatic, "Automatic"}, + {MBIMRegisterModeManual, "Manual"}, + }; + int idx; + + for (idx = 0; idx < (int)(sizeof(_enumstr)/sizeof(_enumstr[0])); idx++) { + if (_val == _enumstr[idx].val) + return _enumstr[idx].name; + } + + return "Undefined"; +}; + +typedef enum { + MBIM_REGISTRATION_NONE, + MBIM_REGISTRATION_MANUAL_SELECTION_NOT_AVAILABLE, + MBIM_REGISTRATION_PACKET_SERVICE_AUTOMATIC_ATTACH, +}MBIM_REGISTRATION_FLAGS_E; + +typedef struct { + UINT32 NwError; + UINT32 RegisterState; //MBIM_REGISTER_STATE_E + UINT32 RegisterMode; + UINT32 AvailableDataClasses; + UINT32 CurrentCellularClass; + UINT32 ProviderIdOffset; + UINT32 ProviderIdSize; + UINT32 ProviderNameOffset; + UINT32 ProviderNameSize; + UINT32 RoamingTextOffset; + UINT32 RoamingTextSize; + UINT32 RegistrationFlag; + UINT8 *DataBuffer; +} MBIM_REGISTRATION_STATE_INFO_T; + +typedef struct { + UINT32 NwError; + UINT32 RegisterState; //MBIM_REGISTER_STATE_E + UINT32 RegisterMode; + UINT32 AvailableDataClasses; + UINT32 CurrentCellularClass; + UINT32 ProviderIdOffset; + UINT32 ProviderIdSize; + UINT32 ProviderNameOffset; + UINT32 ProviderNameSize; + UINT32 RoamingTextOffset; + UINT32 RoamingTextSize; + UINT32 RegistrationFlag; + UINT32 PreferredDataClass; + UINT8 *DataBuffer; +} MBIM_REGISTRATION_STATE_INFO_V2_T; + +typedef struct { + UINT32 MessageType; //Specifies the MBIM message type. + UINT32 MessageLength; //Specifies the total length of this MBIM message in bytes. + /* Specifies the MBIM message id value. This value is used to match host sent messages with function responses. + This value must be unique among all outstanding transactions. + For notifications, the TransactionId must be set to 0 by the function */ + UINT32 TransactionId; +} MBIM_MESSAGE_HEADER; + +typedef struct { + UINT32 TotalFragments; //this field indicates how many fragments there are intotal. + UINT32 CurrentFragment; //This field indicates which fragment this message is. 
Values are 0 to TotalFragments - 1.
+} MBIM_FRAGMENT_HEADER;
+
+typedef struct {
+    MBIM_MESSAGE_HEADER MessageHeader;
+    UINT32 MaxControlTransfer;
+} MBIM_OPEN_MSG_T;
+
+typedef struct {
+    MBIM_MESSAGE_HEADER MessageHeader;
+    UINT32 Status; //MBIM_STATUS_CODES_E
+} MBIM_OPEN_DONE_T;
+
+typedef struct {
+    MBIM_MESSAGE_HEADER MessageHeader;
+} MBIM_CLOSE_MSG_T;
+
+typedef struct {
+    MBIM_MESSAGE_HEADER MessageHeader;
+    UINT32 Status;
+} MBIM_CLOSE_DONE_T;
+
+typedef struct {
+    UINT8 uuid[16];
+} UUID_T;
+
+typedef struct {
+    MBIM_MESSAGE_HEADER MessageHeader;
+    MBIM_FRAGMENT_HEADER FragmentHeader;
+    UUID_T DeviceServiceId; //A 16 byte UUID that identifies the device service the following CID value applies to.
+    UINT32 CID; //Specifies the CID that identifies the parameter being queried for
+    UINT32 CommandType; //0 for a query operation, 1 for a Set operation
+    UINT32 InformationBufferLength; //Size of the total InformationBuffer; may be larger than the current message if fragmented.
+    UINT8 InformationBuffer[0]; //Data supplied to the device, specific to the CID
+} MBIM_COMMAND_MSG_T;
+
+typedef struct {
+    MBIM_MESSAGE_HEADER MessageHeader;
+    MBIM_FRAGMENT_HEADER FragmentHeader;
+    UUID_T DeviceServiceId; //A 16 byte UUID that identifies the device service the following CID value applies to.
+    UINT32 CID; //Specifies the CID that identifies the parameter being queried for
+    UINT32 Status;
+    UINT32 InformationBufferLength; //Size of the total InformationBuffer; may be larger than the current message if fragmented.
+    UINT8 InformationBuffer[0]; //Data supplied to the device, specific to the CID
+} MBIM_COMMAND_DONE_T;
+
+typedef struct {
+    MBIM_MESSAGE_HEADER MessageHeader;
+    UINT32 ErrorStatusCode;
+} MBIM_HOST_ERROR_MSG_T;
+
+typedef struct {
+    MBIM_MESSAGE_HEADER MessageHeader;
+    UINT32 ErrorStatusCode;
+} MBIM_FUNCTION_ERROR_MSG_T;
+
+typedef struct {
+    MBIM_MESSAGE_HEADER MessageHeader;
+    MBIM_FRAGMENT_HEADER FragmentHeader;
+    UUID_T DeviceServiceId; //A 16 byte UUID that identifies the device service the following CID value applies to.
+    UINT32 CID; //Specifies the CID that identifies the parameter being queried for
+    UINT32 InformationBufferLength; //Size of the total InformationBuffer; may be larger than the current message if fragmented.
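+    /* [Editor's note] InformationBuffer[0] below is the zero-length-array
+     * idiom: the variable-size payload is carved out of the same heap block
+     * as the fixed header, exactly as compose_basic_connect_command() does
+     * later in this file. A minimal standalone sketch of the idiom (msg_t and
+     * make_msg are illustrative names, not part of this code):
+     *
+     *   #include <stdlib.h>
+     *   #include <string.h>
+     *   typedef struct { unsigned len; unsigned char data[0]; } msg_t;
+     *   static msg_t *make_msg(const void *payload, unsigned len) {
+     *       msg_t *m = malloc(sizeof(*m) + len); // header + payload, one block
+     *       if (!m) return NULL;
+     *       m->len = len;
+     *       memcpy(m->data, payload, len);
+     *       return m;
+     *   }
+     */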
+ UINT8 InformationBuffer[0]; //Data supplied to device specific to the CID +} MBIM_INDICATE_STATUS_MSG_T; + +typedef struct { + UINT32 offset; + UINT32 size; +} OL_PAIR_LIST; + +typedef struct { + UUID_T DeviceServiceId; + UINT32 DssPayload; + UINT32 MaxDssInstances; + UINT32 CidCount; + UINT32 CidList[]; +} MBIM_DEVICE_SERVICE_ELEMENT_T; + +typedef struct { + UINT32 DeviceServicesCount; + UINT32 MaxDssSessions; + OL_PAIR_LIST DeviceServicesRefList[]; +} MBIM_DEVICE_SERVICES_INFO_T; + +typedef enum { + MBIMActivationCommandDeactivate = 0, + MBIMActivationCommandActivate = 1, +} MBIM_ACTIVATION_COMMAND_E; + +typedef enum { + MBIMCompressionNone = 0, + MBIMCompressionEnable = 1, +} MBIM_COMPRESSION_E; + +typedef enum { + MBIMAuthProtocolNone = 0, + MBIMAuthProtocolPap = 1, + MBIMAuthProtocolChap = 2, + MBIMAuthProtocolMsChapV2 = 3, +} MBIM_AUTH_PROTOCOL_E; + +typedef enum { + MBIMContextIPTypeDefault = 0, + MBIMContextIPTypeIPv4 = 1, + MBIMContextIPTypeIPv6 = 2, + MBIMContextIPTypeIPv4v6 = 3, + MBIMContextIPTypeIPv4AndIPv6 = 4, +} MBIM_CONTEXT_IP_TYPE_E; + +typedef enum { + MBIMActivationStateUnknown = 0, + MBIMActivationStateActivated = 1, + MBIMActivationStateActivating = 2, + MBIMActivationStateDeactivated = 3, + MBIMActivationStateDeactivating = 4, +} MBIM_ACTIVATION_STATE_E; + +typedef enum { + MBIMVoiceCallStateNone = 0, + MBIMVoiceCallStateInProgress = 1, + MBIMVoiceCallStateHangUp = 2, +} MBIM_VOICECALL_STATE_E; + +static const char *MBIMMSGTypeStr(int _val) { + struct { int val;char *name;} _enumstr[] = { + {MBIM_OPEN_MSG, "MBIM_OPEN_MSG"}, + {MBIM_CLOSE_MSG, "MBIM_CLOSE_MSG"}, + {MBIM_COMMAND_MSG, "MBIM_COMMAND_MSG"}, + {MBIM_HOST_ERROR_MSG, "MBIM_HOST_ERROR_MSG"}, + {MBIM_OPEN_DONE, "MBIM_OPEN_DONE"}, + {MBIM_CLOSE_DONE, "MBIM_CLOSE_DONE"}, + {MBIM_COMMAND_DONE, "MBIM_COMMAND_DONE"}, + {MBIM_FUNCTION_ERROR_MSG, "MBIM_FUNCTION_ERROR_MSG"}, + {MBIM_INDICATE_STATUS_MSG, "MBIM_INDICATE_STATUS_MSG"}, + }; + int idx; + + for (idx = 0; idx < (int)(sizeof(_enumstr)/sizeof(_enumstr[0])); idx++) { + if (_val == _enumstr[idx].val) + return _enumstr[idx].name; + } + + return "MBIMMSGTypeUnknow"; +}; + +static const char *MBIMContextIPTypeStr(int _val) { + struct { int val;char *name;} _enumstr[] = { + {MBIMContextIPTypeDefault, "MBIMContextIPTypeDefault"}, + {MBIMContextIPTypeIPv4, "MBIMContextIPTypeIPv4"}, + {MBIMContextIPTypeIPv6, "MBIMContextIPTypeIPv6"}, + {MBIMContextIPTypeIPv4v6, "MBIMContextIPTypeIPv4v6"}, + {MBIMContextIPTypeIPv4AndIPv6, "MBIMContextIPTypeIPv4AndIPv6"}, + }; + int idx; + + for (idx = 0; idx < (int)(sizeof(_enumstr)/sizeof(_enumstr[0])); idx++) { + if (_val == _enumstr[idx].val) + return _enumstr[idx].name; + } + + return "MBIMContextIPTypeUnknow"; +} + +static const char *MBIMActivationStateStr(int _val) { + struct { int val;char *name;} _enumstr[] = { + {MBIMActivationStateUnknown, "Unknown"}, + {MBIMActivationStateActivated, "Activated"}, + {MBIMActivationStateActivating, "Activating"}, + {MBIMActivationStateDeactivated, "Deactivated"}, + {MBIMActivationStateDeactivating, "Deactivating"}, + }; + int idx; + + for (idx = 0; idx < (int)(sizeof(_enumstr)/sizeof(_enumstr[0])); idx++) { + if (_val == _enumstr[idx].val) + return _enumstr[idx].name; + } + + return "Undefined"; +}; + +static const char *MBIMVoiceCallStateStr(int _val) { + struct { int val;char *name;} _enumstr[] = { + {MBIMVoiceCallStateNone, "None"}, + {MBIMVoiceCallStateInProgress, "InProgress"}, + {MBIMVoiceCallStateHangUp, "HangUp"}, + }; + int idx; + + for (idx = 0; idx < 
(int)(sizeof(_enumstr)/sizeof(_enumstr[0])); idx++) { + if (_val == _enumstr[idx].val) + return _enumstr[idx].name; + } + + return "Undefined"; +}; + +typedef struct { + const char *uuid; + UINT32 cid; + const char *name; +} UUID_CID_STR; + +static const UUID_CID_STR uuid_cid_string[] = { + {UUID_BASIC_CONNECT, MBIM_CID_DEVICE_CAPS, "MBIM_CID_DEVICE_CAPS"}, + {UUID_BASIC_CONNECT, MBIM_CID_SUBSCRIBER_READY_STATUS, "MBIM_CID_SUBSCRIBER_READY_STATUS"}, + {UUID_BASIC_CONNECT, MBIM_CID_RADIO_STATE, "MBIM_CID_RADIO_STATE"}, + {UUID_BASIC_CONNECT, MBIM_CID_PIN, "MBIM_CID_PIN"}, + {UUID_BASIC_CONNECT, MBIM_CID_PIN_LIS, "MBIM_CID_PIN_LIS"}, + {UUID_BASIC_CONNECT, MBIM_CID_HOME_PROVIDER, "MBIM_CID_HOME_PROVIDER"}, + {UUID_BASIC_CONNECT, MBIM_CID_PREFERRED_PROVIDERS, "MBIM_CID_PREFERRED_PROVIDERS"}, + {UUID_BASIC_CONNECT, MBIM_CID_VISIBLE_PROVIDERS, "MBIM_CID_VISIBLE_PROVIDERS"}, + {UUID_BASIC_CONNECT, MBIM_CID_REGISTER_STATE, "MBIM_CID_REGISTER_STATE"}, + {UUID_BASIC_CONNECT, MBIM_CID_PACKET_SERVICE, "MBIM_CID_PACKET_SERVICE"}, + {UUID_BASIC_CONNECT, MBIM_CID_SIGNAL_STATE, "MBIM_CID_SIGNAL_STATE"}, + {UUID_BASIC_CONNECT, MBIM_CID_CONNECT, "MBIM_CID_CONNECT"}, + {UUID_BASIC_CONNECT, MBIM_CID_PROVISIONED_CONTEXTS, "MBIM_CID_PROVISIONED_CONTEXTS"}, + {UUID_BASIC_CONNECT, MBIM_CID_SERVICE_ACTIVATION, "MBIM_CID_SERVICE_ACTIVATION"}, + {UUID_BASIC_CONNECT, MBIM_CID_IP_CONFIGURATION, "MBIM_CID_IP_CONFIGURATION"}, + {UUID_BASIC_CONNECT, MBIM_CID_DEVICE_SERVICES, "MBIM_CID_DEVICE_SERVICES"}, + {UUID_BASIC_CONNECT, MBIM_CID_DEVICE_SERVICE_SUBSCRIBE_LIST, "MBIM_CID_DEVICE_SERVICE_SUBSCRIBE_LIST"}, + {UUID_BASIC_CONNECT, MBIM_CID_PACKET_STATISTICS, "MBIM_CID_PACKET_STATISTICS"}, + {UUID_BASIC_CONNECT, MBIM_CID_NETWORK_IDLE_HINT, "MBIM_CID_NETWORK_IDLE_HINT"}, + {UUID_BASIC_CONNECT, MBIM_CID_EMERGENCY_MODE, "MBIM_CID_EMERGENCY_MODE"}, + {UUID_BASIC_CONNECT, MBIM_CID_IP_PACKET_FILTERS, "MBIM_CID_IP_PACKET_FILTERS"}, + {UUID_BASIC_CONNECT, MBIM_CID_MULTICARRIER_PROVIDERS, "MBIM_CID_MULTICARRIER_PROVIDERS"}, + + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_PROVISIONED_CONTEXT_V2, "MBIM_CID_MS_PROVISIONED_CONTEXT_V2"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_NETWORK_BLACKLIST, "MBIM_CID_MS_NETWORK_BLACKLIST"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_LTE_ATTACH_CONFIG, "MBIM_CID_MS_LTE_ATTACH_CONFIG"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_LTE_ATTACH_STATUS, "MBIM_CID_MS_LTE_ATTACH_STATUS"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_SYS_CAPS, "MBIM_CID_MS_SYS_CAPS"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_DEVICE_CAPS_V2, "MBIM_CID_MS_DEVICE_CAPS_V2"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_DEVICE_SLOT_MAPPING, "MBIM_CID_MS_DEVICE_SLOT_MAPPING"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_SLOT_INFO_STATUS, "MBIM_CID_MS_SLOT_INFO_STATUS"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_PCO, "MBIM_CID_MS_PCO"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_DEVICE_RESET, "MBIM_CID_MS_DEVICE_RESET"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_BASE_STATIONS_INFO, "MBIM_CID_MS_BASE_STATIONS_INFO"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_LOCATION_INFO_STATUS, "MBIM_CID_MS_LOCATION_INFO_STATUS"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_NOT_DEFINED, "MBIM_CID_NOT_DEFINED"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_PIN_EX, "MBIM_CID_MS_PIN_EX"}, + {UUID_BASIC_CONNECT_EXT, MBIM_CID_MS_VERSION, "MBIM_CID_MS_VERSION"}, + + {UUID_MS_UICC_LOW_LEVEL, MBIM_CID_MS_UICC_ATR, "MBIM_CID_MS_UICC_ATR"}, + {UUID_MS_UICC_LOW_LEVEL, MBIM_CID_MS_UICC_OPEN_CHANNEL, "MBIM_CID_MS_UICC_OPEN_CHANNEL"}, + {UUID_MS_UICC_LOW_LEVEL, MBIM_CID_MS_UICC_CLOSE_CHANNEL, 
"MBIM_CID_MS_UICC_CLOSE_CHANNEL"}, + {UUID_MS_UICC_LOW_LEVEL, MBIM_CID_MS_UICC_APDU, "MBIM_CID_MS_UICC_APDU"}, + {UUID_MS_UICC_LOW_LEVEL, MBIM_CID_MS_UICC_TERMINAL_CAPABILITY, "MBIM_CID_MS_UICC_TERMINAL_CAPABILITY"}, + {UUID_MS_UICC_LOW_LEVEL, MBIM_CID_MS_UICC_RESET, "MBIM_CID_MS_UICC_RESET"}, + {UUID_MS_UICC_LOW_LEVEL, MBIM_CID_MS_APP_LIST, "MBIM_CID_MS_APP_LIST"}, +}; + +typedef struct { + UINT32 SessionId; + UINT32 ActivationCommand; //MBIM_ACTIVATION_COMMAND_E + UINT32 AccessStringOffset; + UINT32 AccessStringSize; + UINT32 UserNameOffset; + UINT32 UserNameSize; + UINT32 PasswordOffset; + UINT32 PasswordSize; + UINT32 Compression; //MBIM_COMPRESSION_E + UINT32 AuthProtocol; //MBIM_AUTH_PROTOCOL_E + UINT32 IPType; //MBIM_CONTEXT_IP_TYPE_E + UUID_T ContextType; + UINT8 DataBuffer[0]; /* apn, username, password */ +} MBIM_SET_CONNECT_T; + +typedef struct { + UINT32 SessionId; + UINT32 ActivationState; //MBIM_ACTIVATION_STATE_E + UINT32 VoiceCallState; + UINT32 IPType; //MBIM_CONTEXT_IP_TYPE_E + UUID_T ContextType; + UINT32 NwError; +} MBIM_CONNECT_T; + +typedef struct { + UINT32 OnLinkPrefixLength; + UINT8 IPv4Address[4]; +} MBIM_IPV4_ELEMENT_T; + +typedef struct { + UINT32 OnLinkPrefixLength; + UINT8 IPv6Address[16]; +} MBIM_IPV6_ELEMENT_T; + +typedef struct { + UINT32 SessionId; + UINT32 IPv4ConfigurationAvailable; //bit0~Address, bit1~gateway, bit2~DNS, bit3~MTU + UINT32 IPv6ConfigurationAvailable; //bit0~Address, bit1~gateway, bit2~DNS, bit3~MTU + UINT32 IPv4AddressCount; + UINT32 IPv4AddressOffset; + UINT32 IPv6AddressCount; + UINT32 IPv6AddressOffset; + UINT32 IPv4GatewayOffset; + UINT32 IPv6GatewayOffset; + UINT32 IPv4DnsServerCount; + UINT32 IPv4DnsServerOffset; + UINT32 IPv6DnsServerCount; + UINT32 IPv6DnsServerOffset; + UINT32 IPv4Mtu; + UINT32 IPv6Mtu; + UINT8 DataBuffer[]; +} MBIM_IP_CONFIGURATION_INFO_T; + +typedef struct { + UINT32 RSRP; + UINT32 SNR; + UINT32 RSRPThreshold; + UINT32 SNRThreshold; + UINT32 SystemType; +} MBIM_RSRP_SNR_INFO_T; + +typedef struct { + UINT32 Elementcount; + MBIM_RSRP_SNR_INFO_T RsrpSnr[0]; +} MBIM_RSRP_SNR_T; + +typedef struct { + UINT32 Rssi; + UINT32 ErrorRate; + UINT32 SignalStrengthInterval; + UINT32 RssiThreshold; + UINT32 ErrorRateThreshold; +} MBIM_SIGNAL_STATE_INFO_T; + +typedef struct { + UINT32 Rssi; + UINT32 ErrorRate; + UINT32 SignalStrengthInterval; + UINT32 RssiThreshold; + UINT32 ErrorRateThreshold; + UINT32 RsrpSnrOffset; + UINT32 RsrpSnrSize; + UINT8 DataBuffer[]; +} MBIM_SIGNAL_STATE_INFO_V2_T; + +typedef struct { + UINT32 SignalStrengthInterval; + UINT32 RssiThreshold; + UINT32 ErrorRateThreshold; +} MBIM_SET_SIGNAL_STATE_T; + +typedef struct { + UINT32 DevicePathOffset; + UINT32 DevicePathSize; + UINT32 Timeout; + UINT8 DataBuffer[]; +} MBIM_LIBQMI_PROXY_CONFIG_T; + +typedef struct { + UINT32 AtrSize; + UINT32 AtrOffset; + UINT8 DataBuffer[]; +} MBIM_MS_ATR_INFO_T; + +#pragma pack() + +static pthread_t s_tid_reader = 0; +static int mbim_verbose = 0; +static UINT32 TransactionId = 1; +static unsigned mbim_default_timeout = 30000; +static const char *mbim_apn = NULL; +static const char *mbim_user = NULL; +static const char *mbim_passwd = NULL; +static int mbim_iptype = MBIMContextIPTypeDefault; +static int mbim_auth = MBIMAuthProtocolNone; +static int mbim_sessionID = 0; +static int mbim_fd = -1; +static MBIM_MESSAGE_HEADER *mbim_pRequest; +static MBIM_MESSAGE_HEADER *mbim_pResponse; + +static unsigned int qmi_over_mbim_support = 0; +static int qmi_over_mbim_sk[2] = {-1, -1}; +static pthread_mutex_t mbim_command_mutex = 
PTHREAD_MUTEX_INITIALIZER; +static pthread_cond_t mbim_command_cond = PTHREAD_COND_INITIALIZER; +static int mbim_ms_version = 1; +static uint8_t qmi_over_mbim_nas = 0; +int qmi_over_mbim_qmidev_send(PQCQMIMSG pQMI); + +static const UUID_T * str2uuid(const char *str) { + static UUID_T uuid; + UINT32 d[16]; + char tmp[16*2+4+1]; + unsigned i = 0; + + while (str[i]) { + tmp[i] = tolower(str[i]); + i++; + } + tmp[i] = '\0'; + + sscanf(tmp, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x", + &d[0], &d[1], &d[2], &d[3], &d[4], &d[5], &d[6], &d[7], + &d[8], &d[9], &d[10], &d[11], &d[12], &d[13], &d[14], &d[15]); + + for (i = 0; i < 16; i++) { + uuid.uuid[i] = d[i]&0xFF; + } + + return &uuid; +} + +static void wchar2char(const char *src, size_t src_size, char *dst, size_t dst_len) { + size_t i; + + for (i = 0; i < (dst_len-1) && i < (src_size/2); i++) + dst[i] = src[i*2]; + dst[i] = 0; +} + +static size_t char2wchar(const char *src, size_t src_len, uint8_t *dst, size_t dst_len) { + size_t i; + + if (src_len > (dst_len/2)) + src_len = (dst_len/2); + + for (i = 0; i < src_len; i++) { + *dst++ = *src++; + *dst++ = 0; + } + + return i*2; +} + +#define mbim_alloc( _size) malloc(_size) +#define mbim_free(_mem) do { if (_mem) { free(_mem); _mem = NULL;}} while(0) + +static int mbim_open_state = 0; +static MBIM_SUBSCRIBER_READY_STATE_E ReadyState = MBIMSubscriberReadyStateNotInitialized; +static MBIM_REGISTER_STATE_E RegisterState = MBIMRegisterStateUnknown; +static MBIM_PACKET_SERVICE_STATE_E PacketServiceState = MBIMPacketServiceStateUnknown; +static MBIM_ACTIVATION_STATE_E ActivationState = MBIMActivationStateUnknown; +static MBIM_SUBSCRIBER_READY_STATE_E oldReadyState = MBIMSubscriberReadyStateNotInitialized; +static MBIM_REGISTER_STATE_E oldRegisterState = MBIMRegisterStateUnknown; +static MBIM_PACKET_SERVICE_STATE_E oldPacketServiceState = MBIMPacketServiceStateUnknown; +static MBIM_ACTIVATION_STATE_E oldActivationState = MBIMActivationStateUnknown; +static int mbim_update_state(void); + +static __inline uint32_t mbim2qmi_ipv4addr(uint32_t addr) { + return (addr>>24) | (addr>>8&0xff00) | (addr<<8&0xff0000) | (addr<<24); +} + +static __inline void mbim2qmi_ipv6addr(const unsigned char *src, unsigned char *dst) { + int i; + + for (i = 0; i < 16 ; i++) { + dst[i] = src[i]; + } +} + +static MBIM_MESSAGE_HEADER *compose_open_command(UINT32 MaxControlTransfer) +{ + MBIM_OPEN_MSG_T *pRequest = (MBIM_OPEN_MSG_T *)mbim_alloc(sizeof(MBIM_OPEN_MSG_T)); + + if(!pRequest) + return NULL; + + pRequest->MessageHeader.MessageType = htole32(MBIM_OPEN_MSG); + pRequest->MessageHeader.MessageLength = htole32(sizeof(MBIM_OPEN_MSG_T)); + pRequest->MessageHeader.TransactionId = htole32(TransactionId++); + pRequest->MaxControlTransfer = htole32(MaxControlTransfer); + + return &pRequest->MessageHeader; +} + +static MBIM_MESSAGE_HEADER *compose_close_command(void) +{ + MBIM_CLOSE_MSG_T *pRequest = (MBIM_CLOSE_MSG_T *)mbim_alloc(sizeof(MBIM_CLOSE_MSG_T)); + + if(!pRequest) + return NULL; + + pRequest->MessageHeader.MessageType = htole32(MBIM_CLOSE_MSG); + pRequest->MessageHeader.MessageLength = htole32(sizeof(MBIM_CLOSE_MSG_T)); + pRequest->MessageHeader.TransactionId = htole32(TransactionId++); + + return &pRequest->MessageHeader; +} + +static MBIM_MESSAGE_HEADER *compose_basic_connect_command(UINT32 CID, UINT32 CommandType, void *pInformationBuffer, UINT32 InformationBufferLength) +{ + MBIM_COMMAND_MSG_T *pRequest = (MBIM_COMMAND_MSG_T *)mbim_alloc(sizeof(MBIM_COMMAND_MSG_T) + InformationBufferLength); 
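+    /* [Editor's note] The compose_*() helpers store every header field with
+     * htole32() because MBIM is a little-endian wire protocol; the dump/parse
+     * side undoes this with le32toh(). A minimal standalone sketch (htole32
+     * and le32toh come from <endian.h> on glibc):
+     *
+     *   #include <endian.h>
+     *   #include <stdint.h>
+     *   #include <stdio.h>
+     *   int main(void) {
+     *       uint32_t wire = htole32(0x80000003); // MBIM_COMMAND_DONE on the wire
+     *       printf("host order: 0x%08x\n", le32toh(wire));
+     *       return 0;
+     *   }
+     */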
+ + if (!pRequest) + return NULL; + + pRequest->MessageHeader.MessageType = htole32(MBIM_COMMAND_MSG); + pRequest->MessageHeader.MessageLength = htole32((sizeof(MBIM_COMMAND_MSG_T) + InformationBufferLength)); + pRequest->MessageHeader.TransactionId = htole32(TransactionId++); + + pRequest->FragmentHeader.TotalFragments = htole32(1); + pRequest->FragmentHeader.CurrentFragment= htole32(0); + + memcpy(pRequest->DeviceServiceId.uuid, str2uuid(UUID_BASIC_CONNECT), 16); + + pRequest->CID = htole32(CID); + pRequest->CommandType = htole32(CommandType); + if (InformationBufferLength && pInformationBuffer) { + pRequest->InformationBufferLength = htole32(InformationBufferLength); + memcpy(pRequest->InformationBuffer, pInformationBuffer, InformationBufferLength); + } else { + pRequest->InformationBufferLength = htole32(0); + } + + return &pRequest->MessageHeader; +} + +static MBIM_MESSAGE_HEADER *compose_basic_connect_ext_command(UINT32 CID, UINT32 CommandType, void *pInformationBuffer, UINT32 InformationBufferLength) +{ + MBIM_COMMAND_MSG_T *pRequest = (MBIM_COMMAND_MSG_T *)compose_basic_connect_command(CID, CommandType, pInformationBuffer, InformationBufferLength); + + if (!pRequest) + return NULL; + + memcpy(pRequest->DeviceServiceId.uuid, str2uuid(UUID_BASIC_CONNECT_EXT), 16); + + return &pRequest->MessageHeader; +} + +static MBIM_MESSAGE_HEADER *compose_qmi_over_mbim_command(UINT32 CID, UINT32 CommandType, void *pInformationBuffer, UINT32 InformationBufferLength) +{ + MBIM_COMMAND_MSG_T *pRequest = (MBIM_COMMAND_MSG_T *)compose_basic_connect_command(CID, CommandType, pInformationBuffer, InformationBufferLength); + + if (!pRequest) + return NULL; + + memcpy(pRequest->DeviceServiceId.uuid, str2uuid(uuid_ext_qmux), 16); + + return &pRequest->MessageHeader; +} + +static const char * uuid2str(const UUID_T *pUUID) { + static char str[16*2+4+1]; + const UINT8 *d = pUUID->uuid; + + snprintf(str, sizeof(str), "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x", + d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], + d[8], d[9], d[10], d[11], d[12], d[13], d[14], d[15]); + + return str; +} + +static const char *DeviceServiceId2str(const UUID_T *pUUID) { + const char *str = uuid2str(pUUID); + + struct { char *val;char *name;} _enumstr[] = { + {UUID_BASIC_CONNECT, "UUID_BASIC_CONNECT"}, + {UUID_BASIC_CONNECT_EXT, "UUID_BASIC_CONNECT_EXT"}, + {UUID_SMS, "UUID_SMS"}, + {UUID_USSD, "UUID_USSD"}, + {UUID_PHONEBOOK, "UUID_PHONEBOOK"}, + {UUID_STK, "UUID_STK"}, + {UUID_AUTH, "UUID_AUTH"}, + {UUID_DSS, "UUID_DSS"}, + {uuid_ext_qmux, "uuid_ext_qmux"}, + {uuid_mshsd, "uuid_mshsd"}, + {uuid_qmbe, "uuid_qmbe"}, + {UUID_MSFWID, "UUID_MSFWID"}, + {uuid_atds, "uuid_atds"}, + {uuid_qdu, "uuid_qdu"}, + {UUID_MS_UICC_LOW_LEVEL, "UUID_MS_UICC_LOW_LEVEL"}, + {UUID_MS_SARControl, "UUID_MS_SARControl"}, + {UUID_VOICEEXTENSIONS, "UUID_VOICEEXTENSIONS"}, + {UUID_LIBMBIM_PROXY, "UUID_LIBMBIM_PROXY"}, + }; + int idx; + + for (idx = 0; idx < (int)(sizeof(_enumstr)/sizeof(_enumstr[0])); idx++) { + if (!strcasecmp(str, _enumstr[idx].val)) + return _enumstr[idx].name; + } + + return str; +} + +static const char *mbim_get_segment(void *_pMsg, UINT32 offset, UINT32 len) +{ + int idx; + static char buff[256] = {'\0'}; + UINT8 *pMsg = (UINT8*)_pMsg; + + for (idx = 0; idx < (int)(len/2); idx++) + buff[idx] = pMsg[offset+idx*2]; + buff[idx] = '\0'; + return buff; +} + +static void mbim_dump_header(MBIM_MESSAGE_HEADER *pMsg, const char *direction) { + mbim_debug("%s Header:", direction); + mbim_debug("%s MessageLength = %u", 
direction, le32toh(pMsg->MessageLength));
+    mbim_debug("%s MessageType = %s (0x%08x)", direction, MBIMMSGTypeStr(le32toh(pMsg->MessageType)), le32toh(pMsg->MessageType));
+    mbim_debug("%s TransactionId = %u", direction, le32toh(pMsg->TransactionId));
+    mbim_debug("%s Contents:", direction);
+}
+
+static void mbim_dump_uuid_cid(const UUID_T *pUUID, UINT32 CID, const char *direction) {
+    size_t idx;
+    const char *uuidStr = uuid2str(pUUID);
+    const char *cidStr = "unknown";
+
+    for (idx = 0; idx < (sizeof(uuid_cid_string)/sizeof(uuid_cid_string[0])); idx++) {
+        if (!strcmp(uuidStr, uuid_cid_string[idx].uuid) && uuid_cid_string[idx].cid == CID) {
+            cidStr = uuid_cid_string[idx].name;
+        }
+    }
+
+    mbim_debug("%s DeviceServiceId = %s (%s)", direction, DeviceServiceId2str(pUUID), uuidStr);
+    mbim_debug("%s CID = %s (%u)", direction, cidStr, le32toh(CID));
+}
+
+static void mbim_dump_command_msg(MBIM_COMMAND_MSG_T *pCmdMsg, const char *direction) {
+    mbim_dump_uuid_cid(&pCmdMsg->DeviceServiceId, le32toh(pCmdMsg->CID), direction);
+    mbim_debug("%s CommandType = %s (%u)", direction, le32toh(pCmdMsg->CommandType) ? "set" : "query", le32toh(pCmdMsg->CommandType));
+    mbim_debug("%s InformationBufferLength = %u", direction, le32toh(pCmdMsg->InformationBufferLength));
+}
+
+static void mbim_dump_command_done(MBIM_COMMAND_DONE_T *pCmdDone, const char *direction) {
+    mbim_dump_uuid_cid(&pCmdDone->DeviceServiceId, le32toh(pCmdDone->CID), direction);
+    mbim_debug("%s Status = %u", direction, le32toh(pCmdDone->Status));
+    mbim_debug("%s InformationBufferLength = %u", direction, le32toh(pCmdDone->InformationBufferLength));
+}
+
+static void mbim_dump_indicate_msg(MBIM_INDICATE_STATUS_MSG_T *pIndMsg, const char *direction) {
+    mbim_dump_uuid_cid(&pIndMsg->DeviceServiceId, le32toh(pIndMsg->CID), direction);
+    mbim_debug("%s InformationBufferLength = %u", direction, le32toh(pIndMsg->InformationBufferLength));
+}
+
+static void mbim_dump_connect(MBIM_CONNECT_T *pInfo, const char *direction) {
+    mbim_debug("%s SessionId = %u", direction, le32toh(pInfo->SessionId));
+    mbim_debug("%s ActivationState = %s (%u)", direction, MBIMActivationStateStr(le32toh(pInfo->ActivationState)), le32toh(pInfo->ActivationState));
+    mbim_debug("%s IPType = %s", direction, MBIMContextIPTypeStr(le32toh(pInfo->IPType)));
+    mbim_debug("%s VoiceCallState = %s", direction, MBIMVoiceCallStateStr(le32toh(pInfo->VoiceCallState)));
+    mbim_debug("%s ContextType = %s", direction, uuid2str(&pInfo->ContextType));
+    mbim_debug("%s NwError = %u", direction, le32toh(pInfo->NwError));
+}
+
+static void mbim_dump_signal_state(MBIM_SIGNAL_STATE_INFO_T *pInfo, const char *direction)
+{
+    mbim_debug("%s Rssi = %u", direction, le32toh(pInfo->Rssi));
+    mbim_debug("%s ErrorRate = %u", direction, le32toh(pInfo->ErrorRate));
+    mbim_debug("%s SignalStrengthInterval = %u", direction, le32toh(pInfo->SignalStrengthInterval));
+    mbim_debug("%s RssiThreshold = %u", direction, le32toh(pInfo->RssiThreshold));
+    mbim_debug("%s ErrorRateThreshold = %u", direction, le32toh(pInfo->ErrorRateThreshold));
+}
+
+static void mbim_dump_packet_service(MBIM_PACKET_SERVICE_INFO_T *pInfo, const char *direction)
+{
+    mbim_debug("%s NwError = %u", direction, le32toh(pInfo->NwError));
+    mbim_debug("%s PacketServiceState = %s", direction, MBIMPacketServiceStateStr(le32toh(pInfo->PacketServiceState)));
+    mbim_debug("%s HighestAvailableDataClass = %s", direction, MBIMDataClassStr(le32toh(pInfo->HighestAvailableDataClass)));
+    mbim_debug("%s UplinkSpeed = %ld", direction, (long)le64toh(pInfo->UplinkSpeed));
+    mbim_debug("%s DownlinkSpeed = %ld", direction, (long)le64toh(pInfo->DownlinkSpeed));
+}
+
+static void mbim_dump_subscriber_status(MBIM_SUBSCRIBER_READY_STATUS_T *pInfo, const char *direction)
+{
+    uint32_t idx;
+
+    mbim_debug("%s ReadyState = %s", direction, MBIMSubscriberReadyStateStr(le32toh(pInfo->ReadyState)));
+    mbim_debug("%s SIMICCID = %s", direction, mbim_get_segment(pInfo, le32toh(pInfo->SimIccIdOffset), le32toh(pInfo->SimIccIdSize)));
+    mbim_debug("%s SubscriberID = %s", direction, mbim_get_segment(pInfo, le32toh(pInfo->SubscriberIdOffset), le32toh(pInfo->SubscriberIdSize)));
+    /* TelephoneNumbersRefList holds one (offset, length) pair per number */
+    for (idx = 0; idx < le32toh(pInfo->ElementCount); idx++) {
+        UINT32 offset = ((UINT32*)((UINT8*)pInfo+offsetof(MBIM_SUBSCRIBER_READY_STATUS_T, TelephoneNumbersRefList)))[idx*2];
+        UINT32 length = ((UINT32*)((UINT8*)pInfo+offsetof(MBIM_SUBSCRIBER_READY_STATUS_T, TelephoneNumbersRefList)))[idx*2+1];
+        mbim_debug("%s Number = %s", direction, mbim_get_segment(pInfo, le32toh(offset), le32toh(length)));
+    }
+}
+
+static void mbim_dump_regiester_status(MBIM_REGISTRATION_STATE_INFO_T *pInfo, const char *direction)
+{
+    mbim_debug("%s NwError = %u", direction, le32toh(pInfo->NwError));
+    mbim_debug("%s RegisterState = %s", direction, MBIMRegisterStateStr(le32toh(pInfo->RegisterState)));
+    mbim_debug("%s RegisterMode = %s", direction, MBIMRegisterModeStr(le32toh(pInfo->RegisterMode)));
+}
+
+static void mbim_dump_ipconfig(MBIM_IP_CONFIGURATION_INFO_T *pInfo, const char *direction)
+{
+    UINT8 prefix = 0, *ipv4=NULL, *ipv6=NULL, *gw=NULL, *dns1=NULL, *dns2=NULL;
+
+    mbim_debug("%s SessionId = %u", direction, le32toh(pInfo->SessionId));
+    mbim_debug("%s IPv4ConfigurationAvailable = 0x%x", direction, le32toh(pInfo->IPv4ConfigurationAvailable));
+    mbim_debug("%s IPv6ConfigurationAvailable = 0x%x", direction, le32toh(pInfo->IPv6ConfigurationAvailable));
+    mbim_debug("%s IPv4AddressCount = 0x%x", direction, le32toh(pInfo->IPv4AddressCount));
+    mbim_debug("%s IPv4AddressOffset = 0x%x", direction, le32toh(pInfo->IPv4AddressOffset));
+    mbim_debug("%s IPv6AddressCount = 0x%x", direction, le32toh(pInfo->IPv6AddressCount));
+    mbim_debug("%s IPv6AddressOffset = 0x%x", direction, le32toh(pInfo->IPv6AddressOffset));
+
+    /* IPv4: bit0 = address, bit1 = gateway, bit2 = DNS, bit3 = MTU */
+    if (le32toh(pInfo->IPv4ConfigurationAvailable)&0x1) {
+        MBIM_IPV4_ELEMENT_T *pAddress = (MBIM_IPV4_ELEMENT_T *)(&pInfo->DataBuffer[le32toh(pInfo->IPv4AddressOffset)-sizeof(MBIM_IP_CONFIGURATION_INFO_T)]);
+        prefix = le32toh(pAddress->OnLinkPrefixLength);
+        ipv4 = pAddress->IPv4Address;
+        mbim_debug("%s IPv4 = %u.%u.%u.%u/%u", direction, ipv4[0], ipv4[1], ipv4[2], ipv4[3], prefix);
+    }
+    if (le32toh(pInfo->IPv4ConfigurationAvailable)&0x2) {
+        gw = (UINT8 *)(&pInfo->DataBuffer[le32toh(pInfo->IPv4GatewayOffset)-sizeof(MBIM_IP_CONFIGURATION_INFO_T)]);
+        mbim_debug("%s gw = %u.%u.%u.%u", direction, gw[0], gw[1], gw[2], gw[3]);
+    }
+    if (le32toh(pInfo->IPv4ConfigurationAvailable)&0x4) {
+        dns1 = (UINT8 *)(&pInfo->DataBuffer[le32toh(pInfo->IPv4DnsServerOffset)-sizeof(MBIM_IP_CONFIGURATION_INFO_T)]);
+        mbim_debug("%s dns1 = %u.%u.%u.%u", direction, dns1[0], dns1[1], dns1[2], dns1[3]);
+        if (le32toh(pInfo->IPv4DnsServerCount) == 2) {
+            dns2 = dns1 + 4;
+            mbim_debug("%s dns2 = %u.%u.%u.%u", direction, dns2[0], dns2[1], dns2[2], dns2[3]);
+        }
+    }
+    if (le32toh(pInfo->IPv4Mtu)) mbim_debug("%s ipv4 mtu = %u", direction, le32toh(pInfo->IPv4Mtu));
+
+    /* IPv6: same bit layout as IPv4 */
+    if (le32toh(pInfo->IPv6ConfigurationAvailable)&0x1) {
+        MBIM_IPV6_ELEMENT_T *pAddress = (MBIM_IPV6_ELEMENT_T *)(&pInfo->DataBuffer[le32toh(pInfo->IPv6AddressOffset)-sizeof(MBIM_IP_CONFIGURATION_INFO_T)]);
+        prefix = le32toh(pAddress->OnLinkPrefixLength);
+        ipv6 = pAddress->IPv6Address;
+        mbim_debug("%s IPv6 = %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x/%d", \
+            direction, ipv6[0], ipv6[1], ipv6[2], ipv6[3], ipv6[4], ipv6[5], ipv6[6], ipv6[7], \
+            ipv6[8], ipv6[9], ipv6[10], ipv6[11], ipv6[12], ipv6[13], ipv6[14], ipv6[15], prefix);
+    }
+    if (le32toh(pInfo->IPv6ConfigurationAvailable)&0x2) {
+        gw = (UINT8 *)(&pInfo->DataBuffer[le32toh(pInfo->IPv6GatewayOffset)-sizeof(MBIM_IP_CONFIGURATION_INFO_T)]);
+        mbim_debug("%s gw = %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", \
+            direction, gw[0], gw[1], gw[2], gw[3], gw[4], gw[5], gw[6], gw[7], \
+            gw[8], gw[9], gw[10], gw[11], gw[12], gw[13], gw[14], gw[15]);
+    }
+    if (le32toh(pInfo->IPv6ConfigurationAvailable)&0x4) {
+        dns1 = (UINT8 *)(&pInfo->DataBuffer[le32toh(pInfo->IPv6DnsServerOffset)-sizeof(MBIM_IP_CONFIGURATION_INFO_T)]);
+        mbim_debug("%s dns1 = %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", \
+            direction, dns1[0], dns1[1], dns1[2], dns1[3], dns1[4], dns1[5], dns1[6], dns1[7], \
+            dns1[8], dns1[9], dns1[10], dns1[11], dns1[12], dns1[13], dns1[14], dns1[15]);
+        if (le32toh(pInfo->IPv6DnsServerCount) == 2) {
+            dns2 = dns1 + 16;
+            mbim_debug("%s dns2 = %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", \
+                direction, dns2[0], dns2[1], dns2[2], dns2[3], dns2[4], dns2[5], dns2[6], dns2[7], \
+                dns2[8], dns2[9], dns2[10], dns2[11], dns2[12], dns2[13], dns2[14], dns2[15]);
+        }
+    }
+    if (le32toh(pInfo->IPv6Mtu)) mbim_debug("%s ipv6 mtu = %u", direction, le32toh(pInfo->IPv6Mtu));
+}
+
+static void mbim_dump(MBIM_MESSAGE_HEADER *pMsg, int mbim_verbose) {
+    unsigned char *data = (unsigned char *)pMsg;
+    const char *direction = (le32toh(pMsg->MessageType) & 0x80000000) ? "<" : ">";
+
+    if (!mbim_verbose)
+        return;
+
+    if (mbim_verbose) {
+        unsigned i;
+        static char *_tmp = NULL;
+
+        if (!_tmp)
+            _tmp = (char *)malloc(4096);
+
+        if (_tmp) {
+            _tmp[0] = (le32toh(pMsg->MessageType) & 0x80000000) ?
'<' : '>'; + _tmp[1] = '\0'; + for (i = 0; i < le32toh(pMsg->MessageLength) && i < 4096; i++) + snprintf(_tmp + strlen(_tmp), 4096 - strlen(_tmp), "%02X:", data[i]); + mbim_debug("%s", _tmp); + } + } + + mbim_dump_header(pMsg, direction); + + switch (le32toh(pMsg->MessageType)) { + case MBIM_OPEN_MSG: { + MBIM_OPEN_MSG_T *pOpenMsg = (MBIM_OPEN_MSG_T *)pMsg; + mbim_debug("%s MaxControlTransfer = %u", direction, le32toh(pOpenMsg->MaxControlTransfer)); + } + break; + case MBIM_OPEN_DONE: { + MBIM_OPEN_DONE_T *pOpenDone = (MBIM_OPEN_DONE_T *)pMsg; + mbim_debug("%s Status = %u", direction, le32toh(pOpenDone->Status)); + } + break; + case MBIM_CLOSE_MSG: { + + } + break; + case MBIM_CLOSE_DONE: { + MBIM_CLOSE_DONE_T *pCloseDone = (MBIM_CLOSE_DONE_T *)pMsg; + mbim_debug("%s Status = %u", direction, le32toh(pCloseDone->Status)); + } + break; + case MBIM_COMMAND_MSG: { + MBIM_COMMAND_MSG_T *pCmdMsg = (MBIM_COMMAND_MSG_T *)pMsg; + + mbim_dump_command_msg(pCmdMsg, direction); + if (!memcmp(pCmdMsg->DeviceServiceId.uuid, str2uuid(UUID_BASIC_CONNECT), 16)) { + switch (le32toh(pCmdMsg->CID)) { + case MBIM_CID_CONNECT: { + MBIM_SET_CONNECT_T *pInfo = (MBIM_SET_CONNECT_T *)pCmdMsg->InformationBuffer; + mbim_debug("%s SessionId = %u", direction, le32toh(pInfo->SessionId)); + } + break; + case MBIM_CID_IP_CONFIGURATION: { + MBIM_IP_CONFIGURATION_INFO_T *pInfo = (MBIM_IP_CONFIGURATION_INFO_T *)pCmdMsg->InformationBuffer; + mbim_debug("%s SessionId = %u", direction, le32toh(pInfo->SessionId)); + } + break; + default: + break; + } + } + } + break; + case MBIM_COMMAND_DONE: { + MBIM_COMMAND_DONE_T *pCmdDone = (MBIM_COMMAND_DONE_T *)pMsg; + + mbim_dump_command_done(pCmdDone, direction); + if (le32toh(pCmdDone->InformationBufferLength) == 0) + return; + + if (!memcmp(pCmdDone->DeviceServiceId.uuid, str2uuid(UUID_BASIC_CONNECT), 16)) { + switch (le32toh(pCmdDone->CID)) { + case MBIM_CID_CONNECT: { + MBIM_CONNECT_T *pInfo = (MBIM_CONNECT_T *)pCmdDone->InformationBuffer; + mbim_dump_connect(pInfo, direction); + } + break; + case MBIM_CID_IP_CONFIGURATION: { + //MBIM_IP_CONFIGURATION_INFO_T *pInfo = (MBIM_IP_CONFIGURATION_INFO_T *)pCmdDone->InformationBuffer; + //mbim_dump_ipconfig(pInfo, direction); + } + break; + case MBIM_CID_PACKET_SERVICE: { + MBIM_PACKET_SERVICE_INFO_T *pInfo = (MBIM_PACKET_SERVICE_INFO_T *)pCmdDone->InformationBuffer; + mbim_dump_packet_service(pInfo, direction); + } + break; + case MBIM_CID_SUBSCRIBER_READY_STATUS: { + MBIM_SUBSCRIBER_READY_STATUS_T *pInfo = (MBIM_SUBSCRIBER_READY_STATUS_T *)pCmdDone->InformationBuffer; + mbim_dump_subscriber_status(pInfo, direction); + } + break; + case MBIM_CID_REGISTER_STATE: { + MBIM_REGISTRATION_STATE_INFO_T *pInfo = (MBIM_REGISTRATION_STATE_INFO_T *)pCmdDone->InformationBuffer; + mbim_dump_regiester_status(pInfo, direction); + } + break; + default: + break; + } + } + } + break; + case MBIM_INDICATE_STATUS_MSG: { + MBIM_INDICATE_STATUS_MSG_T *pIndMsg = (MBIM_INDICATE_STATUS_MSG_T *)pMsg; + + mbim_dump_indicate_msg(pIndMsg, direction); + if (le32toh(pIndMsg->InformationBufferLength) == 0) + return; + + if (!memcmp(pIndMsg->DeviceServiceId.uuid, str2uuid(UUID_BASIC_CONNECT), 16)) { + switch (le32toh(pIndMsg->CID)) { + case MBIM_CID_CONNECT: { + MBIM_CONNECT_T *pInfo = (MBIM_CONNECT_T *)pIndMsg->InformationBuffer; + mbim_dump_connect(pInfo, direction); + } + break; + case MBIM_CID_SIGNAL_STATE: { + MBIM_SIGNAL_STATE_INFO_T *pInfo = (MBIM_SIGNAL_STATE_INFO_T *)pIndMsg->InformationBuffer; + mbim_dump_signal_state(pInfo, direction); + } + break; + case 
MBIM_CID_SUBSCRIBER_READY_STATUS: { + MBIM_SUBSCRIBER_READY_STATUS_T *pInfo = (MBIM_SUBSCRIBER_READY_STATUS_T *)pIndMsg->InformationBuffer; + mbim_dump_subscriber_status(pInfo, direction); + } + break; + case MBIM_CID_REGISTER_STATE: { + MBIM_REGISTRATION_STATE_INFO_T *pInfo = (MBIM_REGISTRATION_STATE_INFO_T *)pIndMsg->InformationBuffer; + mbim_dump_regiester_status(pInfo, direction); + } + break; + case MBIM_CID_PACKET_SERVICE: { + MBIM_PACKET_SERVICE_INFO_T *pInfo = (MBIM_PACKET_SERVICE_INFO_T *)pIndMsg->InformationBuffer; + mbim_dump_packet_service(pInfo, direction); + } + break; + default: + break; + } + } + else if (!memcmp(pIndMsg->DeviceServiceId.uuid, str2uuid(UUID_BASIC_CONNECT_EXT), 16)) { + } + } + break; + case MBIM_FUNCTION_ERROR_MSG: { + MBIM_FUNCTION_ERROR_MSG_T *pErrMsg = (MBIM_FUNCTION_ERROR_MSG_T*)pMsg; + mbim_debug("%s ErrorStatusCode = %u", direction, le32toh(pErrMsg->ErrorStatusCode)); + } + break; + default: + break; + } +} + +static void mbim_recv_command(MBIM_MESSAGE_HEADER *pResponse, unsigned size) +{ + (void)size; + pthread_mutex_lock(&mbim_command_mutex); + + if (pResponse) + mbim_dump(pResponse, mbim_verbose); + + if (pResponse == NULL) { + pthread_cond_signal(&mbim_command_cond); + } + else if (mbim_pRequest && le32toh(mbim_pRequest->TransactionId) == le32toh(pResponse->TransactionId)) { + mbim_pResponse = mbim_alloc(le32toh(pResponse->MessageLength)); + if (mbim_pResponse) + memcpy(mbim_pResponse, pResponse, le32toh(pResponse->MessageLength)); + pthread_cond_signal(&mbim_command_cond); + } + else if (le32toh(pResponse->MessageType) == MBIM_INDICATE_STATUS_MSG) { + MBIM_INDICATE_STATUS_MSG_T *pIndMsg = (MBIM_INDICATE_STATUS_MSG_T *)pResponse; + + if (!memcmp(pIndMsg->DeviceServiceId.uuid, str2uuid(UUID_BASIC_CONNECT), 16)) + { + switch (le32toh(pIndMsg->CID)) { + case MBIM_CID_SUBSCRIBER_READY_STATUS: { + MBIM_SUBSCRIBER_READY_STATUS_T *pInfo = (MBIM_SUBSCRIBER_READY_STATUS_T *)pIndMsg->InformationBuffer; + if (oldReadyState != le32toh(pInfo->ReadyState)) + qmidevice_send_event_to_main(RIL_UNSOL_RESPONSE_VOICE_NETWORK_STATE_CHANGED); + } + break; + case MBIM_CID_REGISTER_STATE: { + MBIM_REGISTRATION_STATE_INFO_T *pInfo = (MBIM_REGISTRATION_STATE_INFO_T *)pIndMsg->InformationBuffer; + if (oldRegisterState != le32toh(pInfo->RegisterState)) + qmidevice_send_event_to_main(RIL_UNSOL_RESPONSE_VOICE_NETWORK_STATE_CHANGED); + } + break; + case MBIM_CID_PACKET_SERVICE: { + MBIM_PACKET_SERVICE_INFO_T *pInfo = (MBIM_PACKET_SERVICE_INFO_T *)pIndMsg->InformationBuffer; + MBIM_PACKET_SERVICE_STATE_E state = le32toh(pInfo->PacketServiceState); + + if (oldPacketServiceState != state + && (1 || MBIMPacketServiceStateAttached == state || MBIMPacketServiceStateDetached == state)) + qmidevice_send_event_to_main(RIL_UNSOL_RESPONSE_VOICE_NETWORK_STATE_CHANGED); + } + break; + case MBIM_CID_CONNECT: { + MBIM_CONNECT_T *pInfo = (MBIM_CONNECT_T *)pIndMsg->InformationBuffer; + if (pInfo->SessionId == (uint32_t)mbim_sessionID) { + MBIM_ACTIVATION_STATE_E state = le32toh(pInfo->ActivationState); + + if (oldActivationState != state + && (1 || MBIMActivationStateActivated == state || MBIMActivationStateDeactivated == state)) + qmidevice_send_event_to_main(RIL_UNSOL_DATA_CALL_LIST_CHANGED); + } + } + break; + default: + break; + } + } + } + + pthread_mutex_unlock(&mbim_command_mutex); +} + +static int mbim_send_command(MBIM_MESSAGE_HEADER *pRequest, MBIM_COMMAND_DONE_T **ppCmdDone, unsigned msecs) { + int ret; + + if (ppCmdDone) + *ppCmdDone = NULL; + + if (mbim_fd <= 0) + return -ENODEV; + 
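+    /*
+     * Synchronous request/response: the reader thread (mbim_read_thread ->
+     * mbim_recv_command) matches the reply to mbim_pRequest by TransactionId
+     * and signals mbim_command_cond; this function blocks for at most msecs.
+     */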
+ if (s_tid_reader == 0) + return -EINVAL; + + if (!pRequest) + return -ENOMEM; + + pthread_mutex_lock(&mbim_command_mutex); + + if (pRequest) { + if (pRequest->TransactionId == (0xFFFFFF + 1)) { //quectel-mbim-proxy need 0xFF000000 to indicat client + TransactionId = 1; + pRequest->TransactionId = htole32(TransactionId++); + } + mbim_dump(pRequest, mbim_verbose); + } + + mbim_pRequest = pRequest; + mbim_pResponse = NULL; + + ret = write(mbim_fd, pRequest, le32toh(pRequest->MessageLength)); + + if (ret > 0 && (uint32_t)ret == le32toh(pRequest->MessageLength)) { + ret = pthread_cond_timeout_np(&mbim_command_cond, &mbim_command_mutex, msecs); + if (!ret) { + if (mbim_pResponse && ppCmdDone) { + *ppCmdDone = (MBIM_COMMAND_DONE_T *)mbim_pResponse; + } + } + } else { + mbim_debug("%s pthread_cond_timeout_np=%d", __func__, ret); + } + + mbim_pRequest = mbim_pResponse = NULL; + + pthread_mutex_unlock(&mbim_command_mutex); + + return ret; +} + +static ssize_t mbim_proxy_read (int fd, MBIM_MESSAGE_HEADER *pResponse, size_t size) { + ssize_t nreads; + + nreads = read(fd, pResponse, sizeof(MBIM_MESSAGE_HEADER)); + if (nreads == sizeof(MBIM_MESSAGE_HEADER) && le32toh(pResponse->MessageLength) <= size) { + nreads += read(fd, pResponse+1, le32toh(pResponse->MessageLength) - sizeof(MBIM_MESSAGE_HEADER)); + } + + return nreads; +} + +static void * mbim_read_thread(void *param) { + PROFILE_T *profile = (PROFILE_T *)param; + const char *cdc_wdm = (const char *)profile->qmichannel; + int wait_for_request_quit = 0; + + mbim_verbose = debug_qmi; + s_tid_reader = pthread_self(); + + if (profile->qmap_mode > 1 && profile->qmapnet_adapter[0]) { + if (!profile->proxy[0]) + sprintf(profile->proxy, "%s", QUECTEL_MBIM_PROXY); + mbim_sessionID = profile->pdp; + } + + if (profile->proxy[0]) { + mbim_fd = cm_open_proxy(profile->proxy); + } + else { + mbim_fd = cm_open_dev(cdc_wdm); + } + + if (mbim_fd <= 0) { + mbim_debug("fail to open (%s), errno: %d (%s)", cdc_wdm, errno, strerror(errno)); + goto __quit; + } + + dbg_time("cdc_wdm_fd = %d", mbim_fd); + + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_CONNECTED); + + while (mbim_fd > 0) { + struct pollfd pollfds[] = {{mbim_fd, POLLIN, 0}, {qmidevice_control_fd[1], POLLIN, 0}, {qmi_over_mbim_sk[1], POLLIN, 0}}; + int ne, ret, nevents = 2; + + if (pollfds[nevents].fd != -1) + nevents++; + + ret = poll(pollfds, nevents, wait_for_request_quit ? 
1000 : -1); + + if (ret == 0 && wait_for_request_quit) { + break; + } + + if (ret < 0) { + mbim_debug("%s poll=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno)); + break; + } + + for (ne = 0; ne < nevents; ne++) { + int fd = pollfds[ne].fd; + short revents = pollfds[ne].revents; + + if (revents & (POLLERR | POLLHUP | POLLNVAL)) { + mbim_debug("%s poll err/hup/inval", __func__); + mbim_debug("epoll fd = %d, events = 0x%04x", fd, revents); + if (revents & (POLLERR | POLLHUP | POLLNVAL)) + goto __quit; + } + + if ((revents & POLLIN) == 0) + continue; + + if (mbim_fd == fd) { + ssize_t nreads; + MBIM_MESSAGE_HEADER *pResponse = (MBIM_MESSAGE_HEADER *) cm_recv_buf; + + if (profile->proxy[0]) + nreads = mbim_proxy_read(fd, pResponse, sizeof(cm_recv_buf)); + else + nreads = read(fd, pResponse, sizeof(cm_recv_buf)); + + if (nreads <= 0) { + mbim_debug("%s read=%d errno: %d (%s)", __func__, (int)nreads, errno, strerror(errno)); + break; + } + + mbim_recv_command(pResponse, nreads); + } + else if (fd == qmidevice_control_fd[1]) { + int triger_event; + if (read(fd, &triger_event, sizeof(triger_event)) == sizeof(triger_event)) { + //mbim_debug("triger_event = 0x%x", triger_event); + switch (triger_event) { + case RIL_REQUEST_QUIT: + goto __quit; + break; + case SIG_EVENT_STOP: + wait_for_request_quit = 1; + break; + default: + break; + } + } + } + else if (fd == qmi_over_mbim_sk[1]) { + ssize_t nreads = read(fd, cm_recv_buf, sizeof(cm_recv_buf)); + if (nreads > 0) + QmiThreadRecvQMI((PQCQMIMSG)cm_recv_buf); + } + } + } + +__quit: + if (mbim_fd != -1) { close(mbim_fd); mbim_fd = -1; } + mbim_recv_command(NULL, 0); + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_DISCONNECTED); + mbim_debug("%s exit", __func__); + s_tid_reader = 0; + + return NULL; +} + +static int mbim_status_code(MBIM_MESSAGE_HEADER *pMsgHdr) { + int status = 0; + + if (!pMsgHdr) + return 0; + + switch (le32toh(pMsgHdr->MessageType)) { + case MBIM_OPEN_DONE: { + MBIM_OPEN_DONE_T *pOpenDone = (MBIM_OPEN_DONE_T *)pMsgHdr; + status = le32toh(pOpenDone->Status); + } + break; + case MBIM_CLOSE_DONE: { + MBIM_CLOSE_DONE_T *pCloseDone = (MBIM_CLOSE_DONE_T *)pMsgHdr; + status = le32toh(pCloseDone->Status); + } + break; + case MBIM_COMMAND_DONE: { + MBIM_COMMAND_DONE_T *pCmdDone = (MBIM_COMMAND_DONE_T *)pMsgHdr; + status = le32toh(pCmdDone->Status); + } + break; + case MBIM_FUNCTION_ERROR_MSG: { + MBIM_FUNCTION_ERROR_MSG_T *pErrMsg = (MBIM_FUNCTION_ERROR_MSG_T *)pMsgHdr; + status = le32toh(pErrMsg->ErrorStatusCode); + if (status == MBIM_ERROR_NOT_OPENED) + mbim_open_state = 0; //EM06ELAR03A05M4G when suspend/resume, may get this error + } + break; + default: + break; + } + + return status; +} + +#define mbim_check_err(err, pRequest, pCmdDone) do { \ + int _status = mbim_status_code(pCmdDone ? 
&pCmdDone->MessageHeader : NULL); \
+    if (err || _status || !pCmdDone) { \
+        if (pCmdDone) { mbim_dump(&pCmdDone->MessageHeader, (mbim_verbose == 0)); } \
+        mbim_free(pRequest); mbim_free(pCmdDone); \
+        mbim_debug("%s:%d err=%d, Status=%d", __func__, __LINE__, err, _status); \
+        if (err) return err; \
+        if (_status) return _status; \
+        return 8888; \
+    } \
+} while(0)
+
+/*
+ * The MBIM device can be opened repeatedly without error,
+ * so this function may be called whether or not the device is already open.
+ */
+static int mbim_open_device(uint32_t MaxControlTransfer) {
+    MBIM_MESSAGE_HEADER *pRequest = NULL;
+    MBIM_OPEN_DONE_T *pOpenDone = NULL;
+    int err = 0;
+
+    mbim_debug("%s()", __func__);
+    pRequest = compose_open_command(MaxControlTransfer);
+    err = mbim_send_command(pRequest, (MBIM_COMMAND_DONE_T **)&pOpenDone, 3*1000); //EM06ELAR03A09M4G takes about 2.5 seconds
+    mbim_check_err(err, pRequest, pOpenDone);
+
+    err = le32toh(pOpenDone->Status);
+    mbim_free(pRequest); mbim_free(pOpenDone);
+
+    return err;
+}
+
+static int mbim_close_device(void) {
+    MBIM_MESSAGE_HEADER *pRequest = NULL;
+    MBIM_CLOSE_DONE_T *pCloseDone = NULL;
+    int err = 0;
+
+    mbim_debug("%s()", __func__);
+    pRequest = compose_close_command();
+    err = mbim_send_command(pRequest, (MBIM_COMMAND_DONE_T **)&pCloseDone, mbim_default_timeout);
+    mbim_check_err(err, pRequest, pCloseDone);
+
+    err = le32toh(pCloseDone->Status);
+    mbim_free(pRequest); mbim_free(pCloseDone);
+
+    return err;
+}
+
+static int mbim_query_connect(int sessionID) {
+    MBIM_MESSAGE_HEADER *pRequest = NULL;
+    MBIM_COMMAND_DONE_T *pCmdDone = NULL;
+    MBIM_SET_CONNECT_T set_connect;
+    int err;
+
+    if (ActivationState != MBIMActivationStateActivated || mbim_verbose)
+        mbim_debug("%s(sessionID=%d)", __func__, sessionID); //avoid flooding the log while connected
+    set_connect.SessionId = htole32(sessionID);
+    pRequest = compose_basic_connect_command(MBIM_CID_CONNECT, MBIM_CID_CMD_TYPE_QUERY, &set_connect, sizeof(set_connect));
+    err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout);
+    mbim_check_err(err, pRequest, pCmdDone);
+
+    if (le32toh(pCmdDone->InformationBufferLength))
+    {
+        MBIM_CONNECT_T *pInfo = (MBIM_CONNECT_T *)pCmdDone->InformationBuffer;
+        ActivationState = le32toh(pInfo->ActivationState);
+        mbim_update_state();
+    }
+    mbim_free(pRequest); mbim_free(pCmdDone);
+    return err;
+}
+
+static int mbim_ms_version_query(void) {
+    MBIM_MESSAGE_HEADER *pRequest = NULL;
+    MBIM_COMMAND_DONE_T *pCmdDone = NULL;
+    int err;
+
+    struct _bc_ext_version {
+        UINT8 ver_minor;
+        UINT8 ver_major;
+        UINT8 ext_ver_minor;
+        UINT8 ext_ver_major;
+    } __attribute__ ((packed)) bc_ext_version;
+
+    bc_ext_version.ver_major = 1;
+    bc_ext_version.ver_minor = 0;
+    bc_ext_version.ext_ver_major = 2;
+    bc_ext_version.ext_ver_minor = 0;
+
+    pRequest = compose_basic_connect_ext_command(MBIM_CID_MS_VERSION, MBIM_CID_CMD_TYPE_QUERY, &bc_ext_version, sizeof(bc_ext_version));
+    err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout);
+    mbim_check_err(err, pRequest, pCmdDone);
+
+    if (le32toh(pCmdDone->InformationBufferLength)) {
+        struct _bc_ext_version *pInfo = (struct _bc_ext_version *)pCmdDone->InformationBuffer;
+        //mbim_debug("%s ext_rel_ver major=%d, minor=%d", __func__, pInfo->ext_ver_major, pInfo->ext_ver_minor);
+        mbim_ms_version = pInfo->ext_ver_major;
+    }
+
+    mbim_free(pRequest); mbim_free(pCmdDone);
+    return err;
+}
+
+static int mbim_device_services_query(void) {
+    MBIM_MESSAGE_HEADER *pRequest = NULL;
+    MBIM_COMMAND_DONE_T *pCmdDone = NULL;
+    int err;
+    int mbim_v2_support = 0;
+
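+    /*
+     * Walk the device-service list: MBIM_CID_MS_VERSION under
+     * BASIC_CONNECT_EXT means the MS MBIMEx version can be negotiated, and
+     * uuid_ext_qmux means the function accepts QMI messages tunneled in MBIM.
+     */
+    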
mbim_debug("%s()", __func__); + pRequest = compose_basic_connect_command(MBIM_CID_DEVICE_SERVICES, MBIM_CID_CMD_TYPE_QUERY, NULL, 0); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCmdDone); + + if (pCmdDone->InformationBufferLength) { + MBIM_DEVICE_SERVICES_INFO_T *pInfo = (MBIM_DEVICE_SERVICES_INFO_T *)pCmdDone->InformationBuffer; + UINT32 i; + + for (i = 0; i < le32toh(pInfo->DeviceServicesCount) ; i++) { + //UINT32 size = pInfo->DeviceServicesRefList[i].size; + UINT32 offset = le32toh(pInfo->DeviceServicesRefList[i].offset); + MBIM_DEVICE_SERVICE_ELEMENT_T *pSrvEle = (MBIM_DEVICE_SERVICE_ELEMENT_T *)((void *)pInfo + offset); + + //mbim_debug("\t[%2d] %s (%s)", i, DeviceServiceId2str(&pSrvEle->DeviceServiceId), uuid2str(&pSrvEle->DeviceServiceId)); + if (!strcasecmp(UUID_BASIC_CONNECT_EXT, uuid2str(&pSrvEle->DeviceServiceId))) { + UINT32 cid = 0; + + for (cid = 0; cid < le32toh(pSrvEle->CidCount); cid++) { + if (MBIM_CID_MS_VERSION == le32toh(pSrvEle->CidList[cid])) { + mbim_v2_support = 1; + } + } + } + else if (!strcasecmp(uuid_ext_qmux, uuid2str(&pSrvEle->DeviceServiceId))) { + qmi_over_mbim_support = 1; + } + } + } + mbim_free(pRequest); mbim_free(pCmdDone); + + if (mbim_v2_support) { + mbim_ms_version_query(); + } + + return err; +} + +static int mbim_device_caps_query(PROFILE_T *profile) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_COMMAND_DONE_T *pCmdDone = NULL; + int err; + + mbim_debug("%s()", __func__); + pRequest = compose_basic_connect_command(MBIM_CID_DEVICE_CAPS, MBIM_CID_CMD_TYPE_QUERY, NULL, 0); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCmdDone); + + if (le32toh(pCmdDone->InformationBufferLength)) { + MBIM_DEVICE_CAPS_INFO_T *pInfo = (MBIM_DEVICE_CAPS_INFO_T *)pCmdDone->InformationBuffer; + char tmp[32]; + + if (le32toh(pInfo->DeviceIdOffset) && le32toh(pInfo->DeviceIdSize)) { + wchar2char((const char *)pInfo + le32toh(pInfo->DeviceIdOffset), le32toh(pInfo->DeviceIdSize), tmp, sizeof(tmp)); + mbim_debug("DeviceId: %s", tmp); + } + if (le32toh(pInfo->FirmwareInfoOffset) && le32toh(pInfo->FirmwareInfoSize)) { + wchar2char((const char *)pInfo + le32toh(pInfo->FirmwareInfoOffset), le32toh(pInfo->FirmwareInfoSize), tmp, sizeof(tmp)); + strncpy(profile->BaseBandVersion, tmp, sizeof(profile->BaseBandVersion)); + mbim_debug("FirmwareInfo: %s", tmp); + } + if (le32toh(pInfo->HardwareInfoOffset) && le32toh(pInfo->HardwareInfoSize)) { + wchar2char((const char *)pInfo + le32toh(pInfo->HardwareInfoOffset), le32toh(pInfo->HardwareInfoSize), tmp, sizeof(tmp)); + mbim_debug("HardwareInfo: %s", tmp); + } + } + mbim_free(pRequest); mbim_free(pCmdDone); + return err; +} + +#if 0 +static int mbim_radio_state_query(void) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_COMMAND_DONE_T *pCmdDone = NULL; + int err; + + mbim_debug("%s()", __func__); + pRequest = compose_basic_connect_command(MBIM_CID_RADIO_STATE, MBIM_CID_CMD_TYPE_QUERY, NULL, 0); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCmdDone); + + if (pCmdDone->InformationBufferLength) { + MBIM_RADIO_STATE_INFO_T *pInfo = (MBIM_RADIO_STATE_INFO_T *)pCmdDone->InformationBuffer; + mbim_debug("HwRadioState: %d, SwRadioState: %d", pInfo->HwRadioState, pInfo->SwRadioState); + } + mbim_free(pRequest); mbim_free(pCmdDone); + return err; +} +#endif + +static int mbim_set_radio_state(MBIM_RADIO_SWITCH_STATE_E RadioState) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + 
MBIM_COMMAND_DONE_T *pCmdDone = NULL; + UINT32 value = htole32(RadioState); + int err; + + mbim_debug("%s( %d )", __func__, RadioState); + pRequest = compose_basic_connect_command(MBIM_CID_RADIO_STATE, MBIM_CID_CMD_TYPE_SET, &value, sizeof(value)); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCmdDone); + + if (le32toh(pCmdDone->InformationBufferLength)) { + MBIM_RADIO_STATE_INFO_T *pInfo = (MBIM_RADIO_STATE_INFO_T *)pCmdDone->InformationBuffer; + mbim_debug("HwRadioState: %d, SwRadioState: %d", le32toh(pInfo->HwRadioState), le32toh(pInfo->SwRadioState)); + } + mbim_free(pRequest); mbim_free(pCmdDone); + return err; +} + +static int mbim_subscriber_status_query(void) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_COMMAND_DONE_T *pCmdDone = NULL; + int err; + + mbim_debug("%s()", __func__); + pRequest = compose_basic_connect_command(MBIM_CID_SUBSCRIBER_READY_STATUS, MBIM_CID_CMD_TYPE_QUERY, NULL, 0); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCmdDone); + + if (le32toh(pCmdDone->InformationBufferLength)) { + MBIM_SUBSCRIBER_READY_STATUS_T *pInfo = (MBIM_SUBSCRIBER_READY_STATUS_T *)pCmdDone->InformationBuffer; + char tmp[32]; + + wchar2char((const char *)pInfo + le32toh(pInfo->SubscriberIdOffset), le32toh(pInfo->SubscriberIdSize), tmp, sizeof(tmp)); + mbim_debug("SubscriberId: %s", tmp); + wchar2char((const char *)pInfo + le32toh(pInfo->SimIccIdOffset), le32toh(pInfo->SimIccIdSize), tmp, sizeof(tmp)); + mbim_debug("SimIccId: %s", tmp); + ReadyState = le32toh(pInfo->ReadyState); + mbim_update_state(); + } + mbim_free(pRequest); mbim_free(pCmdDone); + return err; +} + +static int mbim_register_state_query(void) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_COMMAND_DONE_T *pCmdDone = NULL; + int err; + + mbim_debug("%s()", __func__); + pRequest = compose_basic_connect_command(MBIM_CID_REGISTER_STATE, MBIM_CID_CMD_TYPE_QUERY, NULL, 0); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCmdDone); + + if (le32toh(pCmdDone->InformationBufferLength)) { + MBIM_REGISTRATION_STATE_INFO_T *pInfo = (MBIM_REGISTRATION_STATE_INFO_T *)pCmdDone->InformationBuffer;; + RegisterState = le32toh(pInfo->RegisterState); + mbim_update_state(); + } + mbim_free(pRequest); mbim_free(pCmdDone); + return err; +} + +static int mbim_packet_service_query(void) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_COMMAND_DONE_T *pCmdDone = NULL; + int err; + + mbim_debug("%s()", __func__); + pRequest = compose_basic_connect_command(MBIM_CID_PACKET_SERVICE, MBIM_CID_CMD_TYPE_QUERY, NULL, 0); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCmdDone); + + if (le32toh(pCmdDone->InformationBufferLength)) { + MBIM_PACKET_SERVICE_INFO_T *pInfo = (MBIM_PACKET_SERVICE_INFO_T *)pCmdDone->InformationBuffer; + PacketServiceState = le32toh(pInfo->PacketServiceState); + mbim_update_state(); + + if (le32toh(pCmdDone->InformationBufferLength) == sizeof(MBIM_PACKET_SERVICE_INFO_V2_T)) { + MBIM_PACKET_SERVICE_INFO_V2_T *pInfo = (MBIM_PACKET_SERVICE_INFO_V2_T *)pCmdDone->InformationBuffer; + mbim_debug("CurrentDataClass = %s", MBIMDataClassStr(le32toh(pInfo->CurrentDataClass))); + } + } + mbim_free(pRequest); mbim_free(pCmdDone); + return err; +} + +static int mbim_packet_service_set(MBIM_PACKET_SERVICE_ACTION_E action) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_COMMAND_DONE_T *pCmdDone = NULL; + UINT32 value = 
htole32(action); + int err; + + mbim_debug("%s()", __func__); + pRequest = compose_basic_connect_command(MBIM_CID_PACKET_SERVICE, MBIM_CID_CMD_TYPE_SET, &value, sizeof(value)); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCmdDone); + + if (le32toh(pCmdDone->InformationBufferLength)) { + MBIM_PACKET_SERVICE_INFO_T *pInfo = (MBIM_PACKET_SERVICE_INFO_T *)pCmdDone->InformationBuffer; + PacketServiceState = le32toh(pInfo->PacketServiceState); + mbim_update_state(); + } + mbim_free(pRequest); mbim_free(pCmdDone); + return err; +} + +#define _align_32(len) {len += (len % 4) ? (4 - (len % 4)) : 0;} +static int mbim_populate_connect_data(MBIM_SET_CONNECT_T **connect_req_ptr) { + int offset; + int buflen = 0; + + if (mbim_apn && strlen(mbim_apn) > 0) buflen += 2*strlen(mbim_apn) ; + _align_32(buflen); + if (mbim_user && strlen(mbim_user) > 0) buflen += 2*strlen(mbim_user); + _align_32(buflen); + if (mbim_passwd && strlen(mbim_passwd) > 0) buflen += 2*strlen(mbim_passwd); + _align_32(buflen); + + *connect_req_ptr = (MBIM_SET_CONNECT_T*)malloc(sizeof(MBIM_SET_CONNECT_T) + buflen); + if (! *connect_req_ptr) { + mbim_debug("not enough memory\n"); + return -1; + } + memset(*connect_req_ptr, 0, sizeof(MBIM_SET_CONNECT_T) + buflen); + + offset = 0; + if (mbim_apn && strlen(mbim_apn) > 0) { + (*connect_req_ptr)->AccessStringSize = htole32(2*strlen(mbim_apn)); + (*connect_req_ptr)->AccessStringOffset = htole32(offset + sizeof(MBIM_SET_CONNECT_T)); + offset = char2wchar(mbim_apn, strlen(mbim_apn), &(*connect_req_ptr)->DataBuffer[offset], buflen - offset); + _align_32(offset); + } + + if (mbim_user && strlen(mbim_user) > 0) { + (*connect_req_ptr)->UserNameSize = htole32(2*strlen(mbim_user)); + (*connect_req_ptr)->UserNameOffset = htole32(offset + sizeof(MBIM_SET_CONNECT_T)); + offset = char2wchar(mbim_user, strlen(mbim_user), &(*connect_req_ptr)->DataBuffer[offset], buflen - offset); + _align_32(offset); + } + + if (mbim_passwd && strlen(mbim_passwd) > 0) { + (*connect_req_ptr)->PasswordSize = htole32(2*strlen(mbim_passwd)); + (*connect_req_ptr)->PasswordOffset = htole32(offset + sizeof(MBIM_SET_CONNECT_T)); + offset = char2wchar(mbim_passwd, strlen(mbim_passwd), &(*connect_req_ptr)->DataBuffer[offset], buflen - offset); + } + + return buflen; +} + +static int mbim_set_connect(int onoff, int sessionID) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_COMMAND_DONE_T *pCmdDone = NULL; + MBIM_SET_CONNECT_T *set_connect = NULL; + int err; + + mbim_debug("%s(onoff=%d, sessionID=%d)", __func__, onoff, sessionID); + /* alloc memory then populate APN USERNAME PASSWORD */ + int buflen = mbim_populate_connect_data(&set_connect); + if (buflen < 0) { + return ENOMEM; + } + + set_connect->SessionId = htole32(sessionID); + if (onoff == 0) + set_connect->ActivationCommand = htole32(MBIMActivationCommandDeactivate); + else + set_connect->ActivationCommand = htole32(MBIMActivationCommandActivate); + + set_connect->Compression = htole32(MBIMCompressionNone); + set_connect->AuthProtocol = htole32(mbim_auth); + set_connect->IPType = htole32(mbim_iptype); + memcpy(set_connect->ContextType.uuid, str2uuid(UUID_MBIMContextTypeInternet), 16); + + pRequest = compose_basic_connect_command(MBIM_CID_CONNECT, MBIM_CID_CMD_TYPE_SET, set_connect, sizeof(MBIM_SET_CONNECT_T) + buflen); + mbim_free(set_connect); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout*10); + mbim_check_err(err, pRequest, pCmdDone); + + if (le32toh(pCmdDone->InformationBufferLength)) { + 
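+        /* A successful MBIM_CID_CONNECT set returns an MBIM_CONNECT_T; its
+         * ActivationState feeds the ActivationState/mbim_update_state()
+         * bookkeeping that requestQueryDataCall() relies on later. */
+        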
MBIM_CONNECT_T *pInfo = (MBIM_CONNECT_T *)pCmdDone->InformationBuffer; + ActivationState = le32toh(pInfo->ActivationState); + mbim_update_state(); + } + + mbim_free(pRequest); mbim_free(pCmdDone); + return err; +} + +static int mbim_ip_config(PROFILE_T *profile, int sessionID) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_COMMAND_DONE_T *pCmdDone = NULL; + MBIM_IP_CONFIGURATION_INFO_T ip_info; + int err; + + if (profile->ipv4.Address == 0 || mbim_verbose) + mbim_debug("%s(sessionID=%d)", __func__, sessionID); + ip_info.SessionId = htole32(sessionID); + pRequest = compose_basic_connect_command(MBIM_CID_IP_CONFIGURATION, MBIM_CID_CMD_TYPE_QUERY, &ip_info, sizeof(ip_info)); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCmdDone); + + if (le32toh(pCmdDone->InformationBufferLength)) { + UINT8 prefix, *ipv4=NULL, *ipv6=NULL, *gw=NULL, *dns1=NULL, *dns2=NULL; + UINT32 mtu = 1500; + MBIM_IP_CONFIGURATION_INFO_T *pInfo = (MBIM_IP_CONFIGURATION_INFO_T *)pCmdDone->InformationBuffer; + + /* IPv4 network configration */ + if (le32toh(pInfo->IPv4ConfigurationAvailable)&0x1) { + MBIM_IPV4_ELEMENT_T *pAddress = (MBIM_IPV4_ELEMENT_T *)(&pInfo->DataBuffer[le32toh(pInfo->IPv4AddressOffset)-sizeof(MBIM_IP_CONFIGURATION_INFO_T)]); + prefix = le32toh(pAddress->OnLinkPrefixLength); + ipv4 = pAddress->IPv4Address; + + if (le32toh(pInfo->IPv4ConfigurationAvailable)&0x2) + gw = (UINT8 *)(&pInfo->DataBuffer[le32toh(pInfo->IPv4GatewayOffset)-sizeof(MBIM_IP_CONFIGURATION_INFO_T)]); + + if (le32toh(pInfo->IPv4ConfigurationAvailable)&0x4) { + dns1 = (UINT8 *)(&pInfo->DataBuffer[le32toh(pInfo->IPv4DnsServerOffset)-sizeof(MBIM_IP_CONFIGURATION_INFO_T)]); + if (le32toh(pInfo->IPv4DnsServerCount) == 2) + dns2 = dns1 + 4; + } + + if (le32toh(pInfo->IPv4ConfigurationAvailable)&0x8) + mtu = le32toh(pInfo->IPv4Mtu); + + if (profile->ipv4.Address != mbim2qmi_ipv4addr(*(uint32_t *)ipv4) || mbim_verbose) { + mbim_dump_ipconfig(pInfo, "<"); + profile->ipv4.Address = mbim2qmi_ipv4addr(*(uint32_t *)ipv4); + } + + if(gw != NULL) + profile->ipv4.Gateway = mbim2qmi_ipv4addr(*(uint32_t *)gw); + profile->ipv4.SubnetMask = mbim2qmi_ipv4addr(0xFFFFFFFF>>(32-prefix)<<(32-prefix)); + if(dns1 != NULL) + profile->ipv4.DnsPrimary = mbim2qmi_ipv4addr(*(uint32_t *)dns1); + if(dns2 != NULL) + profile->ipv4.DnsSecondary = mbim2qmi_ipv4addr(*(uint32_t *)dns2); + profile->ipv4.Mtu = mbim2qmi_ipv4addr(mtu); + } + + /* IPv6 network configration */ + if (le32toh(pInfo->IPv6ConfigurationAvailable)&0x1) { + gw = NULL; dns1 = NULL; dns2 = NULL; + MBIM_IPV6_ELEMENT_T *pAddress = (MBIM_IPV6_ELEMENT_T *)(&pInfo->DataBuffer[le32toh(pInfo->IPv6AddressOffset)-sizeof(MBIM_IP_CONFIGURATION_INFO_T)]); + prefix = le32toh(pAddress->OnLinkPrefixLength); + ipv6 = pAddress->IPv6Address; + + if (le32toh(pInfo->IPv6ConfigurationAvailable)&0x2) + gw = (UINT8 *)(&pInfo->DataBuffer[le32toh(pInfo->IPv6GatewayOffset)-sizeof(MBIM_IP_CONFIGURATION_INFO_T)]); + + if (le32toh(pInfo->IPv6ConfigurationAvailable)&0x4) { + dns1 = (UINT8 *)(&pInfo->DataBuffer[le32toh(pInfo->IPv6DnsServerOffset)-sizeof(MBIM_IP_CONFIGURATION_INFO_T)]); + if (le32toh(pInfo->IPv6DnsServerCount) == 2) + dns2 = dns1 + 16; + } + + if (le32toh(pInfo->IPv6ConfigurationAvailable)&0x8) + mtu = le32toh(pInfo->IPv6Mtu); + + if(ipv6 != NULL) + mbim2qmi_ipv6addr(ipv6, profile->ipv6.Address); + if(gw != NULL) + mbim2qmi_ipv6addr(gw, profile->ipv6.Gateway); + if(dns1 != NULL) + mbim2qmi_ipv6addr(dns1, profile->ipv6.DnsPrimary); + if(dns2 != NULL) + 
mbim2qmi_ipv6addr(dns2, profile->ipv6.DnsSecondary);
+            profile->ipv6.PrefixLengthIPAddr = prefix;
+            profile->ipv6.PrefixLengthGateway = prefix;
+            profile->ipv6.Mtu = mbim2qmi_ipv4addr(mtu);
+        }
+    }
+    return err;
+}
+
+int mbim_proxy_configure(const char *dev) {
+    MBIM_MESSAGE_HEADER *pRequest = NULL;
+    MBIM_COMMAND_DONE_T *pCmdDone = NULL;
+    MBIM_LIBQMI_PROXY_CONFIG_T *cfg;
+    int err;
+
+    pRequest = compose_basic_connect_command(
+        MBIM_CID_PROXY_CONTROL_CONFIGURATION,
+        MBIM_CID_CMD_TYPE_SET,
+        NULL,
+        sizeof(*cfg) + strlen(dev)*2);
+    if (pRequest) {
+        memcpy(((MBIM_COMMAND_MSG_T *)pRequest)->DeviceServiceId.uuid, str2uuid(UUID_LIBMBIM_PROXY), 16);
+        cfg = (MBIM_LIBQMI_PROXY_CONFIG_T *)((MBIM_COMMAND_MSG_T *)pRequest)->InformationBuffer;
+
+        cfg->DevicePathOffset = htole32(sizeof(*cfg));
+        cfg->DevicePathSize = htole32(char2wchar(dev, strlen(dev), cfg->DataBuffer, strlen(dev)*2));
+        cfg->Timeout = htole32(15);
+    }
+
+    err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout);
+    mbim_check_err(err, pRequest, pCmdDone);
+
+    mbim_free(pRequest); mbim_free(pCmdDone);
+    return err;
+}
+
+static int mbim_update_state(void) {
+    int changes = 0;
+
+    if (oldReadyState != ReadyState) {
+        mbim_debug("SubscriberReadyState %s -> %s ", MBIMSubscriberReadyStateStr(oldReadyState), MBIMSubscriberReadyStateStr(ReadyState));
+        oldReadyState = ReadyState; changes++;
+    }
+    if (oldRegisterState != RegisterState) {
+        mbim_debug("RegisterState %s -> %s ", MBIMRegisterStateStr(oldRegisterState), MBIMRegisterStateStr(RegisterState));
+        oldRegisterState = RegisterState; changes++;
+    }
+    if (oldPacketServiceState != PacketServiceState) {
+        mbim_debug("PacketServiceState %s -> %s ", MBIMPacketServiceStateStr(oldPacketServiceState), MBIMPacketServiceStateStr(PacketServiceState));
+        oldPacketServiceState = PacketServiceState; changes++;
+    }
+    if (oldActivationState != ActivationState) {
+        mbim_debug("ActivationState %s -> %s ", MBIMActivationStateStr(oldActivationState), MBIMActivationStateStr(ActivationState));
+        oldActivationState = ActivationState; changes++;
+    }
+
+    return changes;
+}
+
+static int mbim_init(PROFILE_T *profile) {
+    int retval;
+    int t = 0;
+
+    if (profile->proxy[0] && !strcmp(profile->proxy, LIBMBIM_PROXY)) {
+        retval = mbim_proxy_configure(profile->qmichannel);
+        if (retval) goto exit;
+    }
+
+    while (t++ < 10) {
+        retval = mbim_open_device(4096);
+        if (retval != ETIMEDOUT)
+            break;
+    }
+    if (retval) goto exit;
+    retval = mbim_device_caps_query(profile);
+    if (retval) goto exit;
+    mbim_update_state();
+    retval = mbim_device_services_query();
+    if (retval) goto exit;
+    mbim_update_state();
+    retval = mbim_set_radio_state(MBIMRadioOn);
+    if (retval) goto exit;
+    mbim_update_state();
+
+    if (qmi_over_mbim_support) {
+        if (!socketpair(AF_LOCAL, SOCK_STREAM, 0, qmi_over_mbim_sk)) {
+            qmidev_send = qmi_over_mbim_qmidev_send;
+#ifdef CONFIG_CELLINFO //by now, only this function needs QMI over MBIM
+            qmi_over_mbim_nas = qmi_over_mbim_get_client_id(QMUX_TYPE_NAS);
+#endif
+        }
+    }
+
+    return 0;
+
+exit:
+    return retval;
+}
+
+static int mbim_deinit(void) {
+    if (qmi_over_mbim_nas) {
+        qmi_over_mbim_release_client_id(QMUX_TYPE_NAS, qmi_over_mbim_nas);
+        qmi_over_mbim_nas = 0;
+    }
+
+    mbim_close_device();
+
+    if (qmi_over_mbim_sk[0] != -1) {
+        close(qmi_over_mbim_sk[0]);
+        close(qmi_over_mbim_sk[1]);
+    }
+
+    return 0;
+}
+
+const struct qmi_device_ops mbim_dev_ops = {
+    .init = mbim_init,
+    .deinit = mbim_deinit,
+    .read = mbim_read_thread,
+};
+
+static int requestBaseBandVersion(PROFILE_T *profile) {
+    (void)profile;
+    return 0;
+}
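+/*
+ * Usage sketch for the two dispatch tables in this file (illustrative only --
+ * the PROFILE_T field usage and the IP-family constant are assumptions; the
+ * real call sites live in the main connection state machine):
+ *
+ *     PROFILE_T profile;                      // filled in by option parsing
+ *     if (mbim_dev_ops.init(&profile) == 0) {
+ *         SIM_Status sim;
+ *         UCHAR attached = 0;
+ *         mbim_request_ops.requestGetSIMStatus(&sim);
+ *         mbim_request_ops.requestRegistrationState(&attached);
+ *         if (attached)
+ *             mbim_request_ops.requestSetupDataCall(&profile, 0x04); // 0x04 assumed = IPv4
+ *         ...
+ *         mbim_dev_ops.deinit();
+ *     }
+ */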
+ +static int requestGetSIMStatus(SIM_Status *pSIMStatus) +{ + int retval; + + *pSIMStatus = SIM_ABSENT; + retval = mbim_subscriber_status_query(); + if (retval) + goto exit; + mbim_update_state(); + + switch(ReadyState) { + case MBIMSubscriberReadyStateNotInitialized: *pSIMStatus = SIM_NOT_READY; break; + case MBIMSubscriberReadyStateInitialized: *pSIMStatus = SIM_READY; break; + case MBIMSubscriberReadyStateSimNotInserted: *pSIMStatus = SIM_ABSENT; break; + case MBIMSubscriberReadyStateBadSim: *pSIMStatus = SIM_BAD; break; + case MBIMSubscriberReadyStateFailure: *pSIMStatus = SIM_ABSENT; break; + case MBIMSubscriberReadyStateNotActivated: *pSIMStatus = SIM_ABSENT; break; + case MBIMSubscriberReadyStateDeviceLocked: *pSIMStatus = SIM_PIN; break; + default: *pSIMStatus = SIM_ABSENT; break; + } + +exit: + return retval; +} + +static int requestRegistrationState(UCHAR *pPSAttachedState) { + int retval; + + *pPSAttachedState = 0; + retval = mbim_register_state_query(); + if (retval) + goto exit; + mbim_update_state(); + + switch (RegisterState) { + case MBIMRegisterStateUnknown: *pPSAttachedState = 0; break; + case MBIMRegisterStateDeregistered: *pPSAttachedState = 0; break; + case MBIMRegisterStateSearching: *pPSAttachedState = 0; break; + case MBIMRegisterStateHome: *pPSAttachedState = 1; break; + case MBIMRegisterStateRoaming: *pPSAttachedState = 1; break; + case MBIMRegisterStatePartner: *pPSAttachedState = 0; break; + case MBIMRegisterStateDenied: *pPSAttachedState = 0; break; + default: *pPSAttachedState = 0; break; + } + + if (*pPSAttachedState == 0) + goto exit; + + retval = mbim_packet_service_query(); + if (retval) + goto exit; + + switch (PacketServiceState) { + case MBIMPacketServiceStateUnknown: *pPSAttachedState = 0; break; + case MBIMPacketServiceStateAttaching: *pPSAttachedState = 0; break; + case MBIMPacketServiceStateAttached: *pPSAttachedState = 1; break; + case MBIMPacketServiceStateDetaching: *pPSAttachedState = 0; break; + case MBIMPacketServiceStateDetached: *pPSAttachedState = 0; break; + default: *pPSAttachedState = 0; break; + } + + if (*pPSAttachedState == 0) + mbim_packet_service_set(MBIMPacketServiceActionAttach); + +exit: + return retval; +} + +static int requestSetupDataCall(PROFILE_T *profile, int curIpFamily) { + int retval; + + (void)curIpFamily; + + if (profile->apn) + mbim_apn = profile->apn; + if (profile->user) + mbim_user = profile->user; + if (profile->password) + mbim_passwd = profile->password; + if (profile->auth) + mbim_auth = profile->auth; + if (profile->enable_ipv4) + mbim_iptype = MBIMContextIPTypeIPv4; + if (profile->enable_ipv6) + mbim_iptype = MBIMContextIPTypeIPv6; + if (profile->enable_ipv4 && profile->enable_ipv6) + mbim_iptype = MBIMContextIPTypeIPv4AndIPv6; + + retval = mbim_set_connect(1, mbim_sessionID); + if (retval) + goto exit; + +exit: + return retval; +} + +static int requestQueryDataCall(UCHAR *pConnectionStatus, int curIpFamily) { + int retval; + + (void)curIpFamily; + + *pConnectionStatus = QWDS_PKT_DATA_DISCONNECTED; + + retval = mbim_query_connect(mbim_sessionID); + if (retval) + goto exit; + + switch(ActivationState) { + case MBIMActivationStateUnknown: *pConnectionStatus = QWDS_PKT_DATA_UNKNOW; break; + case MBIMActivationStateActivated: *pConnectionStatus = QWDS_PKT_DATA_CONNECTED; break; + case MBIMActivationStateActivating: *pConnectionStatus = QWDS_PKT_DATA_DISCONNECTED; break; + case MBIMActivationStateDeactivated: *pConnectionStatus = QWDS_PKT_DATA_DISCONNECTED; break; + case MBIMActivationStateDeactivating: 
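+    /* Only MBIMActivationStateActivated maps to "connected"; transitional and
+     * unknown states are reported as disconnected so the caller can retry. */
+        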
*pConnectionStatus = QWDS_PKT_DATA_DISCONNECTED; break; + default: *pConnectionStatus = QWDS_PKT_DATA_DISCONNECTED; break; + } + +exit: + return retval; +} + +static int requestDeactivateDefaultPDP(PROFILE_T *profile, int curIpFamily) { + int retval; + + (void)profile; + (void)curIpFamily; + + retval = mbim_set_connect(0, mbim_sessionID); + if (retval) + goto exit; + +exit: + return retval; +} + +static int requestGetIPAddress(PROFILE_T *profile, int curIpFamily) { + int retval; + + (void)curIpFamily; + retval = mbim_ip_config(profile, mbim_sessionID); + if (retval) + goto exit; + +exit: + return retval; +} + +#ifdef CONFIG_CELLINFO +static int requestGetCellInfoList(void) { + if (qmi_over_mbim_nas) { + if (qmi_request_ops.requestGetCellInfoList) + return qmi_request_ops.requestGetCellInfoList(); + } + + return 0; +} +#endif + +const struct request_ops mbim_request_ops = { + .requestBaseBandVersion = requestBaseBandVersion, + .requestGetSIMStatus = requestGetSIMStatus, + .requestRegistrationState = requestRegistrationState, + .requestSetupDataCall = requestSetupDataCall, + .requestQueryDataCall = requestQueryDataCall, + .requestDeactivateDefaultPDP = requestDeactivateDefaultPDP, + .requestGetIPAddress = requestGetIPAddress, +#ifdef CONFIG_CELLINFO + .requestGetCellInfoList = requestGetCellInfoList, +#endif +}; + +int qmi_over_mbim_qmidev_send(PQCQMIMSG pQMI) { + MBIM_MESSAGE_HEADER *pRequest = NULL; + MBIM_COMMAND_DONE_T *pCmdDone = NULL; + int err; + size_t len = le16toh(pQMI->QMIHdr.Length) + 1; + + if (pQMI->QMIHdr.QMIType != QMUX_TYPE_CTL) { + if (pQMI->QMIHdr.QMIType == QMUX_TYPE_NAS) + pQMI->QMIHdr.ClientId = qmi_over_mbim_nas; + + if (pQMI->QMIHdr.ClientId == 0) { + dbg_time("QMIType %d has no clientID", pQMI->QMIHdr.QMIType); + return -ENODEV; + } + } + + pRequest = compose_qmi_over_mbim_command(1, MBIM_CID_CMD_TYPE_SET, pQMI, len); + err = mbim_send_command(pRequest, &pCmdDone, mbim_default_timeout); + mbim_check_err(err, pRequest, pCmdDone); + + err = -1; + len = le32toh(pCmdDone->InformationBufferLength); + if (len) { + if (write(qmi_over_mbim_sk[0], pCmdDone->InformationBuffer, len) == (long)len) { + err = 0; + }; + } + + mbim_free(pRequest); mbim_free(pCmdDone); + return err; +} diff --git a/wwan/app/quectel_cm_5G/src/qendian.h b/wwan/app/quectel_cm_5G/src/qendian.h new file mode 100644 index 0000000..ba9b766 --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/qendian.h @@ -0,0 +1,52 @@ +#ifndef __QUECTEL_ENDIAN_H__ +#define __QUECTEL_ENDIAN_H__ +#include + +#ifndef htole32 +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define htole16(x) (uint16_t)(x) +#define le16toh(x) (uint16_t)(x) +#define letoh16(x) (uint16_t)(x) +#define htole32(x) (uint32_t)(x) +#define le32toh(x) (uint32_t)(x) +#define letoh32(x) (uint32_t)(x) +#define htole64(x) (uint64_t)(x) +#define le64toh(x) (uint64_t)(x) +#define letoh64(x) (uint64_t)(x) +#else +static __inline uint16_t __bswap16(uint16_t __x) { + return (__x<<8) | (__x>>8); +} + +static __inline uint32_t __bswap32(uint32_t __x) { + return (__x>>24) | (__x>>8&0xff00) | (__x<<8&0xff0000) | (__x<<24); +} + +static __inline uint64_t __bswap64(uint64_t __x) { + return (__bswap32(__x)+0ULL<<32) | (__bswap32(__x>>32)); +} + +#define htole16(x) __bswap16(x) +#define le16toh(x) __bswap16(x) +#define letoh16(x) __bswap16(x) +#define htole32(x) __bswap32(x) +#define le32toh(x) __bswap32(x) +#define letoh32(x) __bswap32(x) +#define htole64(x) __bswap64(x) +#define le64toh(x) __bswap64(x) +#define letoh64(x) __bswap64(x) +#endif +#endif + +#define le16_to_cpu(x) 
le16toh((uint16_t)(x))
+#define le32_to_cpu(x) le32toh((uint32_t)(x))
+#define le64_to_cpu(x) le64toh((uint64_t)(x))
+#define cpu_to_le16(x) htole16((uint16_t)(x))
+#define cpu_to_le32(x) htole32((uint32_t)(x))
+#define cpu_to_le64(x) htole64((uint64_t)(x))
+
+static __inline uint32_t ql_swap32(uint32_t __x) {
+    return (__x>>24) | (__x>>8&0xff00) | (__x<<8&0xff0000) | (__x<<24);
+}
+#endif //__QUECTEL_ENDIAN_H__
+
diff --git a/wwan/app/quectel_cm_5G/src/qlist.h b/wwan/app/quectel_cm_5G/src/qlist.h
new file mode 100644
index 0000000..4fe86ba
--- /dev/null
+++ b/wwan/app/quectel_cm_5G/src/qlist.h
@@ -0,0 +1,39 @@
+#ifndef __QUECTEL_LIST_H__
+#define __QUECTEL_LIST_H__
+#include <stddef.h> /* offsetof(), used by qnode_to_item() */
+struct qlistnode
+{
+    struct qlistnode *next;
+    struct qlistnode *prev;
+};
+
+#define qnode_to_item(node, container, member) \
+    (container *) (((char*) (node)) - offsetof(container, member))
+
+#define qlist_for_each(node, list) \
+    for (node = (list)->next; node != (list); node = node->next)
+
+#define qlist_empty(list) ((list) == (list)->next)
+#define qlist_head(list) ((list)->next)
+#define qlist_tail(list) ((list)->prev)
+
+static void qlist_init(struct qlistnode *node)
+{
+    node->next = node;
+    node->prev = node;
+}
+
+static void qlist_add_tail(struct qlistnode *head, struct qlistnode *item)
+{
+    item->next = head;
+    item->prev = head->prev;
+    head->prev->next = item;
+    head->prev = item;
+}
+
+static void qlist_remove(struct qlistnode *item)
+{
+    item->next->prev = item->prev;
+    item->prev->next = item->next;
+}
+#endif
\ No newline at end of file
diff --git a/wwan/app/quectel_cm_5G/src/qmap_bridge_mode.c b/wwan/app/quectel_cm_5G/src/qmap_bridge_mode.c
new file mode 100644
index 0000000..18e825d
--- /dev/null
+++ b/wwan/app/quectel_cm_5G/src/qmap_bridge_mode.c
@@ -0,0 +1,402 @@
+/******************************************************************************
+  @file    qmap_bridge_mode.c
+  @brief   Connectivity bridge manager.
+
+  DESCRIPTION
+  Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules.
+
+  INITIALIZATION AND SEQUENCING REQUIREMENTS
+  None.
+
+  ---------------------------------------------------------------------------
+  Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved.
+  Quectel Wireless Solution Proprietary and Confidential.
+  ---------------------------------------------------------------------------
+******************************************************************************/
+#include "QMIThread.h"
+
+static size_t ql_fread(const char *filename, void *buf, size_t size) {
+    FILE *fp = fopen(filename, "r");
+    size_t n = 0;
+
+    memset(buf, 0x00, size);
+
+    if (fp) {
+        n = fread(buf, 1, size, fp);
+        if (n <= 0 || n == size) {
+            dbg_time("warning: fail to fread(%s), fread=%zu, buf_size=%zu: (%s)", filename, n, size, strerror(errno));
+        }
+        fclose(fp);
+    }
+
+    return n > 0 ? n : 0;
+}
+
+static size_t ql_fwrite(const char *filename, const void *buf, size_t size) {
+    FILE *fp = fopen(filename, "w");
+    size_t n = 0;
+
+    if (fp) {
+        n = fwrite(buf, 1, size, fp);
+        if (n != size) {
+            dbg_time("warning: fail to fwrite(%s), fwrite=%zu, buf_size=%zu: (%s)", filename, n, size, strerror(errno));
+        }
+        fclose(fp);
+    }
+
+    return n > 0 ? n : 0;
+}
+
+int ql_bridge_mode_detect(PROFILE_T *profile) {
+    const char *ifname = profile->qmapnet_adapter[0] ?
profile->qmapnet_adapter : profile->usbnet_adapter; + const char *driver; + char bridge_mode[128]; + char bridge_ipv4[128]; + char ipv4[128]; + char buf[64]; + size_t n; + int in_bridge = 0; + + driver = profile->driver_name; + snprintf(bridge_mode, sizeof(bridge_mode), "/sys/class/net/%s/bridge_mode", ifname); + snprintf(bridge_ipv4, sizeof(bridge_ipv4), "/sys/class/net/%s/bridge_ipv4", ifname); + + if (access(bridge_ipv4, R_OK)) { + if (errno != ENOENT) { + dbg_time("fail to access %s, errno: %d (%s)", bridge_mode, errno, strerror(errno)); + return 0; + } + + snprintf(bridge_mode, sizeof(bridge_mode), "/sys/module/%s/parameters/bridge_mode", driver); + snprintf(bridge_ipv4, sizeof(bridge_ipv4), "/sys/module/%s/parameters/bridge_ipv4", driver); + + if (access(bridge_mode, R_OK)) { + if (errno != ENOENT) { + dbg_time("fail to access %s, errno: %d (%s)", bridge_mode, errno, strerror(errno)); + } + return 0; + } + } + + n = ql_fread(bridge_mode, buf, sizeof(buf)); + if (n > 0) { + in_bridge = (buf[0] != '0'); + } + if (!in_bridge) + return 0; + + memset(ipv4, 0, sizeof(ipv4)); + + if (strstr(bridge_ipv4, "/sys/class/net/") || profile->qmap_mode == 0 || profile->qmap_mode == 1) { + snprintf(ipv4, sizeof(ipv4), "0x%x", profile->ipv4.Address); + dbg_time("echo '%s' > %s", ipv4, bridge_ipv4); + ql_fwrite(bridge_ipv4, ipv4, strlen(ipv4)); + } + else { + snprintf(ipv4, sizeof(ipv4), "0x%x:%d", profile->ipv4.Address, profile->muxid); + dbg_time("echo '%s' > %s", ipv4, bridge_ipv4); + ql_fwrite(bridge_ipv4, ipv4, strlen(ipv4)); + } + + return in_bridge; +} + +int ql_enable_qmi_wwan_rawip_mode(PROFILE_T *profile) { + char filename[256]; + char buf[4]; + size_t n; + FILE *fp; + + if (!qmidev_is_qmiwwan(profile->qmichannel)) + return 0; + + snprintf(filename, sizeof(filename), "/sys/class/net/%s/qmi/rawip", profile->usbnet_adapter); + n = ql_fread(filename, buf, sizeof(buf)); + + if (n == 0) + return 0; + + if (buf[0] == '1' || buf[0] == 'Y') + return 0; + + fp = fopen(filename , "w"); + if (fp == NULL) { + dbg_time("Fail to fopen(%s, \"w\"), errno: %d (%s)", filename, errno, strerror(errno)); + return 1; + } + + buf[0] = 'Y'; + n = fwrite(buf, 1, 1, fp); + if (n != 1) { + dbg_time("Fail to fwrite(%s), errno: %d (%s)", filename, errno, strerror(errno)); + fclose(fp); + return 1; + } + fclose(fp); + + return 0; +} + +int ql_driver_type_detect(PROFILE_T *profile) { + if (qmidev_is_gobinet(profile->qmichannel)) { + profile->qmi_ops = &gobi_qmidev_ops; + } + else { + profile->qmi_ops = &qmiwwan_qmidev_ops; + } + qmidev_send = profile->qmi_ops->send; + + return 0; +} + +void ql_set_driver_bridge_mode(PROFILE_T *profile) { + char enable[16]; + char filename[256]; + + if(profile->qmap_mode) + snprintf(filename, sizeof(filename), "/sys/class/net/%s/bridge_mode", profile->qmapnet_adapter); + else + snprintf(filename, sizeof(filename), "/sys/class/net/%s/bridge_mode", profile->usbnet_adapter); + snprintf(enable, sizeof(enable), "%02d\n", profile->enable_bridge); + ql_fwrite(filename, enable, sizeof(enable)); +} + +static int ql_qmi_qmap_mode_detect(PROFILE_T *profile) { + char buf[128]; + int n; + struct { + char filename[255 * 2]; + char linkname[255 * 2]; + } *pl; + + pl = (typeof(pl)) malloc(sizeof(*pl)); + + snprintf(pl->linkname, sizeof(pl->linkname), "/sys/class/net/%s/device/driver", profile->usbnet_adapter); + n = readlink(pl->linkname, pl->filename, sizeof(pl->filename)); + pl->filename[n] = '\0'; + while (pl->filename[n] != '/') + n--; + strncpy(profile->driver_name, &pl->filename[n+1], 
sizeof(profile->driver_name) - 1);
+
+ ql_get_driver_rmnet_info(profile, &profile->rmnet_info);
+ if (profile->rmnet_info.size) {
+ profile->qmap_mode = profile->rmnet_info.qmap_mode;
+ if (profile->qmap_mode) {
+ int offset_id = (profile->muxid == 0) ? profile->pdp - 1 : profile->muxid - 0x81;
+
+ if (profile->qmap_mode == 1)
+ offset_id = 0;
+ profile->muxid = profile->rmnet_info.mux_id[offset_id];
+ strncpy(profile->qmapnet_adapter, profile->rmnet_info.ifname[offset_id], sizeof(profile->qmapnet_adapter) - 1);
+ profile->qmap_size = profile->rmnet_info.rx_urb_size;
+ profile->qmap_version = profile->rmnet_info.qmap_version;
+ }
+
+ goto _out;
+ }
+
+ snprintf(pl->filename, sizeof(pl->filename), "/sys/class/net/%s/qmap_mode", profile->usbnet_adapter);
+ if (access(pl->filename, R_OK)) {
+ if (errno != ENOENT) {
+ dbg_time("fail to access %s, errno: %d (%s)", pl->filename, errno, strerror(errno));
+ goto _out;
+ }
+
+ snprintf(pl->filename, sizeof(pl->filename), "/sys/module/%s/parameters/qmap_mode", profile->driver_name);
+ if (access(pl->filename, R_OK)) {
+ if (errno != ENOENT) {
+ dbg_time("fail to access %s, errno: %d (%s)", pl->filename, errno, strerror(errno));
+ goto _out;
+ }
+
+ snprintf(pl->filename, sizeof(pl->filename), "/sys/class/net/%s/device/driver/module/parameters/qmap_mode", profile->usbnet_adapter);
+ if (access(pl->filename, R_OK)) {
+ if (errno != ENOENT) {
+ dbg_time("fail to access %s, errno: %d (%s)", pl->filename, errno, strerror(errno));
+ goto _out;
+ }
+ }
+ }
+ }
+
+ if (!access(pl->filename, R_OK)) {
+ n = ql_fread(pl->filename, buf, sizeof(buf));
+ if (n > 0) {
+ profile->qmap_mode = atoi(buf);
+
+ if (profile->qmap_mode > 1) {
+ if (!profile->muxid)
+ profile->muxid = profile->pdp + 0x80; //muxid is 0x8X for PDN-X
+ snprintf(profile->qmapnet_adapter, sizeof(profile->qmapnet_adapter),
+ "%.16s.%d", profile->usbnet_adapter, profile->muxid - 0x80);
+ } else if (profile->qmap_mode == 1) {
+ profile->muxid = 0x81;
+ strncpy(profile->qmapnet_adapter, profile->usbnet_adapter, sizeof(profile->qmapnet_adapter) - 1);
+ }
+ }
+ }
+ else if (qmidev_is_qmiwwan(profile->qmichannel)) {
+ snprintf(pl->filename, sizeof(pl->filename), "/sys/class/net/qmimux%d", profile->pdp - 1);
+ if (access(pl->filename, R_OK)) {
+ if (errno != ENOENT) {
+ dbg_time("fail to access %s, errno: %d (%s)", pl->filename, errno, strerror(errno));
+ }
+ goto _out;
+ }
+
+ //upstream Kernel Style QMAP qmi_wwan.c
+ snprintf(pl->filename, sizeof(pl->filename), "/sys/class/net/%s/qmi/add_mux", profile->usbnet_adapter);
+ n = ql_fread(pl->filename, buf, sizeof(buf));
+ if (n >= 5) {
+ dbg_time("If using QMAP via /sys/class/net/%s/qmi/add_mux", profile->usbnet_adapter);
+ #if 1
+ dbg_time("Please set the MTU of wwan0 >= the max downlink QMAP packet size");
+ #else
+ dbg_time("File:%s Line:%d Please make sure to add the following patch to qmi_wwan.c", __func__, __LINE__);
+ /*
+ diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+ index 74bebbd..db8a777 100644
+ --- a/drivers/net/usb/qmi_wwan.c
+ +++ b/drivers/net/usb/qmi_wwan.c
+ @@ -379,6 +379,24 @@ static ssize_t add_mux_store(struct device *d, struct device_attribute *attr, c
+ if (!ret) {
+ info->flags |= QMI_WWAN_FLAG_MUX;
+ ret = len;
+ +#if 1 //Add by Quectel
+ + if (le16_to_cpu(dev->udev->descriptor.idVendor) == 0x2c7c) {
+ + int idProduct = le16_to_cpu(dev->udev->descriptor.idProduct);
+ +
+ + if (idProduct == 0x0121 || idProduct == 0x0125 || idProduct == 0x0435) //MDM9x07
+ + dev->rx_urb_size = 4*1024;
+ + else if (idProduct == 0x0306) //MDM9x40
+ +
dev->rx_urb_size = 16*1024; + + else if (idProduct == 0x0512) //SDX20 + + dev->rx_urb_size = 32*1024; + + else if (idProduct == 0x0620) //SDX24 + + dev->rx_urb_size = 32*1024; + + else if (idProduct == 0x0800) //SDX55 + + dev->rx_urb_size = 32*1024; + + else + + dev->rx_urb_size = 32*1024; + + } + +#endif + } + err: + rtnl_unlock(); + */ + #endif + profile->qmap_mode = n/5; //0x11\n0x22\n0x33\n + if (profile->qmap_mode > 1) { + //PDN-X map to qmimux-X + if(!profile->muxid) { + profile->muxid = (buf[5*(profile->pdp - 1) + 2] - '0')*16 + (buf[5*(profile->pdp - 1) + 3] - '0'); + snprintf(profile->qmapnet_adapter, sizeof(profile->qmapnet_adapter), "qmimux%d", profile->pdp - 1); + } else { + profile->muxid = (buf[5*(profile->muxid - 0x81) + 2] - '0')*16 + (buf[5*(profile->muxid - 0x81) + 3] - '0'); + snprintf(profile->qmapnet_adapter, sizeof(profile->qmapnet_adapter), "qmimux%d", profile->muxid - 0x81); + } + } else if (profile->qmap_mode == 1) { + profile->muxid = (buf[5*0 + 2] - '0')*16 + (buf[5*0 + 3] - '0'); + snprintf(profile->qmapnet_adapter, sizeof(profile->qmapnet_adapter), + "qmimux%d", 0); + } + } + } + +_out: + if (profile->qmap_mode) { + if (profile->qmap_size == 0) { + profile->qmap_size = 16*1024; + snprintf(pl->filename, sizeof(pl->filename), "/sys/class/net/%s/qmap_size", profile->usbnet_adapter); + if (!access(pl->filename, R_OK)) { + size_t n; + char buf[32]; + n = ql_fread(pl->filename, buf, sizeof(buf)); + if (n > 0) { + profile->qmap_size = atoi(buf); + } + } + } + + if (profile->qmap_version == 0) { + profile->qmap_version = WDA_DL_DATA_AGG_QMAP_ENABLED; + } + + dbg_time("qmap_mode = %d, qmap_version = %d, qmap_size = %d, muxid = 0x%02x, qmap_netcard = %s", + profile->qmap_mode, profile->qmap_version, profile->qmap_size, profile->muxid, profile->qmapnet_adapter); + } + ql_set_driver_bridge_mode(profile); + free(pl); + + return 0; +} + +static int ql_mbim_usb_vlan_mode_detect(PROFILE_T *profile) { + char tmp[128]; + + snprintf(tmp, sizeof(tmp), "/sys/class/net/%s.%d", profile->usbnet_adapter, profile->pdp); + if (!access(tmp, F_OK)) { + profile->qmap_mode = 4; + profile->muxid = profile->pdp; + no_trunc_strncpy(profile->qmapnet_adapter, tmp + strlen("/sys/class/net/"), sizeof(profile->qmapnet_adapter) - 1); + + dbg_time("mbim_qmap_mode = %d, vlan_id = 0x%02x, qmap_netcard = %s", + profile->qmap_mode, profile->muxid, profile->qmapnet_adapter); + } + + return 0; +} + +static int ql_mbim_mhi_qmap_mode_detect(PROFILE_T *profile) { + ql_get_driver_rmnet_info(profile, &profile->rmnet_info); + if (profile->rmnet_info.size) { + profile->qmap_mode = profile->rmnet_info.qmap_mode; + if (profile->qmap_mode) { + int offset_id = profile->pdp - 1; + + if (profile->qmap_mode == 1) + offset_id = 0; + profile->muxid = profile->pdp; + strcpy(profile->qmapnet_adapter, profile->rmnet_info.ifname[offset_id]); + profile->qmap_size = profile->rmnet_info.rx_urb_size; + profile->qmap_version = profile->rmnet_info.qmap_version; + + dbg_time("mbim_qmap_mode = %d, vlan_id = 0x%02x, qmap_netcard = %s", + profile->qmap_mode, profile->muxid, profile->qmapnet_adapter); + } + + goto _out; + } + +_out: + return 0; +} + +int ql_qmap_mode_detect(PROFILE_T *profile) { + if (profile->software_interface == SOFTWARE_MBIM) { + if (profile->hardware_interface == HARDWARE_USB) + return ql_mbim_usb_vlan_mode_detect(profile); + else if (profile->hardware_interface == HARDWARE_PCIE) + return ql_mbim_mhi_qmap_mode_detect(profile); + } else if (profile->software_interface == SOFTWARE_QMI) { + return 
ql_qmi_qmap_mode_detect(profile); + } +#ifdef CONFIG_QRTR + else if(profile->software_interface == SOFTWARE_QRTR) { + char tmp[128]; + + profile->qmap_mode = 4; + profile->qmap_version = WDA_DL_DATA_AGG_QMAP_V5_ENABLED; + profile->qmap_size = 31*1024; + profile->muxid = 0x80 | profile->pdp; + snprintf(profile->qmapnet_adapter, sizeof(profile->qmapnet_adapter), "rmnet_data%d", profile->muxid&0xF); + + snprintf(tmp, sizeof(tmp), "/sys/class/net/%s", profile->qmapnet_adapter); + if (access(tmp, F_OK)) { + rtrmnet_ctl_create_vnd(profile->usbnet_adapter, profile->qmapnet_adapter, + profile->muxid, profile->qmap_version, 11, 4096); + } + } +#endif + return 0; +} diff --git a/wwan/app/quectel_cm_5G/src/qrtr.c b/wwan/app/quectel_cm_5G/src/qrtr.c new file mode 100644 index 0000000..450c6bd --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/qrtr.c @@ -0,0 +1,657 @@ +//https://github.com/andersson/qrtr +/****************************************************************************** + @file QrtrCM.c + @brief GobiNet driver. + + DESCRIPTION + Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. + + --------------------------------------------------------------------------- + Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved. + Quectel Wireless Solution Proprietary and Confidential. + --------------------------------------------------------------------------- +******************************************************************************/ +#include +#include +#include +#include +#include +#include "QMIThread.h" + +typedef struct { + uint32_t service; + uint32_t version; + uint32_t instance; + uint32_t node; + uint32_t port; +} QrtrService; + +#define QRTR_MAX (QMUX_TYPE_WDS_ADMIN + 1) +static QrtrService service_list[QRTR_MAX]; +static int qmiclientId[QRTR_MAX]; +static int get_client(UCHAR QMIType); +static uint32_t node_modem = 3; //IPQ ~ 3, QCM ~ 0 + +#ifdef USE_LINUX_MSM_IPC +#include + +struct xport_ipc_router_server_addr { + uint32_t service; + uint32_t instance; + uint32_t node_id; + uint32_t port_id; +}; + +union ctl_msg { + uint32_t cmd; + struct { + uint32_t cmd; + uint32_t service; + uint32_t instance; + uint32_t node_id; + uint32_t port_id; + } srv; + struct { + uint32_t cmd; + uint32_t node_id; + uint32_t port_id; + } cli; + }; +#define CTL_CMD_NEW_SERVER 4 +#define CTL_CMD_REMOVE_SERVER 5 + +#define VERSION_MASK 0xff +#define GET_VERSION(x) (x & 0xff) +#define GET_XPORT_SVC_INSTANCE(x) GET_VERSION(x) +#define GET_INSTANCE(x) ((x & 0xff00) >> 8) + +static int msm_ipc_socket(const char *name) +{ + int sock; + int flags; + + sock = socket(AF_MSM_IPC, SOCK_DGRAM, 0); + if (sock < 0) { + dbg_time("%s(%s) errno: %d (%s)\n", __func__, name, errno, strerror(errno)); + return -1; + } + + fcntl(sock, F_SETFD, FD_CLOEXEC); + flags = fcntl(sock, F_GETFL, 0); + fcntl(sock, F_SETFL, flags | O_NONBLOCK); + + return sock; +} + +static uint32_t xport_lookup +( + int lookup_sock_fd, + uint32_t service_id, + uint32_t version +) +{ + uint32_t num_servers_found = 0; + uint32_t num_entries_to_fill = 4; + struct server_lookup_args *lookup_arg; + int i; + + lookup_arg = (struct server_lookup_args *)malloc(sizeof(*lookup_arg) + + (num_entries_to_fill * sizeof(struct msm_ipc_server_info))); + if (!lookup_arg) + { + dbg_time("%s: Malloc failed\n", __func__); + return 0; + } + + lookup_arg->port_name.service = service_id; + lookup_arg->port_name.instance = GET_XPORT_SVC_INSTANCE(version); + 
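+ /* server_lookup_args is a variable-length ioctl argument: srv_info[]
+  * follows the fixed header, which is why the malloc() above reserves room
+  * for num_entries_to_fill msm_ipc_server_info entries. lookup_mask =
+  * VERSION_MASK (0xff) matches only the low "version" byte of the instance. */
+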
lookup_arg->num_entries_in_array = num_entries_to_fill; + lookup_arg->lookup_mask = VERSION_MASK; + lookup_arg->num_entries_found = 0; + if (ioctl(lookup_sock_fd, IPC_ROUTER_IOCTL_LOOKUP_SERVER, lookup_arg) < 0) + { + dbg_time("%s: Lookup failed for %08x: %08x\n", __func__, service_id, version); + free(lookup_arg); + return 0; + } + + dbg_time("%s: num_entries_found %d for type=%d instance=%d", __func__, + lookup_arg->num_entries_found, service_id, version); + num_servers_found = 0; + for (i = 0; ((i < (int)num_entries_to_fill) && (i < lookup_arg->num_entries_found)); i++) + { + QrtrService service_info[1]; + + if (lookup_arg->srv_info[i].node_id != node_modem) + continue; + num_servers_found++; + + service_info[0].service = lookup_arg->srv_info[i].service; + service_info[0].version = GET_VERSION(lookup_arg->srv_info[i].instance); + service_info[0].instance = GET_INSTANCE(lookup_arg->srv_info[i].instance); + service_info[0].node = lookup_arg->srv_info[i].node_id; + service_info[0].port = lookup_arg->srv_info[i].port_id; + + service_list[service_id] = service_info[0]; + qmiclientId[service_id] = get_client(service_id); + } + + free(lookup_arg); + return num_servers_found; +} + +static int xport_send(int sock, uint32_t node, uint32_t port, const void *data, unsigned int sz) +{ + struct sockaddr_msm_ipc addr = {}; + int rc; + + addr.family = AF_MSM_IPC; + addr.address.addrtype = MSM_IPC_ADDR_ID; + addr.address.addr.port_addr.node_id = node; + addr.address.addr.port_addr.port_id = port; + + rc = sendto(sock, data, sz, MSG_DONTWAIT, (void *)&addr, sizeof(addr)); + if (rc < 0) { + dbg_time("xport_send errno: %d (%s)\n", errno, strerror(errno)); + return -1; + } + + return 0; +} + +static int xport_recv(int sock, void *data, unsigned int sz, uint32_t *node, uint32_t *port) +{ + struct sockaddr_msm_ipc addr = {}; + socklen_t addr_size = sizeof(struct sockaddr_msm_ipc); + int rc; + + rc = recvfrom(sock, data, sz, MSG_DONTWAIT, (void *)&addr, &addr_size); + if (rc < 0) { + dbg_time("xport_recv errno: %d (%s)\n", errno, strerror(errno)); + } + else if (addr.address.addrtype != MSM_IPC_ADDR_ID) { + dbg_time("xport_recv addrtype is NOT MSM_IPC_ADDR_ID\n"); + rc = -1; + } + + *node = addr.address.addr.port_addr.node_id; + *port = addr.address.addr.port_addr.port_id; + return rc; +} +#define qmi_recv xport_recv + +static int xport_ctrl_init(void) +{ + int ctrl_sock; + int rc; + uint32_t instance = 1; //modem + uint32_t version; + + ctrl_sock = msm_ipc_socket("ctrl_port"); + if (ctrl_sock == -1) + return -1; + + rc = ioctl(ctrl_sock, IPC_ROUTER_IOCTL_GET_VERSION, &version); + if (rc < 0) { + dbg_time("%s: failed to get ipc version\n", __func__); + goto init_close_ctrl_fd; + } + dbg_time("%s ipc_version = %d", __func__, version); + + rc = ioctl(ctrl_sock, IPC_ROUTER_IOCTL_BIND_CONTROL_PORT, NULL); + if (rc < 0) { + dbg_time("%s: failed to bind as control port\n", __func__); + goto init_close_ctrl_fd; + } + + //cat /sys/kernel/debug/msm_ipc_router/dump_servers + rc = 0; + rc += xport_lookup(ctrl_sock, QMUX_TYPE_WDS, instance); + if (service_list[QMUX_TYPE_WDS].port) { + qmiclientId[QMUX_TYPE_WDS_IPV6] = get_client(QMUX_TYPE_WDS); + } + rc += xport_lookup(ctrl_sock, QMUX_TYPE_NAS, instance); + rc += xport_lookup(ctrl_sock, QMUX_TYPE_UIM, instance); + rc += xport_lookup(ctrl_sock, QMUX_TYPE_DMS, instance); + rc += xport_lookup(ctrl_sock, QMUX_TYPE_WDS_ADMIN, instance); + + if (rc == 0) { + dbg_time("%s: failed to lookup qmi service\n", __func__); + goto init_close_ctrl_fd; + } + + return ctrl_sock; + 
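+ /* any failure after the control socket was created lands here so the
+  * descriptor is closed instead of being leaked to the caller */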
+init_close_ctrl_fd: + close(ctrl_sock); + return -1; +} + +static void handle_ctrl_pkt(int sock) { + union ctl_msg pkt; + uint32_t type; + int rc; + + rc = recvfrom(sock, &pkt, sizeof(pkt), 0, NULL, NULL); + if (rc < 0) + return; + + type = le32toh(pkt.cmd); + if (CTL_CMD_NEW_SERVER == type || CTL_CMD_REMOVE_SERVER == type) { + QrtrService s; + + s.service = le32toh(pkt.srv.service); + s.version = le32toh(pkt.srv.instance) & 0xff; + s.instance = le32toh(pkt.srv.instance) >> 8; + s.node = le32toh(pkt.srv.node_id); + s.port = le32toh(pkt.srv.port_id); + + if (debug_qmi) + dbg_time ("[qrtr] %s server on %u:%u -> service %u, version %u, instance %u", + CTL_CMD_NEW_SERVER == type ? "add" : "remove", + s.node, s.port, s.service, s.version, s.instance); + + if (CTL_CMD_NEW_SERVER == type) { + if (s.service < QRTR_MAX) { + service_list[s.service] = s; + } + } + else if (CTL_CMD_REMOVE_SERVER == type) { + if (s.service < QRTR_MAX) { + memset(&service_list[s.service], 0, sizeof(QrtrService)); + } + } + } +} +#else +#include +#include "qrtr.h" +#endif + +static int qrtr_socket(void) +{ + struct sockaddr_qrtr sq; + socklen_t sl = sizeof(sq); + int sock; + int rc; + + sock = socket(AF_QIPCRTR, SOCK_DGRAM, 0); + if (sock < 0) { + dbg_time("qrtr_socket errno: %d (%s)\n", errno, strerror(errno)); + return -1; + } + + rc = getsockname(sock, (void *)&sq, &sl); + if (rc || sq.sq_family != AF_QIPCRTR || sl != sizeof(sq)) { + dbg_time("getsockname: %d (%s)\n", errno, strerror(errno)); + close(sock); + return -1; + } + + return sock; +} + +static int qrtr_send(int sock, uint32_t node, uint32_t port, const void *data, unsigned int sz) +{ + struct sockaddr_qrtr sq = {}; + int rc; + + sq.sq_family = AF_QIPCRTR; + sq.sq_node = node; + sq.sq_port = port; + + rc = sendto(sock, data, sz, MSG_DONTWAIT, (void *)&sq, sizeof(sq)); + if (rc < 0) { + dbg_time("sendto errno: %d (%s)\n", errno, strerror(errno)); + return -1; + } + + return 0; +} + +static int qrtr_recv(int sock, void *data, unsigned int sz, uint32_t *node, uint32_t *port) +{ + struct sockaddr_qrtr sq = {}; + socklen_t sl = sizeof(sq); + int rc; + + rc = recvfrom(sock, data, sz, MSG_DONTWAIT, (void *)&sq, &sl); + if (rc < 0) { + dbg_time("qrtr_recv errno: %d (%s)\n", errno, strerror(errno)); + } + + *node = sq.sq_node; + *port = sq.sq_port; + return rc; + } +#define qmi_recv qrtr_recv + +static int qrtr_ctrl_init(void) { + int sock; + int rc; + struct qrtr_ctrl_pkt pkt; + struct sockaddr_qrtr sq; + socklen_t sl = sizeof(sq); + + sock = qrtr_socket(); + if (sock == -1) + return -1; + + memset(&pkt, 0, sizeof(pkt)); + pkt.cmd = htole32(QRTR_TYPE_NEW_LOOKUP); + + getsockname(sock, (void *)&sq, &sl); + rc = qrtr_send(sock, sq.sq_node, QRTR_PORT_CTRL, &pkt, sizeof(pkt)); + if (rc == -1) { + dbg_time("qrtr_send errno: %d (%s)\n", errno, strerror(errno)); + close(sock); + return -1; + } + + return sock; +} + +static void handle_server_change(uint32_t type, struct qrtr_ctrl_pkt *ppkt) { + struct qrtr_ctrl_pkt pkt = *ppkt; + QrtrService s; + + s.service = le32toh(pkt.server.service); + s.version = le32toh(pkt.server.instance) & 0xff; + s.instance = le32toh(pkt.server.instance) >> 8; + s.node = le32toh(pkt.server.node); + s.port = le32toh(pkt.server.port); + + if (debug_qmi) + dbg_time ("[qrtr] %s server on %u:%u -> service %u, version %u, instance %u", + QRTR_TYPE_NEW_SERVER == type ? 
"add" : "remove", + s.node, s.port, s.service, s.version, s.instance); + + if (s.node != node_modem) + return; //we only care modem + + if (QRTR_TYPE_NEW_SERVER == type) { + if (s.service < QRTR_MAX) { + service_list[s.service] = s; + } + } + else if (QRTR_TYPE_DEL_SERVER == type) { + if (s.service < QRTR_MAX) { + memset(&service_list[s.service], 0, sizeof(QrtrService)); + } + } + } + +static void handle_ctrl_pkt(int sock) { + struct qrtr_ctrl_pkt pkt; + struct sockaddr_qrtr sq; + socklen_t sl = sizeof(sq); + uint32_t type; + int rc; + + rc = recvfrom(sock, &pkt, sizeof(pkt), 0, (void *)&sq, &sl); + if (rc < 0) + return; + + type = le32toh(pkt.cmd); + if (debug_qmi) + dbg_time("type %u, node %u, sq.port %x, len: %d", type, sq.sq_node, sq.sq_port, rc); + + if (sq.sq_port != QRTR_PORT_CTRL) + return; + + if (QRTR_TYPE_NEW_SERVER == type || QRTR_TYPE_DEL_SERVER == type) { + handle_server_change(type, &pkt); + } +} + +static int get_client(UCHAR QMIType) { + int ClientId; + QrtrService *s = &service_list[QMIType]; + + if (!s ->service) { + dbg_time("%s service: %d for QMIType: %d", __func__, s ->service, QMIType); + return -ENODEV; + } + +#ifdef USE_LINUX_MSM_IPC + ClientId = msm_ipc_socket("xport"); +#else + ClientId = qrtr_socket(); +#endif + if (ClientId == -1) { + return 0; + } + + switch (QMIType) { + case QMUX_TYPE_WDS: dbg_time("Get clientWDS = %d", ClientId); break; + case QMUX_TYPE_DMS: dbg_time("Get clientDMS = %d", ClientId); break; + case QMUX_TYPE_NAS: dbg_time("Get clientNAS = %d", ClientId); break; + case QMUX_TYPE_QOS: dbg_time("Get clientQOS = %d", ClientId); break; + case QMUX_TYPE_WMS: dbg_time("Get clientWMS = %d", ClientId); break; + case QMUX_TYPE_PDS: dbg_time("Get clientPDS = %d", ClientId); break; + case QMUX_TYPE_UIM: dbg_time("Get clientUIM = %d", ClientId); break; + case QMUX_TYPE_WDS_ADMIN: dbg_time("Get clientWDA = %d", ClientId); + break; + default: break; + } + + return ClientId; +} + +static void handle_alloc_client(PROFILE_T *profile) { + int srv_list[] = {QMUX_TYPE_WDS, QMUX_TYPE_NAS, QMUX_TYPE_UIM, QMUX_TYPE_DMS, QMUX_TYPE_WDS_ADMIN}; + size_t i = 0, srv_ready = 0; + static int report = -1; + + if (report != -1) + return; + + for(i = 0; i < sizeof(srv_list)/sizeof(srv_list[0]); i++) { + int srv = srv_list[i]; + + if (service_list[srv].service) + srv_ready++; + else + continue; + + if (qmiclientId[srv] == 0) { + qmiclientId[srv] = get_client(srv); + + if (qmiclientId[srv] != 0) { + if (srv == QMUX_TYPE_WDS) { + qmiclientId[QMUX_TYPE_WDS_IPV6] = get_client(QMUX_TYPE_WDS); + } + else if (srv == QMUX_TYPE_WDS_ADMIN) { + profile->wda_client = qmiclientId[QMUX_TYPE_WDS_ADMIN]; + } + } + } + } + + if (srv_ready == sizeof(srv_list)/sizeof(srv_list[0])) { + if (qmiclientId[QMUX_TYPE_WDS]) { + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_CONNECTED); + } else { + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_DISCONNECTED); + } + report = 1; + } +} + +static int qmi_send(PQCQMIMSG pRequest) { + uint8_t QMIType = pRequest->QMIHdr.QMIType; + int sock; + QrtrService *s = &service_list[QMIType == QMUX_TYPE_WDS_IPV6 ? 
QMUX_TYPE_WDS: QMIType]; + sock = qmiclientId[QMIType]; + + pRequest->QMIHdr.ClientId = 0xaa; + if (!s ->service || !sock) { + dbg_time("%s service: %d, sock: %d for QMIType: %d", __func__, s ->service, sock, QMIType); + return -ENODEV; + } + +#ifdef USE_LINUX_MSM_IPC + return xport_send(sock, s->node, s->port, &pRequest->MUXMsg, + le16_to_cpu(pRequest->QMIHdr.Length) + 1 - sizeof(QCQMI_HDR)); +#else + return qrtr_send(sock, s->node, s->port, &pRequest->MUXMsg, + le16_to_cpu(pRequest->QMIHdr.Length) + 1 - sizeof(QCQMI_HDR)); +#endif +} + +static int qmi_deinit(void) { + unsigned int i; + + for (i = 0; i < sizeof(qmiclientId)/sizeof(qmiclientId[0]); i++) + { + if (qmiclientId[i] != 0) + { + close(qmiclientId[i]); + qmiclientId[i] = 0; + } + } + + return 0; +} + +static void * qmi_read(void *pData) { + PROFILE_T *profile = (PROFILE_T *)pData; + int ctrl_sock; + int wait_for_request_quit = 0; + +#ifdef USE_LINUX_MSM_IPC + ctrl_sock = xport_ctrl_init(); + if (ctrl_sock != -1) + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_CONNECTED); +#else + ctrl_sock = qrtr_ctrl_init(); +#endif + + if (ctrl_sock == -1) + goto _quit; + + while (1) { + struct pollfd pollfds[16] = {{qmidevice_control_fd[1], POLLIN, 0}, {ctrl_sock, POLLIN, 0}}; + int ne, ret, nevents = 2; + unsigned int i; + + for (i = 0; i < sizeof(qmiclientId)/sizeof(qmiclientId[0]); i++) + { + if (qmiclientId[i] != 0) + { + pollfds[nevents].fd = qmiclientId[i]; + pollfds[nevents].events = POLLIN; + pollfds[nevents].revents = 0; + nevents++; + } + } + + do { + ret = poll(pollfds, nevents, wait_for_request_quit ? 1000 : -1); + } while ((ret < 0) && (errno == EINTR)); + + if (ret == 0 && wait_for_request_quit) { + QmiThreadRecvQMI(NULL); //main thread may pending on QmiThreadSendQMI() + continue; + } + + if (ret <= 0) { + dbg_time("%s poll=%d, errno: %d (%s)", __func__, ret, errno, strerror(errno)); + break; + } + + for (ne = 0; ne < nevents; ne++) { + int fd = pollfds[ne].fd; + short revents = pollfds[ne].revents; + + if (revents & (POLLERR | POLLHUP | POLLNVAL)) { + dbg_time("%s poll err/hup/inval", __func__); + dbg_time("epoll fd = %d, events = 0x%04x", fd, revents); + if (fd == qmidevice_control_fd[1]) { + } else { + } + if (revents & (POLLERR | POLLHUP | POLLNVAL)) + goto _quit; + } + + if ((revents & POLLIN) == 0) + continue; + + if (fd == qmidevice_control_fd[1]) { + int triger_event; + if (read(fd, &triger_event, sizeof(triger_event)) == sizeof(triger_event)) { + //DBG("triger_event = 0x%x", triger_event); + switch (triger_event) { + case RIL_REQUEST_QUIT: + goto _quit; + break; + case SIG_EVENT_STOP: + wait_for_request_quit = 1; + break; + default: + break; + } + } + } + else if (fd == ctrl_sock) { + handle_ctrl_pkt(ctrl_sock); + handle_alloc_client(profile); + } + else + { + PQCQMIMSG pResponse = (PQCQMIMSG)cm_recv_buf; + int rc; + uint32_t sq_node = 0; + uint32_t sq_port = 0; + + rc = qmi_recv(fd, &pResponse->MUXMsg, sizeof(cm_recv_buf) - sizeof(QCQMI_HDR), &sq_node, &sq_port); + if (debug_qmi) + dbg_time("fd %d, node %u, port %x, len: %d", fd, sq_node, sq_port, rc); + + if (rc <= 0) + { + dbg_time("%s read=%d errno: %d (%s)", __func__, rc, errno, strerror(errno)); + break; + } + + for (i = 0; i < sizeof(qmiclientId)/sizeof(qmiclientId[0]); i++) + { + if (qmiclientId[i] == fd) + { + pResponse->QMIHdr.QMIType = i; + + if (service_list[i].node != sq_node || service_list[i].port != sq_port) { + continue; + } + } + } + + pResponse->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI; + pResponse->QMIHdr.Length = cpu_to_le16(rc + sizeof(QCQMI_HDR) - 
1); + pResponse->QMIHdr.CtlFlags = 0x00; + pResponse->QMIHdr.ClientId = 0xaa; + + QmiThreadRecvQMI(pResponse); + } + } + } + +_quit: + qmi_deinit(); + close(ctrl_sock); + qmidevice_send_event_to_main(RIL_INDICATE_DEVICE_DISCONNECTED); + QmiThreadRecvQMI(NULL); //main thread may pending on QmiThreadSendQMI() + dbg_time("%s exit", __func__); + pthread_exit(NULL); + return NULL; +} + +const struct qmi_device_ops qrtr_qmidev_ops = { + .deinit = qmi_deinit, + .send = qmi_send, + .read = qmi_read, +}; + diff --git a/wwan/app/quectel_cm_5G/src/qrtr.h b/wwan/app/quectel_cm_5G/src/qrtr.h new file mode 100644 index 0000000..d1727a8 --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/qrtr.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _LINUX_QRTR_H +#define _LINUX_QRTR_H + +#include +#include + +#ifndef AF_QIPCRTR +#define AF_QIPCRTR 42 +#endif + +#define QRTR_NODE_BCAST 0xffffffffu +#define QRTR_PORT_CTRL 0xfffffffeu + +struct sockaddr_qrtr { + __kernel_sa_family_t sq_family; + __u32 sq_node; + __u32 sq_port; +}; + +enum qrtr_pkt_type { + QRTR_TYPE_DATA = 1, + QRTR_TYPE_HELLO = 2, + QRTR_TYPE_BYE = 3, + QRTR_TYPE_NEW_SERVER = 4, + QRTR_TYPE_DEL_SERVER = 5, + QRTR_TYPE_DEL_CLIENT = 6, + QRTR_TYPE_RESUME_TX = 7, + QRTR_TYPE_EXIT = 8, + QRTR_TYPE_PING = 9, + QRTR_TYPE_NEW_LOOKUP = 10, + QRTR_TYPE_DEL_LOOKUP = 11, +}; + +#define QRTR_TYPE_DEL_PROC 13 + +struct qrtr_ctrl_pkt { + __le32 cmd; + + union { + struct { + __le32 service; + __le32 instance; + __le32 node; + __le32 port; + } server; + + struct { + __le32 node; + __le32 port; + } client; + + struct { + __le32 rsvd; + __le32 node; + } proc; + + }; +} __attribute__ ((packed)); + +#define QRTR_PROTO_VER_1 1 + +struct qrtr_hdr_v1 { + __le32 version; + __le32 type; + __le32 src_node_id; + __le32 src_port_id; + __le32 confirm_rx; + __le32 size; + __le32 dst_node_id; + __le32 dst_port_id; +} __attribute__ ((packed)); + +#endif /* _LINUX_QRTR_H */ diff --git a/wwan/app/quectel_cm_5G/src/quectel-atc-proxy.c b/wwan/app/quectel_cm_5G/src/quectel-atc-proxy.c new file mode 100644 index 0000000..9f7b329 --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/quectel-atc-proxy.c @@ -0,0 +1,506 @@ +/****************************************************************************** + @file quectel-atc-proxy.c + @brief atc proxy. + + DESCRIPTION + Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. + + --------------------------------------------------------------------------- + Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved. + Quectel Wireless Solution Proprietary and Confidential. + --------------------------------------------------------------------------- +******************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qlist.h" +#include "QMIThread.h" +#include "atchannel.h" +#include "at_tok.h" + +#define dprintf(fmt, args...) 
do { fprintf(stdout, "%s " fmt, get_time(), ##args); } while(0)
+#define SYSCHECK(c) do{if((c)<0) {dprintf("%s %d error: '%s' (code: %d)\n", __func__, __LINE__, strerror(errno), errno); return -1;}}while(0)
+#define cfmakenoblock(fd) do{fcntl(fd, F_SETFL, fcntl(fd,F_GETFL) | O_NONBLOCK);}while(0)
+
+#define safe_free(__x) do { if (__x) { free((void *)__x); __x = NULL;}} while(0)
+#define safe_at_response_free(__x) do { if (__x) { at_response_free(__x); __x = NULL;}} while(0)
+
+#define at_response_error(err, p_response) \
+ (err \
+ || p_response == NULL \
+ || p_response->finalResponse == NULL \
+ || p_response->success == 0)
+
+typedef struct {
+ struct qlistnode qnode;
+ int ClientFd;
+ unsigned AccessTime;
+} ATC_PROXY_CONNECTION;
+
+static int atc_proxy_quit = 0;
+static pthread_t thread_id = 0;
+static int atc_dev_fd = -1;
+static int atc_proxy_server_fd = -1;
+static struct qlistnode atc_proxy_connection;
+static int verbose_debug = 0;
+static int modem_reset_flag = 0;
+static uint8_t atc_buf[4096];
+static int asr_style_atc = 0;
+extern int asprintf(char **s, const char *fmt, ...);
+static ATC_PROXY_CONNECTION *current_client_fd = NULL;
+
+static void dump_atc(uint8_t *pATC, int fd, int size, const char flag)
+{
+ if (verbose_debug) {
+ printf("%c %d:\n", flag, fd);
+ printf("%.*s\n", size, pATC);
+ }
+}
+
+static int send_atc_to_client(int clientFd, uint8_t *pATC, int size) {
+ struct pollfd pollfds[]= {{clientFd, POLLOUT, 0}};
+ ssize_t ret = 0;
+
+ do {
+ ret = poll(pollfds, sizeof(pollfds)/sizeof(pollfds[0]), 5000);
+ } while (ret == -1 && errno == EINTR && atc_proxy_quit == 0);
+
+ if (pollfds[0].revents & POLLOUT) {
+ ret = write(clientFd, pATC, size);
+ }
+
+ return ret;
+}
+
+static void onUnsolicited(const char *s, const char *sms_pdu)
+{
+ struct qlistnode *con_node;
+ int ret;
+ char buf[1024];
+
+ buf[0] = '\0';
+ if (s) {
+ strcpy(buf, s);
+ strcat(buf, "\r\n");
+ }
+ if (sms_pdu) {
+ strcat(buf, sms_pdu);
+ strcat(buf, "\r\n");
+ }
+
+ if (current_client_fd) {
+ ATC_PROXY_CONNECTION *atc_con = current_client_fd;
+ ret = send_atc_to_client(atc_con->ClientFd, (uint8_t *)buf, strlen(buf));
+ if (ret < 0) {
+ close(atc_con->ClientFd);
+ qlist_remove(&atc_con->qnode);
+ free(atc_con);
+ current_client_fd = NULL;
+ }
+ return;
+ }
+
+ qlist_for_each(con_node, &atc_proxy_connection) {
+ ATC_PROXY_CONNECTION *atc_con = qnode_to_item(con_node, ATC_PROXY_CONNECTION, qnode);
+ if (atc_con && atc_con->ClientFd > 0) {
+ ret = send_atc_to_client(atc_con->ClientFd, (uint8_t *)buf, strlen(buf));
+ if (ret < 0) {
+ close(atc_con->ClientFd);
+ con_node = con_node->prev;
+ qlist_remove(&atc_con->qnode);
+ free(atc_con);
+ continue;
+ }
+ }
+ }
+}
+
+static void onTimeout(void) {
+ dprintf("%s", __func__);
+ //TODO
+}
+
+static void onClose(void) {
+ dprintf("%s", __func__);
+}
+
+static int create_local_server(const char *name) {
+ int sockfd = -1;
+ int reuse_addr = 1;
+ struct sockaddr_un sockaddr;
+ socklen_t alen;
+
+ /*Create server socket*/
+ SYSCHECK(sockfd = socket(AF_LOCAL, SOCK_STREAM, 0));
+
+ memset(&sockaddr, 0, sizeof(sockaddr));
+ sockaddr.sun_family = AF_LOCAL;
+ sockaddr.sun_path[0] = 0;
+ memcpy(sockaddr.sun_path + 1, name, strlen(name));
+
+ alen = strlen(name) + offsetof(struct sockaddr_un, sun_path) + 1;
+ SYSCHECK(setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &reuse_addr, sizeof(reuse_addr)));
+ if (bind(sockfd, (struct sockaddr *)&sockaddr, alen) < 0) {
+ close(sockfd);
+ dprintf("bind %s errno: %d (%s)\n", name, errno, strerror(errno));
+ return -1;
+ }
+
+ dprintf("local server: %s sockfd = %d\n", name, sockfd);
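+
+ /* the leading NUL written into sun_path above makes this an abstract-
+  * namespace UNIX socket (Linux-only): nothing appears on the filesystem
+  * and the name vanishes when the last fd is closed. The socket is kept
+  * non-blocking so accept() in the poll loop never stalls; a backlog of 1
+  * matches the single pending connection expected here. */
+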
cfmakenoblock(sockfd);
+ listen(sockfd, 1);
+
+ return sockfd;
+}
+
+static void accept_atc_connection(int serverfd) {
+ int clientfd = -1;
+ unsigned char addr[128];
+ socklen_t alen = sizeof(addr);
+ ATC_PROXY_CONNECTION *atc_con;
+
+ clientfd = accept(serverfd, (struct sockaddr *)addr, &alen);
+
+ atc_con = (ATC_PROXY_CONNECTION *)malloc(sizeof(ATC_PROXY_CONNECTION));
+ if (atc_con) {
+ qlist_init(&atc_con->qnode);
+ atc_con->ClientFd = clientfd;
+ atc_con->AccessTime = 0;
+ dprintf("+++ ClientFd=%d\n", atc_con->ClientFd);
+ qlist_add_tail(&atc_proxy_connection, &atc_con->qnode);
+ }
+
+ cfmakenoblock(clientfd);
+}
+
+static void cleanup_atc_connection(int clientfd) {
+ struct qlistnode *con_node;
+
+ qlist_for_each(con_node, &atc_proxy_connection) {
+ ATC_PROXY_CONNECTION *atc_con = qnode_to_item(con_node, ATC_PROXY_CONNECTION, qnode);
+ if (atc_con->ClientFd == clientfd) {
+ dprintf("--- ClientFd=%d\n", atc_con->ClientFd);
+ close(atc_con->ClientFd);
+ qlist_remove(&atc_con->qnode);
+ if (current_client_fd == atc_con)
+ current_client_fd = NULL;
+ free(atc_con);
+ break;
+ }
+ }
+}
+
+static int atc_proxy_init(void) {
+ int err;
+ char *cmd;
+ ATResponse *p_response = NULL;
+
+ err = at_handshake();
+ if (err) {
+ dprintf("handshake fail, TODO ... ");
+ goto exit;
+ }
+
+ at_send_command_singleline("AT+QCFG=\"usbnet\"", "+QCFG:", NULL);
+ at_send_command_multiline("AT+QNETDEVCTL=?", "+QNETDEVCTL:", NULL);
+ at_send_command("AT+CGREG=2", NULL); //GPRS Network Registration Status
+ at_send_command("AT+CEREG=2", NULL); //EPS Network Registration Status
+ at_send_command("AT+C5GREG=2", NULL); //5GS Network Registration Status
+
+ err = at_send_command_singleline("AT+QNETDEVSTATUS=?", "+QNETDEVSTATUS:", &p_response);
+ if (at_response_error(err, p_response))
+ asr_style_atc = 1; //EC200T/EC100Y do not support this AT command, but RG801/RG500U do
+
+ safe_at_response_free(p_response);
+
+ err = at_send_command_singleline("AT+QCFG=\"NAT\"", "+QCFG:", &p_response);
+ if (!at_response_error(err, p_response)) {
+ int old_nat, new_nat = asr_style_atc ?
1 : 0; + + err = at_tok_scanf(p_response->p_intermediates->line, "%s%d", NULL, &old_nat); + if (err == 2 && old_nat != new_nat) { + safe_at_response_free(p_response); + asprintf(&cmd, "AT+QCFG=\"NAT\",%d", new_nat); + err = at_send_command(cmd, &p_response); + safe_free(cmd); + if (!at_response_error(err, p_response)) { + err = at_send_command("at+cfun=1,1",NULL); + } + safe_at_response_free(p_response); + } + err = 0; + } + safe_at_response_free(p_response); + +exit: + return err; +} + +static void atc_start_server(const char* servername) { + atc_proxy_server_fd = create_local_server(servername); + dprintf("atc_proxy_server_fd = %d\n", atc_proxy_server_fd); + if (atc_proxy_server_fd == -1) { + dprintf("Failed to create %s, errno: %d (%s)\n", servername, errno, strerror(errno)); + } +} + +static void atc_close_server(const char* servername) { + if (atc_proxy_server_fd != -1) { + dprintf("%s %s close server\n", __func__, servername); + close(atc_proxy_server_fd); + atc_proxy_server_fd = -1; + } +} + +static void *atc_proxy_loop(void *param) +{ + uint8_t *pATC = atc_buf; + struct qlistnode *con_node; + ATC_PROXY_CONNECTION *atc_con; + + (void)param; + dprintf("%s enter thread_id %p\n", __func__, (void *)pthread_self()); + + qlist_init(&atc_proxy_connection); + while (atc_dev_fd > 0 && atc_proxy_quit == 0) { + struct pollfd pollfds[2+64]; + int ne, ret, nevents = 0; + ssize_t nreads; + + pollfds[nevents].fd = atc_dev_fd; + pollfds[nevents].events = POLLIN; + pollfds[nevents].revents= 0; + nevents++; + + if (atc_proxy_server_fd > 0) { + pollfds[nevents].fd = atc_proxy_server_fd; + pollfds[nevents].events = POLLIN; + pollfds[nevents].revents= 0; + nevents++; + } + + qlist_for_each(con_node, &atc_proxy_connection) { + atc_con = qnode_to_item(con_node, ATC_PROXY_CONNECTION, qnode); + + pollfds[nevents].fd = atc_con->ClientFd; + pollfds[nevents].events = POLLIN; + pollfds[nevents].revents= 0; + nevents++; + + if (nevents == (sizeof(pollfds)/sizeof(pollfds[0]))) + break; + } + + do { + ret = poll(pollfds, nevents, (atc_proxy_server_fd > 0) ? -1 : 200); + } while (ret == -1 && errno == EINTR && atc_proxy_quit == 0); + + if (ret < 0) { + dprintf("%s poll=%d, errno: %d (%s)\n", __func__, ret, errno, strerror(errno)); + goto atc_proxy_loop_exit; + } + + for (ne = 0; ne < nevents; ne++) { + int fd = pollfds[ne].fd; + short revents = pollfds[ne].revents; + + if (revents & (POLLERR | POLLHUP | POLLNVAL)) { + dprintf("%s poll fd = %d, revents = %04x\n", __func__, fd, revents); + if (fd == atc_dev_fd) { + goto atc_proxy_loop_exit; + } else if(fd == atc_proxy_server_fd) { + + } else { + cleanup_atc_connection(fd); + } + continue; + } + + if (!(pollfds[ne].revents & POLLIN)) { + continue; + } + + if (fd == atc_proxy_server_fd) { + accept_atc_connection(fd); + } + else if (fd == atc_dev_fd) { + usleep(10*1000); //let atchannel.c read at response. 
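+ /* data on the AT port is consumed inside atchannel.c (see the comment
+  * above): the brief sleep lets that reader parse the response/URC before
+  * this loop checks whether the modem reported a reset */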
+ if (modem_reset_flag)
+ goto atc_proxy_loop_exit;
+ }
+ else {
+ memset(atc_buf, 0x0, sizeof(atc_buf));
+ nreads = read(fd, pATC, sizeof(atc_buf) - 1);
+ if (nreads <= 0) {
+ dprintf("%s read=%d errno: %d (%s)\n", __func__, (int)nreads, errno, strerror(errno));
+ cleanup_atc_connection(fd);
+ break;
+ }
+
+ dump_atc(pATC, fd, nreads, 'r');
+ qlist_for_each(con_node, &atc_proxy_connection) {
+ atc_con = qnode_to_item(con_node, ATC_PROXY_CONNECTION, qnode);
+ if (atc_con->ClientFd == fd) {
+ current_client_fd = atc_con;
+ break;
+ }
+ }
+ at_send_command((const char *)pATC, NULL);
+ current_client_fd = NULL;
+ }
+ }
+ }
+
+atc_proxy_loop_exit:
+ at_close();
+ while (!qlist_empty(&atc_proxy_connection)) {
+ ATC_PROXY_CONNECTION *atc_con = qnode_to_item(qlist_head(&atc_proxy_connection), ATC_PROXY_CONNECTION, qnode);
+ cleanup_atc_connection(atc_con->ClientFd);
+ }
+ dprintf("%s exit, thread_id %p\n", __func__, (void *)pthread_self());
+
+ return NULL;
+}
+
+static void usage(void) {
+ dprintf(" -d A valid AT device\n"
+ " defaults to /dev/ttyUSB2, which may not be the AT port on every module\n"
+ " -i netcard name\n"
+ " -v Will show all details\n");
+}
+
+static void sig_action(int sig) {
+ if (atc_proxy_quit == 0) {
+ atc_proxy_quit = 1;
+ if (thread_id)
+ pthread_kill(thread_id, sig);
+ }
+}
+
+int main(int argc, char *argv[]) {
+ int opt;
+ char atc_dev[32+1] = "/dev/ttyUSB2";
+ int retry_times = 0;
+ char servername[64] = {0};
+
+ optind = 1;
+ signal(SIGINT, sig_action);
+
+ while (-1 != (opt = getopt(argc, argv, "d:i:vh"))) {
+ switch (opt) {
+ case 'd':
+ strncpy(atc_dev, optarg, sizeof(atc_dev) - 1);
+ break;
+ case 'i':
+ break; //the netcard name advertised in usage() is accepted but not used by this proxy
+ case 'v':
+ verbose_debug = 1;
+ break;
+ default:
+ usage();
+ return 0;
+ }
+ }
+
+ if (access(atc_dev, R_OK | W_OK)) {
+ dprintf("Fail to access %s, errno: %d (%s). break\n", atc_dev, errno, strerror(errno));
+ return -1;
+ }
+
+ snprintf(servername, sizeof(servername), "quectel-atc-proxy%c", atc_dev[strlen(atc_dev) - 1]);
+ dprintf("Will use atc-dev='%s', proxy='%s'\n", atc_dev, servername);
+
+ while (atc_proxy_quit == 0) {
+ if (access(atc_dev, R_OK | W_OK)) {
+ dprintf("Fail to access %s, errno: %d (%s). continue\n", atc_dev, errno, strerror(errno));
+ // wait device
+ sleep(3);
+ continue;
+ }
+
+ atc_dev_fd = open(atc_dev, O_RDWR | O_NONBLOCK | O_NOCTTY);
+ if (atc_dev_fd == -1) {
+ dprintf("Failed to open %s, errno: %d (%s). 
break\n", atc_dev, errno, strerror(errno)); + return -1; + } + cfmakenoblock(atc_dev_fd); + if (at_open(atc_dev_fd, onUnsolicited, 1)) { + close(atc_dev_fd); + atc_dev_fd = -1; + } + at_set_on_timeout(onTimeout); + at_set_on_reader_closed(onClose); + + /* no atc_proxy_loop lives, create one */ + pthread_create(&thread_id, NULL, atc_proxy_loop, NULL); + /* try to redo init if failed, init function must be successfully */ + while (atc_proxy_init() != 0) { + if (retry_times < 5) { + dprintf("fail to init proxy, try again in 2 seconds.\n"); + sleep(2); + retry_times++; + } else { + dprintf("has failed too much times, restart the modem and have a try...\n"); + break; + } + /* break loop if modem is detached */ + if (access(atc_dev, F_OK|R_OK|W_OK)) + break; + } + retry_times = 0; + atc_start_server(servername); + if (atc_proxy_server_fd == -1) + pthread_cancel(thread_id); + pthread_join(thread_id, NULL); + + /* close local server at last */ + atc_close_server(servername); + close(atc_dev_fd); + /* DO RESTART IN 20s IF MODEM RESET ITSELF */ + if (modem_reset_flag) { + unsigned int time_to_wait = 20; + while (time_to_wait) { + time_to_wait = sleep(time_to_wait); + } + modem_reset_flag = 0; + } + } + + return 0; +} \ No newline at end of file diff --git a/wwan/app/quectel_cm_5G/src/quectel-mbim-proxy.c b/wwan/app/quectel_cm_5G/src/quectel-mbim-proxy.c new file mode 100644 index 0000000..5cf6f41 --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/quectel-mbim-proxy.c @@ -0,0 +1,453 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "qendian.h" + +#define QUECTEL_MBIM_PROXY "quectel-mbim-proxy" +#define safe_close(_fd) do { if (_fd > 0) { close(_fd); _fd = -1; } } while(0) + +#define CM_MAX_CLIENT 32 +#define TID_MASK (0xFFFFFF) +#define TID_SHIFT (24) + +typedef enum { + MBIM_OPEN_MSG = 1, + MBIM_CLOSE_MSG = 2, + MBIM_OPEN_DONE = 0x80000001, + MBIM_CLOSE_DONE = 0x80000002, +} MBIM_MSG; + +typedef struct { + unsigned int MessageType; + unsigned int MessageLength; + unsigned int TransactionId; +} MBIM_MESSAGE_HEADER; + +typedef struct { + MBIM_MESSAGE_HEADER MessageHeader; + unsigned int MaxControlTransfer; +} MBIM_OPEN_MSG_T; + +typedef struct { + MBIM_MESSAGE_HEADER MessageHeader; + unsigned int Status; +} MBIM_OPEN_DONE_T; + +typedef struct { + int client_fd; + int client_idx; +} CM_CLIENT_T; + +static unsigned char cm_recv_buffer[4096]; +static CM_CLIENT_T cm_clients[CM_MAX_CLIENT]; +static int verbose = 0; + +const char * get_time(void) { + static char time_buf[128]; + struct timeval tv; + time_t time; + suseconds_t millitm; + struct tm *ti; + + gettimeofday (&tv, NULL); + + time= tv.tv_sec; + millitm = (tv.tv_usec + 500) / 1000; + + if (millitm == 1000) { + ++time; + millitm = 0; + } + + ti = localtime(&time); + sprintf(time_buf, "[%02d-%02d_%02d:%02d:%02d:%03d]", ti->tm_mon+1, ti->tm_mday, ti->tm_hour, ti->tm_min, ti->tm_sec, (int)millitm); + return time_buf; +} + +#define mbim_debug(fmt, args...) 
do { fprintf(stdout, "%s " fmt, get_time(), ##args); } while(0); + +static int non_block_write(int fd, void *data, int len) +{ + int ret; + struct pollfd pollfd = {fd, POLLOUT, 0}; + ret = poll(&pollfd, 1, 3000); + + if (ret <= 0) { + mbim_debug("%s poll ret=%d, errno: %d(%s)\n", __func__, ret, errno, strerror(errno)); + } + + ret = write (fd, data, len); + if (ret != len) + mbim_debug("%s write ret=%d, errno: %d(%s)\n", __func__, ret, errno, strerror(errno)); + + return len; +} + +static int mbim_send_open_msg(int mbim_dev_fd, uint32_t MaxControlTransfer) { + MBIM_OPEN_MSG_T open_msg; + MBIM_OPEN_MSG_T *pRequest = &open_msg; + + pRequest->MessageHeader.MessageType = htole32(MBIM_OPEN_MSG); + pRequest->MessageHeader.MessageLength = htole32(sizeof(MBIM_OPEN_MSG_T)); + pRequest->MessageHeader.TransactionId = htole32(1); + pRequest->MaxControlTransfer = htole32(MaxControlTransfer); + + mbim_debug("%s()\n", __func__); + return non_block_write(mbim_dev_fd, pRequest, sizeof(MBIM_OPEN_MSG_T)); +} + +/* + * parameter: proxy name + * return: local proxy server fd or -1 +*/ +static int proxy_make_server(const char *proxy_name) +{ + int len, flag; + struct sockaddr_un sockaddr; + int mbim_server_fd; + + mbim_server_fd = socket(AF_LOCAL, SOCK_STREAM, 0); + if (mbim_server_fd < 0) { + mbim_debug("socket failed: %s\n", strerror(errno)); + return -1; + } + if (fcntl(mbim_server_fd, F_SETFL, fcntl(mbim_server_fd, F_GETFL) | O_NONBLOCK) < 0) + mbim_debug("fcntl set server(%d) NONBLOCK attribute failed: %s\n", mbim_server_fd, strerror(errno)); + + memset(&sockaddr, 0, sizeof(sockaddr)); + sockaddr.sun_family = AF_LOCAL; + sockaddr.sun_path[0] = 0; + snprintf(sockaddr.sun_path, UNIX_PATH_MAX, "0%s", proxy_name); + sockaddr.sun_path[0] = '\0'; // string starts with leading '\0' + flag = 1; + if (setsockopt(mbim_server_fd, SOL_SOCKET, SO_REUSEADDR, &flag, sizeof(flag)) < 0) { + safe_close(mbim_server_fd); + mbim_debug("setsockopt failed\n"); + } + + len = strlen(proxy_name) + offsetof(struct sockaddr_un, sun_path) + 1; + if (bind(mbim_server_fd, (struct sockaddr*)&sockaddr, len) < 0) { + safe_close(mbim_server_fd); + mbim_debug("bind failed: %s\n", strerror(errno)); + return -1; + } + + listen(mbim_server_fd, 4); + return mbim_server_fd; +} + +static int handle_client_connect(int server_fd) +{ + int i, client_fd; + struct sockaddr_in cli_addr; + socklen_t len = sizeof(cli_addr); + + client_fd = accept(server_fd, (struct sockaddr *)&cli_addr, &len); + if (client_fd < 0) { + mbim_debug("proxy accept failed: %s\n", strerror(errno)); + return -1; + } + + if (fcntl(client_fd, F_SETFL, fcntl(client_fd, F_GETFL) | O_NONBLOCK) < 0) + mbim_debug("fcntl set client(%d) NONBLOCK attribute failed: %s\n", client_fd, strerror(errno)); + + for (i = 0; i < CM_MAX_CLIENT; i++) { + if (cm_clients[i].client_fd <= 0) { + cm_clients[i].client_fd = client_fd; + cm_clients[i].client_idx= i+1; + mbim_debug("%s client_fd=%d, client_idx=%d\n", __func__, cm_clients[i].client_fd, cm_clients[i].client_idx); + return 0; + } + } + + close(client_fd); + return -1; +} + +static void handle_client_disconnect(int client_fd) +{ + int i; + + for (i = 0; i < CM_MAX_CLIENT; i++) { + if (cm_clients[i].client_fd == client_fd) { + mbim_debug("%s client_fd=%d, client_idx=%d\n", __func__, cm_clients[i].client_fd, cm_clients[i].client_idx); + safe_close(cm_clients[i].client_fd); + return; + } + } +} + +static int handle_client_request(int mbim_dev_fd, int client_fd, void *pdata, int len) +{ + int i; + int client_idx = -1; + int ret; + MBIM_MESSAGE_HEADER 
*pRequest = (MBIM_MESSAGE_HEADER *)pdata;
+ unsigned int TransactionId = le32toh(pRequest->TransactionId);
+
+ for (i = 0; i < CM_MAX_CLIENT; i++) {
+ if (cm_clients[i].client_fd == client_fd) {
+ client_idx = cm_clients[i].client_idx;
+ break;
+ }
+ }
+
+ if (client_idx == -1) {
+ goto error;
+ }
+
+ if (le32toh(pRequest->MessageType) == MBIM_OPEN_MSG
+ || le32toh(pRequest->MessageType) == MBIM_CLOSE_MSG) {
+ MBIM_OPEN_DONE_T OpenDone;
+ OpenDone.MessageHeader.MessageType = htole32(le32toh(pRequest->MessageType) | 0x80000000);
+ OpenDone.MessageHeader.MessageLength = htole32(sizeof(OpenDone));
+ OpenDone.MessageHeader.TransactionId = htole32(TransactionId);
+ OpenDone.Status = htole32(0);
+ non_block_write(client_fd, &OpenDone, sizeof(OpenDone));
+ return 0;
+ }
+
+ /* rewrite the TransactionId so the sender (client_idx) is recorded in its top bits */
+ pRequest->TransactionId = htole32(TransactionId | (client_idx << TID_SHIFT));
+ if (verbose) mbim_debug("REQ client_fd=%d, client_idx=%d, tid=%u\n",
+ cm_clients[client_idx].client_fd, cm_clients[client_idx].client_idx, TransactionId);
+ ret = non_block_write(mbim_dev_fd, pRequest, len);
+ if (ret == len)
+ return 0;
+
+error:
+ return -1;
+}
+
+/*
+ * Read a message from the device and forward it to the right client(s)
+ * Notice:
+ * unsolicited messages will be sent to all clients
+ */
+static int handle_device_response(void *pdata, int len)
+{
+ int i;
+ MBIM_MESSAGE_HEADER *pResponse = (MBIM_MESSAGE_HEADER *)pdata;
+ unsigned int TransactionId = le32toh(pResponse->TransactionId);
+
+ /* unsolicited/function error message */
+ if (TransactionId == 0) {
+ for (i = 0; i < CM_MAX_CLIENT; i++) {
+ if (cm_clients[i].client_fd > 0) {
+ non_block_write(cm_clients[i].client_fd, pResponse, len);
+ }
+ }
+ }
+ else {
+ /* try to find the sender */
+ int client_idx = (TransactionId >> TID_SHIFT);
+
+ for (i = 0; i < CM_MAX_CLIENT; i++) {
+ if (cm_clients[i].client_idx == client_idx && cm_clients[i].client_fd > 0) {
+ TransactionId &= TID_MASK;
+ pResponse->TransactionId = htole32(TransactionId);
+ if (verbose) mbim_debug("RSP client_fd=%d, client_idx=%d, tid=%u\n",
+ cm_clients[i].client_fd, cm_clients[i].client_idx, TransactionId);
+ non_block_write(cm_clients[i].client_fd, pResponse, len);
+ break;
+ }
+ }
+
+ if (i == CM_MAX_CLIENT) {
+ mbim_debug("%s nobody cares about tid=%u\n", __func__, TransactionId);
+ }
+ }
+
+ return 0;
+}
+
+static int proxy_loop(int mbim_dev_fd)
+{
+ int i;
+ int mbim_server_fd = -1;
+
+ while (mbim_dev_fd > 0) {
+ struct pollfd pollfds[2+CM_MAX_CLIENT];
+ int ne, ret, nevents = 0;
+
+ pollfds[nevents].fd = mbim_dev_fd;
+ pollfds[nevents].events = POLLIN;
+ pollfds[nevents].revents= 0;
+ nevents++;
+
+ if (mbim_server_fd > 0) {
+ pollfds[nevents].fd = mbim_server_fd;
+ pollfds[nevents].events = POLLIN;
+ pollfds[nevents].revents= 0;
+ nevents++;
+
+ for (i = 0; i < CM_MAX_CLIENT; i++) {
+ if (cm_clients[i].client_fd > 0) {
+ pollfds[nevents].fd = cm_clients[i].client_fd;
+ pollfds[nevents].events = POLLIN;
+ pollfds[nevents].revents= 0;
+ nevents++;
+ }
+ }
+ }
+
+ ret = poll(pollfds, nevents, (mbim_server_fd > 0) ?
-1 : (10*1000));
+ if (ret <= 0) {
+ goto error;
+ }
+
+ for (ne = 0; ne < nevents; ne++) {
+ int fd = pollfds[ne].fd;
+ short revents = pollfds[ne].revents;
+
+ if (revents & (POLLERR | POLLHUP | POLLNVAL)) {
+ mbim_debug("%s poll fd = %d, revents = %04x\n", __func__, fd, revents);
+ if (fd == mbim_dev_fd) {
+ goto error;
+ } else if (fd == mbim_server_fd) {
+
+ } else {
+ handle_client_disconnect(fd);
+ }
+ continue;
+ }
+
+ if (!(pollfds[ne].revents & POLLIN)) {
+ continue;
+ }
+
+ if (fd == mbim_server_fd) {
+ handle_client_connect(fd);
+ }
+ else {
+ int len = read(fd, cm_recv_buffer, sizeof(cm_recv_buffer));
+
+ if (len <= 0) {
+ mbim_debug("%s read fd=%d, len=%d, errno: %d(%s)\n", __func__, fd, len, errno, strerror(errno));
+ if (fd == mbim_dev_fd)
+ goto error;
+ else
+ handle_client_disconnect(fd);
+
+ continue;
+ }
+
+ if (fd == mbim_dev_fd) {
+ if (mbim_server_fd == -1) {
+ MBIM_OPEN_DONE_T *pOpenDone = (MBIM_OPEN_DONE_T *)cm_recv_buffer;
+
+ if (le32toh(pOpenDone->MessageHeader.MessageType) == MBIM_OPEN_DONE) {
+ mbim_debug("receive MBIM_OPEN_DONE, status=%d\n", le32toh(pOpenDone->Status));
+ if (le32toh(pOpenDone->Status))
+ goto error;
+ mbim_server_fd = proxy_make_server(QUECTEL_MBIM_PROXY);
+ mbim_debug("mbim_server_fd=%d\n", mbim_server_fd);
+ }
+ }
+ else {
+ handle_device_response(cm_recv_buffer, len);
+ }
+ }
+ else {
+ handle_client_request(mbim_dev_fd, fd, cm_recv_buffer, len);
+ }
+ }
+ }
+ }
+
+error:
+ safe_close(mbim_server_fd);
+ for (i = 0; i < CM_MAX_CLIENT; i++) {
+ safe_close(cm_clients[i].client_fd);
+ }
+
+ mbim_debug("%s exit\n", __func__);
+ return 0;
+}
+
+/*
+ * How to use this proxy?
+ * 1. modprobe -a 8021q
+ * 2. Create network interfaces for the channels:
+ * ip link add link wwan0 name wwan0.1 type vlan id 1
+ * ip link add link wwan0 name wwan0.2 type vlan id 2
+ * 3. Start './mbim-proxy' with -d 'device'
+ * 4. Start clients: ./quectel-CM -n id1
+ * 5. Start clients: ./quectel-CM -n id2
+ * ...
+ * Notice:
+ * mbim-proxy can work in the background as a daemon
+ * '-n' sessionID
+ * The modem may not support multi-PDN mode, and how many PDNs it supports is undefined. It depends!!!
+ * Besides, some modems may not support some sessionIDs. For instance EC20 doesn't support SessionId 1...
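+ * The proxy opens the MBIM device first and only creates its local server
+ * (an abstract UNIX socket named "quectel-mbim-proxy") once the device
+ * answers MBIM_OPEN with a successful MBIM_OPEN_DONE, so start the clients
+ * after the "mbim_server_fd=..." log line appears.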
+ */ +int main(int argc, char **argv) +{ + int optidx = 0; + int opt; + char *optstr = "d:vh"; + const char *device = "/dev/cdc-wdm0"; + + struct option options[] = { + {"verbose", no_argument, NULL, 'v'}, + {"device", required_argument, NULL, 'd'}, + {0, 0, 0, 0}, + }; + while ((opt = getopt_long(argc, argv, optstr, options, &optidx)) != -1) { + switch (opt) { + case 'v': + verbose = 1; + break; + case 'd': + device = optarg; + break; + case 'h': + mbim_debug("-h Show this message\n"); + mbim_debug("-v Verbose\n"); + mbim_debug("-d [device] MBIM device\n"); + return 0; + default: + mbim_debug("illegal argument\n"); + return -1; + } + } + + if (!device) { + mbim_debug("Missing parameter: device\n"); + return -1; + } + + while (1) { + int mbim_dev_fd = open(device, O_RDWR | O_NONBLOCK | O_NOCTTY); + if (mbim_dev_fd < 0) { + mbim_debug("cannot open mbim_device %s: %s\n", device, strerror(errno)); + sleep(2); + continue; + } + mbim_debug ("mbim_dev_fd=%d\n", mbim_dev_fd); + + memset(cm_clients, 0, sizeof(cm_clients)); + mbim_send_open_msg(mbim_dev_fd, sizeof(cm_recv_buffer)); + proxy_loop(mbim_dev_fd); + safe_close(mbim_dev_fd); + } + + return -1; +} diff --git a/wwan/app/quectel_cm_5G/src/quectel-qmi-proxy.c b/wwan/app/quectel_cm_5G/src/quectel-qmi-proxy.c new file mode 100644 index 0000000..828f1b9 --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/quectel-qmi-proxy.c @@ -0,0 +1,700 @@ +/****************************************************************************** + @file quectel-qmi-proxy.c + @brief The qmi proxy. + + DESCRIPTION + Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. + + --------------------------------------------------------------------------- + Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved. + Quectel Wireless Solution Proprietary and Confidential. + --------------------------------------------------------------------------- +******************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qendian.h" +#include "qlist.h" +#include "QCQMI.h" +#include "QCQCTL.h" +#include "QCQMUX.h" + +#ifndef MIN +#define MIN(a, b) ((a) < (b)? (a): (b)) +#endif + +const char * get_time(void) { + static char time_buf[128]; + struct timeval tv; + time_t time; + suseconds_t millitm; + struct tm *ti; + + gettimeofday (&tv, NULL); + + time= tv.tv_sec; + millitm = (tv.tv_usec + 500) / 1000; + + if (millitm == 1000) { + ++time; + millitm = 0; + } + + ti = localtime(&time); + sprintf(time_buf, "[%02d-%02d_%02d:%02d:%02d:%03d]", ti->tm_mon+1, ti->tm_mday, ti->tm_hour, ti->tm_min, ti->tm_sec, (int)millitm); + return time_buf; +} + +#define dprintf(fmt, args...) 
do { fprintf(stdout, "%s " fmt, get_time(), ##args); } while(0); +#define SYSCHECK(c) do{if((c)<0) {dprintf("%s %d error: '%s' (code: %d)\n", __func__, __LINE__, strerror(errno), errno); return -1;}}while(0) +#define cfmakenoblock(fd) do{fcntl(fd, F_SETFL, fcntl(fd,F_GETFL) | O_NONBLOCK);}while(0) + +typedef struct { + struct qlistnode qnode; + int ClientFd; + QCQMIMSG qmi[0]; +} QMI_PROXY_MSG; + +typedef struct { + struct qlistnode qnode; + uint8_t QMIType; + uint8_t ClientId; + unsigned AccessTime; +} QMI_PROXY_CLINET; + +typedef struct { + struct qlistnode qnode; + struct qlistnode client_qnode; + int ClientFd; + unsigned AccessTime; +} QMI_PROXY_CONNECTION; + +#ifdef QUECTEL_QMI_MERGE +#define MERGE_PACKET_IDENTITY 0x2c7c +#define MERGE_PACKET_VERSION 0x0001 +#define MERGE_PACKET_MAX_PAYLOAD_SIZE 56 +typedef struct __QMI_MSG_HEADER { + uint16_t idenity; + uint16_t version; + uint16_t cur_len; + uint16_t total_len; +} QMI_MSG_HEADER; + +typedef struct __QMI_MSG_PACKET { + QMI_MSG_HEADER header; + uint16_t len; + char buf[4096]; +} QMI_MSG_PACKET; +#endif + +static int qmi_proxy_quit = 0; +static pthread_t thread_id = 0; +static int cdc_wdm_fd = -1; +static int qmi_proxy_server_fd = -1; +static struct qlistnode qmi_proxy_connection; +static struct qlistnode qmi_proxy_ctl_msg; +static int verbose_debug = 0; +static int modem_reset_flag = 0; +static int qmi_sync_done = 0; +static uint8_t qmi_buf[4096]; + +static int send_qmi_to_cdc_wdm(PQCQMIMSG pQMI); + +#ifdef QUECTEL_QMI_MERGE +static int merge_qmi_rsp_packet(void *buf, ssize_t *src_size) { + static QMI_MSG_PACKET s_QMIPacket; + QMI_MSG_HEADER *header = NULL; + ssize_t size = *src_size; + + if((uint16_t)size < sizeof(QMI_MSG_HEADER)) + return -1; + + header = (QMI_MSG_HEADER *)buf; + if(le16toh(header->idenity) != MERGE_PACKET_IDENTITY || le16toh(header->version) != MERGE_PACKET_VERSION || le16toh(header->cur_len) > le16toh(header->total_len)) + return -1; + + if(le16toh(header->cur_len) == le16toh(header->total_len)) { + *src_size = le16toh(header->total_len); + memcpy(buf, buf + sizeof(QMI_MSG_HEADER), *src_size); + s_QMIPacket.len = 0; + return 0; + } + + memcpy(s_QMIPacket.buf + s_QMIPacket.len, buf + sizeof(QMI_MSG_HEADER), le16toh(header->cur_len)); + s_QMIPacket.len += le16toh(header->cur_len); + + if (le16toh(header->cur_len) < MERGE_PACKET_MAX_PAYLOAD_SIZE || s_QMIPacket.len >= le16toh(header->total_len)) { + memcpy(buf, s_QMIPacket.buf, s_QMIPacket.len); + *src_size = s_QMIPacket.len; + s_QMIPacket.len = 0; + return 0; + } + + return -1; +} +#endif + +static int create_local_server(const char *name) { + int sockfd = -1; + int reuse_addr = 1; + struct sockaddr_un sockaddr; + socklen_t alen; + + /*Create server socket*/ + SYSCHECK(sockfd = socket(AF_LOCAL, SOCK_STREAM, 0)); + + memset(&sockaddr, 0, sizeof(sockaddr)); + sockaddr.sun_family = AF_LOCAL; + sockaddr.sun_path[0] = 0; + memcpy(sockaddr.sun_path + 1, name, strlen(name) ); + + alen = strlen(name) + offsetof(struct sockaddr_un, sun_path) + 1; + SYSCHECK(setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &reuse_addr,sizeof(reuse_addr))); + if(bind(sockfd, (struct sockaddr *)&sockaddr, alen) < 0) { + dprintf("bind %s errno: %d (%s)\n", name, errno, strerror(errno)); + close(sockfd); + return -1; + } + + dprintf("local server: %s sockfd = %d\n", name, sockfd); + cfmakenoblock(sockfd); + listen(sockfd, 1); + + return sockfd; +} + +static void accept_qmi_connection(int serverfd) { + int clientfd = -1; + unsigned char addr[128]; + socklen_t alen = sizeof(addr); + QMI_PROXY_CONNECTION 
*qmi_con; + + clientfd = accept(serverfd, (struct sockaddr *)addr, &alen); + + qmi_con = (QMI_PROXY_CONNECTION *)malloc(sizeof(QMI_PROXY_CONNECTION)); + if (qmi_con) { + qlist_init(&qmi_con->qnode); + qlist_init(&qmi_con->client_qnode); + qmi_con->ClientFd= clientfd; + qmi_con->AccessTime = 0; + dprintf("+++ ClientFd=%d\n", qmi_con->ClientFd); + qlist_add_tail(&qmi_proxy_connection, &qmi_con->qnode); + } + + cfmakenoblock(clientfd); +} + +static void cleanup_qmi_connection(int clientfd, int clientDisconnect) { + struct qlistnode *con_node, *qmi_node; + + qlist_for_each(con_node, &qmi_proxy_connection) { + QMI_PROXY_CONNECTION *qmi_con = qnode_to_item(con_node, QMI_PROXY_CONNECTION, qnode); + + if (qmi_con->ClientFd == clientfd) { + while (!qlist_empty(&qmi_con->client_qnode)) { + QMI_PROXY_CLINET *qmi_client = qnode_to_item(qlist_head(&qmi_con->client_qnode), QMI_PROXY_CLINET, qnode); + + if (clientDisconnect) { + int size = 17; + QMI_PROXY_MSG *qmi_msg = malloc(sizeof(QMI_PROXY_MSG) + size); + PQCQMIMSG pQMI = &qmi_msg->qmi[0]; + + dprintf("xxx ClientFd=%d QMIType=%d ClientId=%d\n", qmi_con->ClientFd, qmi_client->QMIType, qmi_client->ClientId); + qlist_init(&qmi_msg->qnode); + qmi_msg->ClientFd = qmi_proxy_server_fd; + pQMI->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI; + pQMI->QMIHdr.Length = htole16(16); + pQMI->QMIHdr.CtlFlags = 0x00; + pQMI->QMIHdr.QMIType = QMUX_TYPE_CTL; + pQMI->QMIHdr.ClientId= 0x00; + pQMI->CTLMsg.ReleaseClientIdReq.CtlFlags = QMICTL_FLAG_REQUEST; + pQMI->CTLMsg.ReleaseClientIdReq.TransactionId = 255; + pQMI->CTLMsg.ReleaseClientIdReq.QMICTLType = htole16(QMICTL_RELEASE_CLIENT_ID_REQ); + pQMI->CTLMsg.ReleaseClientIdReq.Length = htole16(5); + pQMI->CTLMsg.ReleaseClientIdReq.TLVType = QCTLV_TYPE_REQUIRED_PARAMETER; + pQMI->CTLMsg.ReleaseClientIdReq.TLVLength = htole16(2); + pQMI->CTLMsg.ReleaseClientIdReq.QMIType = qmi_client->QMIType; + pQMI->CTLMsg.ReleaseClientIdReq.ClientId = qmi_client->ClientId; + + if (qlist_empty(&qmi_proxy_ctl_msg)) + send_qmi_to_cdc_wdm(pQMI); + qlist_add_tail(&qmi_proxy_ctl_msg, &qmi_msg->qnode); + } + + qlist_remove(&qmi_client->qnode); + free(qmi_client); + } + + qlist_for_each(qmi_node, &qmi_proxy_ctl_msg) { + QMI_PROXY_MSG *qmi_msg = qnode_to_item(qmi_node, QMI_PROXY_MSG, qnode); + + if (qmi_msg->ClientFd == qmi_con->ClientFd) { + qlist_remove(&qmi_msg->qnode); + free(qmi_msg); + break; + } + } + + dprintf("--- ClientFd=%d\n", qmi_con->ClientFd); + close(qmi_con->ClientFd); + qlist_remove(&qmi_con->qnode); + free(qmi_con); + break; + } + } +} + +static void get_client_id(QMI_PROXY_CONNECTION *qmi_con, PQMICTL_GET_CLIENT_ID_RESP_MSG pClient) { + if (pClient->QMIResult == 0 && pClient->QMIError == 0) { + QMI_PROXY_CLINET *qmi_client = (QMI_PROXY_CLINET *)malloc(sizeof(QMI_PROXY_CLINET)); + + qlist_init(&qmi_client->qnode); + qmi_client->QMIType = pClient->QMIType; + qmi_client->ClientId = pClient->ClientId; + qmi_client->AccessTime = 0; + + dprintf("+++ ClientFd=%d QMIType=%d ClientId=%d\n", qmi_con->ClientFd, qmi_client->QMIType, qmi_client->ClientId); + qlist_add_tail(&qmi_con->client_qnode, &qmi_client->qnode); + } +} + +static void release_client_id(QMI_PROXY_CONNECTION *qmi_con, PQMICTL_RELEASE_CLIENT_ID_RESP_MSG pClient) { + struct qlistnode *client_node; + + if (pClient->QMIResult == 0 && pClient->QMIError == 0) { + qlist_for_each (client_node, &qmi_con->client_qnode) { + QMI_PROXY_CLINET *qmi_client = qnode_to_item(client_node, QMI_PROXY_CLINET, qnode); + + if (pClient->QMIType == qmi_client->QMIType && pClient->ClientId == 
qmi_client->ClientId) { + dprintf("--- ClientFd=%d QMIType=%d ClientId=%d\n", qmi_con->ClientFd, qmi_client->QMIType, qmi_client->ClientId); + qlist_remove(&qmi_client->qnode); + free(qmi_client); + break; + } + } + } +} + +static void dump_qmi(PQCQMIMSG pQMI, int fd, const char flag) +{ + if (verbose_debug) + { + unsigned i; + unsigned size = le16toh(pQMI->QMIHdr.Length) + 1; + char buf[128]; + int cnt = 0; + + cnt += snprintf(buf + cnt, sizeof(buf) - cnt, "%c %d %u: ", flag, fd, size); + for (i = 0; i < size && i < 24; i++) + cnt += snprintf(buf + cnt, sizeof(buf) - cnt, "%02x ", ((uint8_t *)pQMI)[i]); + dprintf("%s\n", buf) + } +} + +static int send_qmi_to_cdc_wdm(PQCQMIMSG pQMI) { + struct pollfd pollfds[]= {{cdc_wdm_fd, POLLOUT, 0}}; + ssize_t ret = 0; + + do { + ret = poll(pollfds, sizeof(pollfds)/sizeof(pollfds[0]), 5000); + } while (ret == -1 && errno == EINTR && qmi_proxy_quit == 0); + + if (pollfds[0].revents & POLLOUT) { + ssize_t size = le16toh(pQMI->QMIHdr.Length) + 1; + ret = write(cdc_wdm_fd, pQMI, size); + dump_qmi(pQMI, cdc_wdm_fd, 'w'); + } + + return ret; +} + +static int send_qmi_to_client(PQCQMIMSG pQMI, int clientFd) { + struct pollfd pollfds[]= {{clientFd, POLLOUT, 0}}; + ssize_t ret = 0; + + do { + ret = poll(pollfds, sizeof(pollfds)/sizeof(pollfds[0]), 5000); + } while (ret == -1 && errno == EINTR && qmi_proxy_quit == 0); + + if (pollfds[0].revents & POLLOUT) { + ssize_t size = le16toh(pQMI->QMIHdr.Length) + 1; + ret = write(clientFd, pQMI, size); + dump_qmi(pQMI, clientFd, 'w'); + } + + return ret; +} + +static void recv_qmi_from_dev(PQCQMIMSG pQMI) { + struct qlistnode *con_node, *client_node; + + if (qmi_proxy_server_fd == -1) { + qmi_sync_done = 1; + } + else if (pQMI->QMIHdr.QMIType == QMUX_TYPE_CTL) { + if (pQMI->CTLMsg.QMICTLMsgHdr.CtlFlags == QMICTL_CTL_FLAG_RSP) { + if (!qlist_empty(&qmi_proxy_ctl_msg)) { + QMI_PROXY_MSG *qmi_msg = qnode_to_item(qlist_head(&qmi_proxy_ctl_msg), QMI_PROXY_MSG, qnode); + + if (qmi_msg->qmi[0].CTLMsg.QMICTLMsgHdrRsp.TransactionId != pQMI->CTLMsg.QMICTLMsgHdrRsp.TransactionId + || qmi_msg->qmi[0].CTLMsg.QMICTLMsgHdrRsp.QMICTLType != pQMI->CTLMsg.QMICTLMsgHdrRsp.QMICTLType) { + dprintf("ERROR: ctl rsp tid:%d, type:%d - ctl req %d, %d\n", + pQMI->CTLMsg.QMICTLMsgHdrRsp.TransactionId, pQMI->CTLMsg.QMICTLMsgHdrRsp.QMICTLType, + qmi_msg->qmi[0].CTLMsg.QMICTLMsgHdrRsp.TransactionId, qmi_msg->qmi[0].CTLMsg.QMICTLMsgHdrRsp.QMICTLType); + } + else if (qmi_msg->ClientFd == qmi_proxy_server_fd) { + if (le16toh(pQMI->CTLMsg.QMICTLMsgHdrRsp.QMICTLType) == QMICTL_RELEASE_CLIENT_ID_RESP) { + dprintf("--- ClientFd=%d QMIType=%d ClientId=%d\n", qmi_proxy_server_fd, + pQMI->CTLMsg.ReleaseClientIdRsp.QMIType, pQMI->CTLMsg.ReleaseClientIdRsp.ClientId); + } + } + else { + qlist_for_each(con_node, &qmi_proxy_connection) { + QMI_PROXY_CONNECTION *qmi_con = qnode_to_item(con_node, QMI_PROXY_CONNECTION, qnode); + + if (qmi_con->ClientFd == qmi_msg->ClientFd) { + send_qmi_to_client(pQMI, qmi_msg->ClientFd); + + if (le16toh(pQMI->CTLMsg.QMICTLMsgHdrRsp.QMICTLType) == QMICTL_GET_CLIENT_ID_RESP) { + get_client_id(qmi_con, &pQMI->CTLMsg.GetClientIdRsp); + } + else if (le16toh(pQMI->CTLMsg.QMICTLMsgHdrRsp.QMICTLType) == QMICTL_RELEASE_CLIENT_ID_RESP) { + release_client_id(qmi_con, &pQMI->CTLMsg.ReleaseClientIdRsp); + } + else { + } + } + } + } + + qlist_remove(&qmi_msg->qnode); + free(qmi_msg); + + if (!qlist_empty(&qmi_proxy_ctl_msg)) { + QMI_PROXY_MSG *qmi_msg = qnode_to_item(qlist_head(&qmi_proxy_ctl_msg), QMI_PROXY_MSG, qnode); + + 
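+            /*
+             * Only one QMICTL transaction is kept in flight. Client requests
+             * are queued on qmi_proxy_ctl_msg; a queued request is written to
+             * the device either when it is enqueued onto an empty list (see
+             * recv_qmi_from_client below) or right here, once the response
+             * for the previous head has been consumed and freed above.
+             */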
send_qmi_to_cdc_wdm(qmi_msg->qmi);
+            }
+        }
+        else if (pQMI->CTLMsg.QMICTLMsgHdr.CtlFlags == QMICTL_CTL_FLAG_IND) {
+            /* the indication flag lives in CtlFlags; QMIHdr.QMIType is
+               always QMUX_TYPE_CTL in this branch */
+            if (le16toh(pQMI->CTLMsg.QMICTLMsgHdrRsp.QMICTLType) == QMICTL_REVOKE_CLIENT_ID_IND) {
+                modem_reset_flag = 1;
+            }
+        }
+    }
+    else {
+        qlist_for_each(con_node, &qmi_proxy_connection) {
+            QMI_PROXY_CONNECTION *qmi_con = qnode_to_item(con_node, QMI_PROXY_CONNECTION, qnode);
+
+            qlist_for_each(client_node, &qmi_con->client_qnode) {
+                QMI_PROXY_CLINET *qmi_client = qnode_to_item(client_node, QMI_PROXY_CLINET, qnode);
+                if (pQMI->QMIHdr.QMIType == qmi_client->QMIType) {
+                    if (pQMI->QMIHdr.ClientId == 0 || pQMI->QMIHdr.ClientId == qmi_client->ClientId) {
+                        send_qmi_to_client(pQMI, qmi_con->ClientFd);
+                    }
+                }
+            }
+        }
+    }
+}
+
+static int recv_qmi_from_client(PQCQMIMSG pQMI, unsigned size, int clientfd) {
+    if (qmi_proxy_server_fd == -1)
+        return -1;
+
+    if (pQMI->QMIHdr.QMIType == QMUX_TYPE_CTL) {
+        QMI_PROXY_MSG *qmi_msg;
+
+        if (pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType == QMICTL_SYNC_REQ) {
+            dprintf("do not allow client to send QMICTL_SYNC_REQ\n");
+            return 0;
+        }
+
+        qmi_msg = malloc(sizeof(QMI_PROXY_MSG) + size);
+        if (qmi_msg == NULL)
+            return -1;
+        qlist_init(&qmi_msg->qnode);
+        qmi_msg->ClientFd = clientfd;
+        memcpy(qmi_msg->qmi, pQMI, size);
+
+        if (qlist_empty(&qmi_proxy_ctl_msg))
+            send_qmi_to_cdc_wdm(pQMI);
+        qlist_add_tail(&qmi_proxy_ctl_msg, &qmi_msg->qnode);
+    }
+    else {
+        send_qmi_to_cdc_wdm(pQMI);
+    }
+
+    return 0;
+}
+
+static int qmi_proxy_init(unsigned retry) {
+    unsigned i;
+    QCQMIMSG _QMI;
+    PQCQMIMSG pQMI = &_QMI;
+
+    dprintf("%s enter\n", __func__);
+
+    pQMI->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI;
+    pQMI->QMIHdr.CtlFlags = 0x00;
+    pQMI->QMIHdr.QMIType = QMUX_TYPE_CTL;
+    pQMI->QMIHdr.ClientId = 0x00;
+
+    pQMI->CTLMsg.QMICTLMsgHdr.CtlFlags = QMICTL_FLAG_REQUEST;
+
+    qmi_sync_done = 0;
+    for (i = 0; i < retry; i++) {
+        pQMI->CTLMsg.SyncReq.TransactionId = i + 1;
+        pQMI->CTLMsg.SyncReq.QMICTLType = htole16(QMICTL_SYNC_REQ);
+        pQMI->CTLMsg.SyncReq.Length = htole16(0);
+
+        pQMI->QMIHdr.Length =
+            htole16(le16toh(pQMI->CTLMsg.QMICTLMsgHdr.Length) + sizeof(QCQMI_HDR) + sizeof(QCQMICTL_MSG_HDR) - 1);
+
+        if (send_qmi_to_cdc_wdm(pQMI) <= 0)
+            break;
+
+        sleep(1);
+        if (qmi_sync_done)
+            break;
+    }
+
+    dprintf("%s %s\n", __func__, qmi_sync_done ? "success" : "fail");
+    return qmi_sync_done ?
0 : -1; +} + +static void *qmi_proxy_loop(void *param) +{ + PQCQMIMSG pQMI = (PQCQMIMSG)qmi_buf; + struct qlistnode *con_node; + QMI_PROXY_CONNECTION *qmi_con; + + (void)param; + dprintf("%s enter thread_id %p\n", __func__, (void *)pthread_self()); + + qlist_init(&qmi_proxy_connection); + qlist_init(&qmi_proxy_ctl_msg); + + while (cdc_wdm_fd > 0 && qmi_proxy_quit == 0) { + struct pollfd pollfds[2+64]; + int ne, ret, nevents = 0; + ssize_t nreads; + + pollfds[nevents].fd = cdc_wdm_fd; + pollfds[nevents].events = POLLIN; + pollfds[nevents].revents= 0; + nevents++; + + if (qmi_proxy_server_fd > 0) { + pollfds[nevents].fd = qmi_proxy_server_fd; + pollfds[nevents].events = POLLIN; + pollfds[nevents].revents= 0; + nevents++; + } + + qlist_for_each(con_node, &qmi_proxy_connection) { + qmi_con = qnode_to_item(con_node, QMI_PROXY_CONNECTION, qnode); + + pollfds[nevents].fd = qmi_con->ClientFd; + pollfds[nevents].events = POLLIN; + pollfds[nevents].revents= 0; + nevents++; + + if (nevents == (sizeof(pollfds)/sizeof(pollfds[0]))) + break; + } + +#if 0 + dprintf("poll "); + for (ne = 0; ne < nevents; ne++) { + dprintf("%d ", pollfds[ne].fd); + } + dprintf("\n"); +#endif + + do { + //ret = poll(pollfds, nevents, -1); + ret = poll(pollfds, nevents, (qmi_proxy_server_fd > 0) ? -1 : 200); + } while (ret == -1 && errno == EINTR && qmi_proxy_quit == 0); + + if (ret < 0) { + dprintf("%s poll=%d, errno: %d (%s)\n", __func__, ret, errno, strerror(errno)); + goto qmi_proxy_loop_exit; + } + + for (ne = 0; ne < nevents; ne++) { + int fd = pollfds[ne].fd; + short revents = pollfds[ne].revents; + + if (revents & (POLLERR | POLLHUP | POLLNVAL)) { + dprintf("%s poll fd = %d, revents = %04x\n", __func__, fd, revents); + if (fd == cdc_wdm_fd) { + goto qmi_proxy_loop_exit; + } else if(fd == qmi_proxy_server_fd) { + + } else { + cleanup_qmi_connection(fd, 1); + } + + continue; + } + + if (!(pollfds[ne].revents & POLLIN)) { + continue; + } + + if (fd == qmi_proxy_server_fd) { + accept_qmi_connection(fd); + } + else if (fd == cdc_wdm_fd) { + nreads = read(fd, pQMI, sizeof(qmi_buf)); + if (nreads <= 0) { + dprintf("%s read=%d errno: %d (%s)\n", __func__, (int)nreads, errno, strerror(errno)); + goto qmi_proxy_loop_exit; + } +#ifdef QUECTEL_QMI_MERGE + if(merge_qmi_rsp_packet(pQMI, &nreads)) + continue; +#endif + if (nreads != (le16toh(pQMI->QMIHdr.Length) + 1)) { + dprintf("%s nreads=%d, pQCQMI->QMIHdr.Length = %d\n", __func__, (int)nreads, le16toh(pQMI->QMIHdr.Length)); + continue; + } + + dump_qmi(pQMI, fd, 'r'); + recv_qmi_from_dev(pQMI); + if (modem_reset_flag) + goto qmi_proxy_loop_exit; + } + else { + nreads = read(fd, pQMI, sizeof(qmi_buf)); + + if (nreads <= 0) { + dprintf("%s read=%d errno: %d (%s)", __func__, (int)nreads, errno, strerror(errno)); + cleanup_qmi_connection(fd, 1); + break; + } + + if (nreads != (le16toh(pQMI->QMIHdr.Length) + 1)) { + dprintf("%s nreads=%d, pQCQMI->QMIHdr.Length = %d\n", __func__, (int)nreads, le16toh(pQMI->QMIHdr.Length)); + continue; + } + + dump_qmi(pQMI, fd, 'r'); + recv_qmi_from_client(pQMI, nreads, fd); + } + } + } + +qmi_proxy_loop_exit: + while (!qlist_empty(&qmi_proxy_connection)) { + QMI_PROXY_CONNECTION *qmi_con = qnode_to_item(qlist_head(&qmi_proxy_connection), QMI_PROXY_CONNECTION, qnode); + + cleanup_qmi_connection(qmi_con->ClientFd, 0); + } + + dprintf("%s exit, thread_id %p\n", __func__, (void *)pthread_self()); + + return NULL; +} + +static void usage(void) { + dprintf(" -d A valid qmi device\n" + " default /dev/cdc-wdm0, but cdc-wdm0 may be invalid\n" + " -i netcard 
name\n" + " -v Will show all details\n"); +} + +static void sig_action(int sig) { + if (qmi_proxy_quit++ == 0) { + if (thread_id) + pthread_kill(thread_id, sig); + } +} + +int main(int argc, char *argv[]) { + int opt; + char cdc_wdm[32+1] = "/dev/cdc-wdm0"; + char servername[64] = {0}; + + optind = 1; + + signal(SIGINT, sig_action); + + while ( -1 != (opt = getopt(argc, argv, "d:i:vh"))) { + switch (opt) { + case 'd': + strcpy(cdc_wdm, optarg); + break; + case 'v': + verbose_debug = 1; + break; + default: + usage(); + return 0; + } + } + + sprintf(servername, "quectel-qmi-proxy%c", cdc_wdm[strlen(cdc_wdm)-1]); + dprintf("Will use cdc-wdm='%s', proxy='%s'\n", cdc_wdm, servername); + + while (qmi_proxy_quit == 0) { + cdc_wdm_fd = open(cdc_wdm, O_RDWR | O_NONBLOCK | O_NOCTTY); + if (cdc_wdm_fd == -1) { + dprintf("Failed to open %s, errno: %d (%s)\n", cdc_wdm, errno, strerror(errno)); + sleep(3); + continue; + } + cfmakenoblock(cdc_wdm_fd); + + /* no qmi_proxy_loop lives, create one */ + pthread_create(&thread_id, NULL, qmi_proxy_loop, NULL); + + if (qmi_proxy_init(60) == 0) { + qmi_proxy_server_fd = create_local_server(servername); + dprintf("qmi_proxy_server_fd = %d\n", qmi_proxy_server_fd); + if (qmi_proxy_server_fd == -1) { + dprintf("Failed to create %s, errno: %d (%s)\n", servername, errno, strerror(errno)); + pthread_cancel(thread_id); + } + } + else { + pthread_cancel(thread_id); + } + + pthread_join(thread_id, NULL); + thread_id = 0; + + if (qmi_proxy_server_fd != -1) { + dprintf("close server %s\n", servername); + close(qmi_proxy_server_fd); + qmi_proxy_server_fd = -1; + } + close(cdc_wdm_fd); + cdc_wdm_fd = -1; + + if (qmi_proxy_quit == 0) + sleep(modem_reset_flag ? 30 : 3); + modem_reset_flag = 0; + } + + return 0; +} diff --git a/wwan/app/quectel_cm_5G/src/quectel-qrtr-proxy.c b/wwan/app/quectel_cm_5G/src/quectel-qrtr-proxy.c new file mode 100644 index 0000000..67ddc16 --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/quectel-qrtr-proxy.c @@ -0,0 +1,894 @@ +/****************************************************************************** + @file quectel-qrtr-proxy.c + @brief The qrtr proxy. + + DESCRIPTION + Connectivity Management Tool for USB/PCIE network adapter of Quectel wireless cellular modules. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. + + --------------------------------------------------------------------------- + Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved. + Quectel Wireless Solution Proprietary and Confidential. + --------------------------------------------------------------------------- +******************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "qrtr.h" + +#include "qendian.h" +#include "qlist.h" +#include "QCQMI.h" +#include "QCQCTL.h" +#include "QCQMUX.h" + +static const char * get_time(void) { + static char time_buf[128]; + struct timeval tv; + time_t time; + suseconds_t millitm; + struct tm *ti; + + gettimeofday (&tv, NULL); + + time= tv.tv_sec; + millitm = (tv.tv_usec + 500) / 1000; + + if (millitm == 1000) { + ++time; + millitm = 0; + } + + ti = localtime(&time); + sprintf(time_buf, "[%02d-%02d_%02d:%02d:%02d:%03d]", ti->tm_mon+1, ti->tm_mday, ti->tm_hour, ti->tm_min, ti->tm_sec, (int)millitm); + return time_buf; +} + +#define dprintf(fmt, args...) 
do { fprintf(stdout, "%s " fmt, get_time(), ##args); } while(0); +#define SYSCHECK(c) do{if((c)<0) {dprintf("%s %d error: '%s' (code: %d)\n", __func__, __LINE__, strerror(errno), errno); return -1;}}while(0) +#define cfmakenoblock(fd) do{fcntl(fd, F_SETFL, fcntl(fd,F_GETFL) | O_NONBLOCK);}while(0) +#define align_4(_len) (((_len) + 3) & ~3) + +typedef struct { + struct qlistnode qnode; + int ClientFd; + QCQMIMSG qrtr[0]; +} QRTR_PROXY_MSG; + +typedef struct { + struct qlistnode qnode; + uint8_t QMIType; + uint8_t ClientId; + uint32_t node_id; + uint32_t port_id; + unsigned AccessTime; +} QRTR_PROXY_CLINET; + +typedef struct { + struct qlistnode qnode; + struct qlistnode client_qnode; + int ClientFd; + unsigned AccessTime; +} QRTR_PROXY_CONNECTION; + +typedef struct { + struct qlistnode qnode; + uint32_t service; + uint32_t version; + uint32_t instance; + uint32_t node; + uint32_t port; + + __le32 src_node_id; + __le32 src_port_id; +} QRTR_SERVICE; + +static int qrtr_proxy_quit = 0; +static pthread_t thread_id = 0; +static int cdc_wdm_fd = -1; +static int qrtr_proxy_server_fd = -1; +static struct qlistnode qrtr_proxy_connection; +static struct qlistnode qrtr_server_list; +static int verbose_debug = 0; +static uint32_t node_modem = 3; //IPQ ~ 3, QCM ~ 0 +static uint32_t node_myself = 1; + +static QRTR_SERVICE *find_qrtr_service(uint8_t QMIType) +{ + struct qlistnode *node; + + qlist_for_each (node, &qrtr_server_list) { + QRTR_SERVICE *srv = qnode_to_item(node, QRTR_SERVICE, qnode); + if (srv->service == QMIType) + return srv; + } + + return NULL; +} + +static uint8_t client_bitmap[0xf0]; +static uint8_t port_bitmap[0xff0]; +static int alloc_client_id(void) { + int id = 1; + + for (id = 1; id < (int)sizeof(client_bitmap); id++) { + if (client_bitmap[id] == 0) { + client_bitmap[id] = id; + return id; + } + } + + dprintf("NOT find %s()\n", __func__); + return 0; +} + +static void free_client_id(int id) { + if (id < (int)sizeof(client_bitmap) && client_bitmap[id] == id) { + client_bitmap[id] = 0; + return; + } + dprintf("NOT find %s(id=%d)\n", __func__, id); +} + +static int alloc_port_id(void) { + int id = 1; + + for (id = 1; id < (int)sizeof(port_bitmap); id++) { + if (port_bitmap[id] == 0) { + port_bitmap[id] = id; + return id; + } + } + + dprintf("NOT find %s()\n", __func__); + return 0; +} + +static void free_port_id(int id) { + if (id < (int)sizeof(port_bitmap) && port_bitmap[id] == id) { + port_bitmap[id] = 0; + return; + } + dprintf("NOT find %s(id=%d)\n", __func__, id); +} + +static void dump_qrtr(void *buf, size_t len, char flag) +{ + size_t i; + static char printf_buf[1024]; + int cnt = 0, limit=1024; + unsigned char *d = (unsigned char *)buf; + struct qrtr_hdr_v1 *hdr = (struct qrtr_hdr_v1 *)buf; + const char *ctrl_pkt_strings[] = { + [QRTR_TYPE_DATA] = "data", + [QRTR_TYPE_HELLO] = "hello", + [QRTR_TYPE_BYE] = "bye", + [QRTR_TYPE_NEW_SERVER] = "new-server", + [QRTR_TYPE_DEL_SERVER] = "del-server", + [QRTR_TYPE_DEL_CLIENT] = "del-client", + [QRTR_TYPE_RESUME_TX] = "resume-tx", + [QRTR_TYPE_EXIT] = "exit", + [QRTR_TYPE_PING] = "ping", + [QRTR_TYPE_NEW_LOOKUP] = "new-lookup", + [QRTR_TYPE_DEL_LOOKUP] = "del-lookup", + }; + + for (i = 0; i < len && i < 64; i++) { + if (i%4 == 0) + cnt += snprintf(printf_buf+cnt, limit-cnt, " "); + cnt += snprintf(printf_buf+cnt, limit-cnt, "%02x", d[i]); + } + dprintf("%s\n", printf_buf); + + dprintf("%c ver=%d, type=%d(%s), %x,%x -> %x,%x, confirm_rx=%d, size=%u\n", + flag, + le32toh(hdr->version), le32toh(hdr->type), 
ctrl_pkt_strings[le32toh(hdr->type)], + le32toh(hdr->src_node_id), le32toh(hdr->src_port_id), le32toh(hdr->dst_node_id), le32toh(hdr->dst_port_id), + le32toh(hdr->confirm_rx), le32toh(hdr->size)); +} + +static int send_qmi_to_client(PQCQMIMSG pQMI, int fd) { + struct pollfd pollfds[]= {{fd, POLLOUT, 0}}; + ssize_t ret = 0; + ssize_t size = le16toh(pQMI->QMIHdr.Length) + 1; + + do { + ret = poll(pollfds, sizeof(pollfds)/sizeof(pollfds[0]), 5000); + } while (ret == -1 && errno == EINTR && qrtr_proxy_quit == 0); + + if (pollfds[0].revents & POLLOUT) { + ret = write(fd, pQMI, size); + } + + return ret == size ? 0 : -1; +} + +static int send_qrtr_to_dev(struct qrtr_hdr_v1 *hdr, int fd) { + struct pollfd pollfds[]= {{fd, POLLOUT, 0}}; + ssize_t ret = 0; + ssize_t size = align_4(le32toh(hdr->size) + sizeof(*hdr)); + + do { + ret = poll(pollfds, sizeof(pollfds)/sizeof(pollfds[0]), 5000); + } while (ret == -1 && errno == EINTR && qrtr_proxy_quit == 0); + + if (pollfds[0].revents & POLLOUT) { + ret = write(fd, hdr, size); + } + + return ret == size ? 0 : -1; +} + +static int qrtr_node_enqueue(const void *data, size_t len, + int type, struct sockaddr_qrtr *from, + struct sockaddr_qrtr *to, unsigned int confirm_rx) +{ + int rc = -1; + size_t size = sizeof(struct qrtr_hdr_v1) + len; + struct qrtr_hdr_v1 *hdr = (struct qrtr_hdr_v1 *)malloc(align_4(size)); + + if (hdr) { + hdr->version = htole32(QRTR_PROTO_VER_1); + hdr->type = htole32(type); + hdr->src_node_id = htole32(from->sq_node); + hdr->src_port_id = htole32(from->sq_port); + hdr->dst_node_id = htole32(to->sq_node); + hdr->dst_port_id = htole32(to->sq_port); + hdr->size = htole32(len); + hdr->confirm_rx = htole32(!!confirm_rx); + + memcpy(hdr + 1, data, len); + dump_qrtr(hdr, size, '>'); + send_qrtr_to_dev(hdr, cdc_wdm_fd); + free(hdr); + } + + return rc; +} + +static int send_ctrl_hello(__u32 sq_node, __u32 sq_port) +{ + struct qrtr_ctrl_pkt pkt; + int rc; + struct sockaddr_qrtr to = {AF_QIPCRTR, sq_node, sq_port}; + struct sockaddr_qrtr from = {AF_QIPCRTR, node_myself, QRTR_PORT_CTRL}; + + memset(&pkt, 0, sizeof(pkt)); + pkt.cmd = htole32(QRTR_TYPE_HELLO); + + rc = qrtr_node_enqueue(&pkt, sizeof(pkt), QRTR_TYPE_HELLO, &from, &to, 0); + if (rc < 0) + return rc; + + return 0; +} + +static int ctrl_cmd_del_client(__u32 sq_node, __u32 sq_port, uint8_t QMIType) +{ + struct qrtr_ctrl_pkt pkt; + int rc; + struct sockaddr_qrtr to = {AF_QIPCRTR, QRTR_NODE_BCAST, QRTR_PORT_CTRL}; + struct sockaddr_qrtr from = {AF_QIPCRTR, sq_node, sq_port}; + QRTR_SERVICE *srv = find_qrtr_service(QMIType); + + if (srv) { + to.sq_node = srv->src_node_id; + } + + memset(&pkt, 0, sizeof(pkt)); + pkt.cmd = htole32(QRTR_TYPE_DEL_CLIENT); + pkt.client.node = htole32(sq_node); + pkt.client.port = htole32(sq_port); + + rc = qrtr_node_enqueue(&pkt, sizeof(pkt), QRTR_TYPE_DATA, &from, &to, 0); + if (rc < 0) + return rc; + + return 0; +} + +static void handle_server_change(struct qrtr_hdr_v1 *hdr) { + struct qrtr_ctrl_pkt *pkt = (struct qrtr_ctrl_pkt *)(hdr + 1); + QRTR_SERVICE *s; + + dprintf ("[qrtr] %s server on %u:%u(%u:%u) -> service %u, instance %x\n", + QRTR_TYPE_NEW_SERVER == hdr->type ? 
"add" : "remove", + le32toh(pkt->server.node), le32toh(pkt->server.port), + le32toh(hdr->src_node_id), le32toh(hdr->src_port_id), + le32toh(pkt->server.service), le32toh(pkt->server.instance)); + + if (le32toh(pkt->server.node) != node_modem) { + return; //we only care modem + } + + s = (QRTR_SERVICE *)malloc(sizeof(QRTR_SERVICE)); + if (!s) + return; + + qlist_init(&s->qnode); + s->service = le32toh(pkt->server.service); + s->version = le32toh(pkt->server.instance) & 0xff; + s->instance = le32toh(pkt->server.instance) >> 8; + s->node = le32toh(pkt->server.node); + s->port = le32toh(pkt->server.port); + + s->src_node_id = le32toh(hdr->src_node_id); + s->src_port_id = le32toh(hdr->src_port_id); + + if (QRTR_TYPE_NEW_SERVER == hdr->type) { + qlist_add_tail(&qrtr_server_list, &s->qnode); + } + else if (QRTR_TYPE_DEL_SERVER == hdr->type) { + qlist_remove(&s->qnode); + } +} + +static int create_local_server(const char *name) { + int sockfd = -1; + int reuse_addr = 1; + struct sockaddr_un sockaddr; + socklen_t alen; + + /*Create server socket*/ + SYSCHECK(sockfd = socket(AF_LOCAL, SOCK_STREAM, 0)); + + memset(&sockaddr, 0, sizeof(sockaddr)); + sockaddr.sun_family = AF_LOCAL; + sockaddr.sun_path[0] = 0; + memcpy(sockaddr.sun_path + 1, name, strlen(name) ); + + alen = strlen(name) + offsetof(struct sockaddr_un, sun_path) + 1; + SYSCHECK(setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &reuse_addr,sizeof(reuse_addr))); + if(bind(sockfd, (struct sockaddr *)&sockaddr, alen) < 0) { + close(sockfd); + dprintf("bind %s errno: %d (%s)\n", name, errno, strerror(errno)); + return -1; + } + + dprintf("local server: %s sockfd = %d\n", name, sockfd); + cfmakenoblock(sockfd); + listen(sockfd, 1); + + return sockfd; +} + +static uint8_t alloc_qrtr_client_id(QRTR_PROXY_CONNECTION *qrtr_con, uint8_t QMIType) { + QRTR_PROXY_CLINET *qrtr_client = (QRTR_PROXY_CLINET *)malloc(sizeof(QRTR_PROXY_CLINET)); + + qlist_init(&qrtr_client->qnode); + qrtr_client->QMIType = QMIType; + qrtr_client->ClientId = alloc_client_id(); + qrtr_client->node_id = 1; + qrtr_client->port_id = alloc_port_id(); + qrtr_client->AccessTime = 0; + + dprintf("+++ ClientFd=%d QMIType=%d ClientId=%d, node_id=%d, port_id=%d\n", + qrtr_con->ClientFd, qrtr_client->QMIType, qrtr_client->ClientId, + qrtr_client->node_id, qrtr_client->port_id); + qlist_add_tail(&qrtr_con->client_qnode, &qrtr_client->qnode); + + return qrtr_client->ClientId; +} + +static void release_qrtr_client_id(QRTR_PROXY_CONNECTION *qrtr_con, uint8_t QMIType, uint8_t ClientId) { + struct qlistnode *client_node; + int find = 0; + + qlist_for_each (client_node, &qrtr_con->client_qnode) { + QRTR_PROXY_CLINET *qrtr_client = qnode_to_item(client_node, QRTR_PROXY_CLINET, qnode); + + if (QMIType == qrtr_client->QMIType && ClientId == qrtr_client->ClientId) { + dprintf("--- ClientFd=%d QMIType=%d ClientId=%d, node_id=%d, port_id=%d\n", + qrtr_con->ClientFd, qrtr_client->QMIType, qrtr_client->ClientId, + qrtr_client->node_id, qrtr_client->port_id); + ctrl_cmd_del_client(qrtr_client->node_id, qrtr_client->port_id, qrtr_client->QMIType); + free_client_id(qrtr_client->ClientId); + free_port_id(qrtr_client->port_id); + qlist_remove(&qrtr_client->qnode); + free(qrtr_client); + find++; + break; + } + } + + if (!find) { + dprintf("NOT find on %s(ClientFd=%d, QMIType=%d, ClientId=%d)\n", + __func__, qrtr_con->ClientFd, QMIType, ClientId); + } +} + +static void accept_qrtr_connection(int serverfd) { + int clientfd = -1; + unsigned char addr[128]; + socklen_t alen = sizeof(addr); + QRTR_PROXY_CONNECTION 
*qrtr_con; + + clientfd = accept(serverfd, (struct sockaddr *)addr, &alen); + + qrtr_con = (QRTR_PROXY_CONNECTION *)malloc(sizeof(QRTR_PROXY_CONNECTION)); + if (qrtr_con) { + qlist_init(&qrtr_con->qnode); + qlist_init(&qrtr_con->client_qnode); + qrtr_con->ClientFd= clientfd; + qrtr_con->AccessTime = 0; + dprintf("+++ ClientFd=%d\n", qrtr_con->ClientFd); + qlist_add_tail(&qrtr_proxy_connection, &qrtr_con->qnode); + } + + cfmakenoblock(clientfd); +} + +static void cleanup_qrtr_connection(int clientfd) { + struct qlistnode *con_node; + int find = 0; + + qlist_for_each(con_node, &qrtr_proxy_connection) { + QRTR_PROXY_CONNECTION *qrtr_con = qnode_to_item(con_node, QRTR_PROXY_CONNECTION, qnode); + + if (qrtr_con->ClientFd == clientfd) { + while (!qlist_empty(&qrtr_con->client_qnode)) { + QRTR_PROXY_CLINET *qrtr_client = qnode_to_item(qlist_head(&qrtr_con->client_qnode), QRTR_PROXY_CLINET, qnode); + + release_qrtr_client_id(qrtr_con, qrtr_client->QMIType, qrtr_client->ClientId); + } + + dprintf("--- ClientFd=%d\n", qrtr_con->ClientFd); + close(qrtr_con->ClientFd); + qlist_remove(&qrtr_con->qnode); + free(qrtr_con); + find = 1; + break; + } + } + + if (!find) { + dprintf("NOT find on %s(ClientFd=%d)\n", __func__, clientfd); + } +} + +static void recv_qrtr_from_dev(struct qrtr_hdr_v1 *hdr) { + int find = 0; + uint32_t type = le32toh(hdr->type); + + if (type == QRTR_TYPE_HELLO) { + send_ctrl_hello(le32toh(hdr->src_node_id), le32toh(hdr->src_port_id)); + find++; + } + else if (type == QRTR_TYPE_NEW_SERVER || type == QRTR_TYPE_DEL_SERVER) { + handle_server_change(hdr); + find++; + } + else if (type == QRTR_TYPE_DATA) { + struct qlistnode *con_node, *client_node; + + qlist_for_each(con_node, &qrtr_proxy_connection) { + QRTR_PROXY_CONNECTION *qrtr_con = qnode_to_item(con_node, QRTR_PROXY_CONNECTION, qnode); + + qlist_for_each(client_node, &qrtr_con->client_qnode) { + QRTR_PROXY_CLINET *qrtr_client = qnode_to_item(client_node, QRTR_PROXY_CLINET, qnode); + + if (qrtr_client->node_id == le32toh(hdr->dst_node_id) && qrtr_client->port_id == le32toh(hdr->dst_port_id)) { + PQCQMIMSG pQMI = (PQCQMIMSG)malloc(hdr->size + sizeof(QCQMI_HDR)); + + if (pQMI) { + pQMI->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI; + pQMI->QMIHdr.Length = htole16(hdr->size + sizeof(QCQMI_HDR) - 1); + pQMI->QMIHdr.CtlFlags = 0x00; + pQMI->QMIHdr.QMIType = qrtr_client->QMIType; + pQMI->QMIHdr.ClientId = qrtr_client->ClientId; + memcpy(&pQMI->MUXMsg, hdr + 1, hdr->size); + send_qmi_to_client(pQMI, qrtr_con->ClientFd); + free(pQMI); + find++; + } + } + } + } + + if (hdr->confirm_rx) { + struct qrtr_ctrl_pkt pkt; + struct sockaddr_qrtr from = {AF_QIPCRTR, le32toh(hdr->dst_node_id), le32toh(hdr->dst_port_id)}; + struct sockaddr_qrtr to = {AF_QIPCRTR, le32toh(hdr->src_node_id), le32toh(hdr->src_port_id)}; + + memset(&pkt, 0, sizeof(pkt)); + pkt.cmd = htole32(QRTR_TYPE_RESUME_TX); + pkt.client.node = hdr->dst_node_id; + pkt.client.port = hdr->dst_port_id; + + qrtr_node_enqueue(&pkt, sizeof(pkt), QRTR_TYPE_RESUME_TX, &from, &to, 0); + } + } + else if (type == QRTR_TYPE_RESUME_TX) { + } + + if (!find) { + dprintf("NOT find on %s()\n", __func__); + } +} + +static int recv_qmi_from_client(PQCQMIMSG pQMI, int clientfd) { + QRTR_PROXY_CONNECTION *qrtr_con; + struct qlistnode *con_node, *client_node; + int find = 0; + + qlist_for_each(con_node, &qrtr_proxy_connection) { + qrtr_con = qnode_to_item(con_node, QRTR_PROXY_CONNECTION, qnode); + if (qrtr_con->ClientFd == clientfd) + break; + qrtr_con = NULL; + } + + if (!qrtr_con) { + return -1; + } + + if 
(le16toh(pQMI->QMIHdr.QMIType) == QMUX_TYPE_CTL) { + if (pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType == QMICTL_SYNC_REQ) { + dprintf("do not allow client send QMICTL_SYNC_REQ\n"); + return 0; + } + else if (le16toh(pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType) == QMICTL_GET_CLIENT_ID_REQ) { + uint8_t QMIType = pQMI->CTLMsg.GetClientIdReq.QMIType; + PQCQMIMSG pRsp = (PQCQMIMSG)malloc(256); + + if (pRsp) { + uint8_t ClientId = 0; + + if (find_qrtr_service(QMIType)) { + ClientId = alloc_qrtr_client_id(qrtr_con, QMIType); + } + + pRsp->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI; + pRsp->QMIHdr.Length = htole16(sizeof(pRsp->CTLMsg.GetClientIdRsp) + sizeof(pRsp->QMIHdr) - 1); + pRsp->QMIHdr.CtlFlags = 0x00; + pRsp->QMIHdr.QMIType = QMUX_TYPE_CTL; + pRsp->QMIHdr.ClientId = 0; + + pRsp->CTLMsg.QMICTLMsgHdrRsp.CtlFlags = QMICTL_FLAG_RESPONSE; + pRsp->CTLMsg.QMICTLMsgHdrRsp.TransactionId = pQMI->CTLMsg.QMICTLMsgHdr.TransactionId; + pRsp->CTLMsg.QMICTLMsgHdrRsp.QMICTLType = pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType; + pRsp->CTLMsg.QMICTLMsgHdrRsp.Length = htole16(sizeof(pRsp->CTLMsg.GetClientIdRsp) - sizeof(pRsp->CTLMsg.QMICTLMsgHdr)); + pRsp->CTLMsg.QMICTLMsgHdrRsp.TLVType = QCTLV_TYPE_RESULT_CODE; + pRsp->CTLMsg.QMICTLMsgHdrRsp.TLVLength = htole16(4); + pRsp->CTLMsg.QMICTLMsgHdrRsp.QMUXResult = htole16(ClientId ? 0 : QMI_RESULT_FAILURE); + pRsp->CTLMsg.QMICTLMsgHdrRsp.QMUXError = htole16(ClientId ? 0 : QMI_ERR_INTERNAL); + pRsp->CTLMsg.GetClientIdRsp.TLV2Type = QCTLV_TYPE_REQUIRED_PARAMETER; + pRsp->CTLMsg.GetClientIdRsp.TLV2Length = htole16(2); + pRsp->CTLMsg.GetClientIdRsp.QMIType = QMIType; + pRsp->CTLMsg.GetClientIdRsp.ClientId = ClientId; + + send_qmi_to_client(pRsp, clientfd); + free(pRsp); + find++; + } + } + else if (le16toh(pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType) == QMICTL_RELEASE_CLIENT_ID_REQ) { + PQCQMIMSG pRsp = (PQCQMIMSG)malloc(256); + release_qrtr_client_id(qrtr_con, pQMI->CTLMsg.ReleaseClientIdReq.QMIType, pQMI->CTLMsg.ReleaseClientIdReq.ClientId); + + if (pRsp) { + pRsp->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI; + pRsp->QMIHdr.Length = htole16(sizeof(pRsp->CTLMsg.ReleaseClientIdRsp) + sizeof(pRsp->QMIHdr) - 1); + pRsp->QMIHdr.CtlFlags = 0x00; + pRsp->QMIHdr.QMIType = QMUX_TYPE_CTL; + pRsp->QMIHdr.ClientId = 0; + + pRsp->CTLMsg.QMICTLMsgHdrRsp.CtlFlags = QMICTL_FLAG_RESPONSE; + pRsp->CTLMsg.QMICTLMsgHdrRsp.TransactionId = pQMI->CTLMsg.QMICTLMsgHdr.TransactionId; + pRsp->CTLMsg.QMICTLMsgHdrRsp.QMICTLType = pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType; + pRsp->CTLMsg.QMICTLMsgHdrRsp.Length = htole16(sizeof(pRsp->CTLMsg.ReleaseClientIdRsp) - sizeof(pRsp->CTLMsg.QMICTLMsgHdr)); + pRsp->CTLMsg.QMICTLMsgHdrRsp.TLVType = QCTLV_TYPE_RESULT_CODE; + pRsp->CTLMsg.QMICTLMsgHdrRsp.TLVLength = htole16(4); + pRsp->CTLMsg.QMICTLMsgHdrRsp.QMUXResult = htole16(0); + pRsp->CTLMsg.QMICTLMsgHdrRsp.QMUXError = htole16(0); + pRsp->CTLMsg.ReleaseClientIdRsp.TLV2Type = QCTLV_TYPE_REQUIRED_PARAMETER; + pRsp->CTLMsg.ReleaseClientIdRsp.TLV2Length = htole16(2); + pRsp->CTLMsg.ReleaseClientIdRsp.QMIType = pQMI->CTLMsg.ReleaseClientIdReq.QMIType; + pRsp->CTLMsg.ReleaseClientIdRsp.ClientId = pQMI->CTLMsg.ReleaseClientIdReq.ClientId; + + send_qmi_to_client(pRsp, clientfd); + free(pRsp); + find++; + } + } + else if (le16toh(pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType) == QMICTL_GET_VERSION_REQ) { + PQCQMIMSG pRsp = (PQCQMIMSG)malloc(256); + + if (pRsp) { + pRsp->QMIHdr.IFType = USB_CTL_MSG_TYPE_QMI; + pRsp->QMIHdr.Length = htole16(sizeof(pRsp->CTLMsg.GetVersionRsp) + sizeof(pRsp->QMIHdr) - 1); + pRsp->QMIHdr.CtlFlags = 0x00; + pRsp->QMIHdr.QMIType = 
QMUX_TYPE_CTL; + pRsp->QMIHdr.ClientId = 0; + + pRsp->CTLMsg.QMICTLMsgHdrRsp.CtlFlags = QMICTL_FLAG_RESPONSE; + pRsp->CTLMsg.QMICTLMsgHdrRsp.TransactionId = pQMI->CTLMsg.QMICTLMsgHdr.TransactionId; + pRsp->CTLMsg.QMICTLMsgHdrRsp.QMICTLType = pQMI->CTLMsg.QMICTLMsgHdr.QMICTLType; + pRsp->CTLMsg.QMICTLMsgHdrRsp.Length = htole16(sizeof(pRsp->CTLMsg.GetVersionRsp) - sizeof(pRsp->CTLMsg.QMICTLMsgHdr)); + pRsp->CTLMsg.QMICTLMsgHdrRsp.TLVType = QCTLV_TYPE_RESULT_CODE; + pRsp->CTLMsg.QMICTLMsgHdrRsp.TLVLength = htole16(4); + pRsp->CTLMsg.QMICTLMsgHdrRsp.QMUXResult = htole16(0); + pRsp->CTLMsg.QMICTLMsgHdrRsp.QMUXError = htole16(0); + pRsp->CTLMsg.GetVersionRsp.TLV2Type = QCTLV_TYPE_REQUIRED_PARAMETER; + pRsp->CTLMsg.GetVersionRsp.TLV2Length = htole16(1); + pRsp->CTLMsg.GetVersionRsp.NumElements = 0; + + send_qmi_to_client(pRsp, clientfd); + free(pRsp); + find++; + } + } + } + else { + qlist_for_each (client_node, &qrtr_con->client_qnode) { + QRTR_PROXY_CLINET *qrtr_client = qnode_to_item(client_node, QRTR_PROXY_CLINET, qnode); + + if (pQMI->QMIHdr.QMIType == qrtr_client->QMIType && pQMI->QMIHdr.ClientId == qrtr_client->ClientId) { + QRTR_SERVICE *srv = find_qrtr_service(pQMI->QMIHdr.QMIType); + + if (srv && srv->service) { + struct sockaddr_qrtr from = {AF_QIPCRTR, qrtr_client->node_id, qrtr_client->port_id}; + struct sockaddr_qrtr to = {AF_QIPCRTR, srv->node, srv->port}; + + qrtr_node_enqueue(&pQMI->MUXMsg, le16toh(pQMI->QMIHdr.Length) + 1 - sizeof(QCQMI_HDR), + QRTR_TYPE_DATA, &from, &to, 0); + find++; + } + break; + } + } + } + + if (!find) { + dprintf("NOT find on %s()\n", __func__); + } + + return 0; +} + +static int qrtr_proxy_init(void) { + unsigned i; + int qrtr_sync_done = 0; + + dprintf("%s enter\n", __func__); + send_ctrl_hello(QRTR_NODE_BCAST, QRTR_PORT_CTRL); + + for (i = 0; i < 10; i++) { + sleep(1); + qrtr_sync_done = !qlist_empty(&qrtr_server_list); + if (qrtr_sync_done) + break; + } + + dprintf("%s %s\n", __func__, qrtr_sync_done ? "succful" : "fail"); + return qrtr_sync_done ? 
0 : -1; +} + +static void qrtr_start_server(const char* servername) { + qrtr_proxy_server_fd = create_local_server(servername); + dprintf("qrtr_proxy_server_fd = %d\n", qrtr_proxy_server_fd); + if (qrtr_proxy_server_fd == -1) { + dprintf("Failed to create %s, errno: %d (%s)\n", servername, errno, strerror(errno)); + } +} + +static void qrtr_close_server(const char* servername) { + if (qrtr_proxy_server_fd != -1) { + dprintf("%s %s\n", __func__, servername); + close(qrtr_proxy_server_fd); + qrtr_proxy_server_fd = -1; + } +} + +static void *qrtr_proxy_loop(void *param) +{ + void *rx_buf; + struct qlistnode *con_node; + QRTR_PROXY_CONNECTION *qrtr_con; + + (void)param; + dprintf("%s enter thread_id %p\n", __func__, (void *)pthread_self()); + + rx_buf = malloc(8192); + if (!rx_buf) + return NULL; + + while (cdc_wdm_fd > 0 && qrtr_proxy_quit == 0) { + struct pollfd pollfds[32]; + int ne, ret, nevents = 0; + ssize_t nreads; + + pollfds[nevents].fd = cdc_wdm_fd; + pollfds[nevents].events = POLLIN; + pollfds[nevents].revents= 0; + nevents++; + + if (qrtr_proxy_server_fd > 0) { + pollfds[nevents].fd = qrtr_proxy_server_fd; + pollfds[nevents].events = POLLIN; + pollfds[nevents].revents= 0; + nevents++; + } + + qlist_for_each(con_node, &qrtr_proxy_connection) { + qrtr_con = qnode_to_item(con_node, QRTR_PROXY_CONNECTION, qnode); + + pollfds[nevents].fd = qrtr_con->ClientFd; + pollfds[nevents].events = POLLIN; + pollfds[nevents].revents= 0; + nevents++; + + if (nevents == (sizeof(pollfds)/sizeof(pollfds[0]))) + break; + } + + do { + //ret = poll(pollfds, nevents, -1); + ret = poll(pollfds, nevents, (qrtr_proxy_server_fd > 0) ? -1 : 200); + } while (ret == -1 && errno == EINTR && qrtr_proxy_quit == 0); + + if (ret < 0) { + dprintf("%s poll=%d, errno: %d (%s)\n", __func__, ret, errno, strerror(errno)); + goto qrtr_proxy_loop_exit; + } + + for (ne = 0; ne < nevents; ne++) { + int fd = pollfds[ne].fd; + short revents = pollfds[ne].revents; + + if (revents & (POLLERR | POLLHUP | POLLNVAL)) { + dprintf("%s poll fd = %d, revents = %04x\n", __func__, fd, revents); + if (fd == cdc_wdm_fd) { + goto qrtr_proxy_loop_exit; + } + else if (fd == qrtr_proxy_server_fd) { + + } + else { + cleanup_qrtr_connection(fd); + } + + continue; + } + + if (!(pollfds[ne].revents & POLLIN)) { + continue; + } + + if (fd == qrtr_proxy_server_fd) { + accept_qrtr_connection(fd); + } + else if (fd == cdc_wdm_fd) { + struct qrtr_hdr_v1 *hdr = (struct qrtr_hdr_v1 *)rx_buf; + + nreads = read(fd, rx_buf, 8192); + if (nreads <= 0) { + dprintf("%s read=%d errno: %d (%s)\n", __func__, (int)nreads, errno, strerror(errno)); + goto qrtr_proxy_loop_exit; + } + else if (nreads != (int)align_4(le32toh(hdr->size) + sizeof(*hdr))) { + dprintf("%s nreads=%d, hdr->size = %d\n", __func__, (int)nreads, le32toh(hdr->size)); + continue; + } + + dump_qrtr(hdr, nreads, '<'); + recv_qrtr_from_dev(hdr); + } + else { + PQCQMIMSG pQMI = (PQCQMIMSG)rx_buf; + + nreads = read(fd, rx_buf, 8192); + if (nreads <= 0) { + dprintf("%s read=%d errno: %d (%s)", __func__, (int)nreads, errno, strerror(errno)); + cleanup_qrtr_connection(fd); + break; + } + else if (nreads != (le16toh(pQMI->QMIHdr.Length) + 1)) { + dprintf("%s nreads=%d, pQCQMI->QMIHdr.Length = %d\n", __func__, (int)nreads, le16toh(pQMI->QMIHdr.Length)); + continue; + } + + recv_qmi_from_client(pQMI, fd); + } + } + } + +qrtr_proxy_loop_exit: + while (!qlist_empty(&qrtr_proxy_connection)) { + QRTR_PROXY_CONNECTION *qrtr_con = qnode_to_item(qlist_head(&qrtr_proxy_connection), QRTR_PROXY_CONNECTION, qnode); + + 
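+        /*
+         * Drain every remaining connection on exit. cleanup_qrtr_connection()
+         * walks the per-connection client list and releases each client id
+         * (release_qrtr_client_id() frees the id/port bitmaps and sends a
+         * QRTR_TYPE_DEL_CLIENT towards the modem) before closing the fd.
+         */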
cleanup_qrtr_connection(qrtr_con->ClientFd); + } + + dprintf("%s exit, thread_id %p\n", __func__, (void *)pthread_self()); + free(rx_buf); + + return NULL; +} + +static void usage(void) { + dprintf(" -d A valid qrtr device\n" + " default /dev/mhi_IPCR, but mhi_IPCR may be invalid\n" + " -i netcard name\n" + " -v Will show all details\n"); +} + +static void sig_action(int sig) { + if (qrtr_proxy_quit == 0) { + qrtr_proxy_quit = 1; + if (thread_id) + pthread_kill(thread_id, sig); + } +} + +int main(int argc, char *argv[]) { + int opt; + char cdc_wdm[32+1] = "/dev/mhi_IPCR"; + char servername[64] = {0}; + + signal(SIGINT, sig_action); + signal(SIGTERM, sig_action); + + optind = 1; + while ( -1 != (opt = getopt(argc, argv, "d:i:vh"))) { + switch (opt) { + case 'd': + strcpy(cdc_wdm, optarg); + break; + case 'v': + verbose_debug = 1; + break; + default: + usage(); + return 0; + } + } + + sprintf(servername, "quectel-qrtr-proxy%c", cdc_wdm[strlen(cdc_wdm)-1]); + dprintf("Will use cdc-wdm='%s', proxy='%s'\n", cdc_wdm, servername); + + while (qrtr_proxy_quit == 0) { + cdc_wdm_fd = open(cdc_wdm, O_RDWR | O_NONBLOCK | O_NOCTTY); + if (cdc_wdm_fd == -1) { + dprintf("Failed to open %s, errno: %d (%s)\n", cdc_wdm, errno, strerror(errno)); + sleep(5); + continue; + } + cfmakenoblock(cdc_wdm_fd); + qlist_init(&qrtr_proxy_connection); + qlist_init(&qrtr_server_list); + pthread_create(&thread_id, NULL, qrtr_proxy_loop, NULL); + + if (qrtr_proxy_init() == 0) { + qrtr_start_server(servername); + pthread_join(thread_id, NULL); + qrtr_close_server(servername); + } + else { + pthread_cancel(thread_id); + pthread_join(thread_id, NULL); + } + + close(cdc_wdm_fd); + } + + return 0; +} diff --git a/wwan/app/quectel_cm_5G/src/rmnetctl.c b/wwan/app/quectel_cm_5G/src/rmnetctl.c new file mode 100644 index 0000000..3a9aae9 --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/rmnetctl.c @@ -0,0 +1,342 @@ +//https://source.codeaurora.org/quic/la/platform/vendor/qcom-opensource/dataservices/tree/rmnetctl +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RMNETCTL_SUCCESS 0 +#define RMNETCTL_LIB_ERR 1 +#define RMNETCTL_KERNEL_ERR 2 +#define RMNETCTL_INVALID_ARG 3 + +enum rmnetctl_error_codes_e { + RMNETCTL_API_SUCCESS = 0, + + RMNETCTL_API_FIRST_ERR = 1, + RMNETCTL_API_ERR_MESSAGE_SEND = 3, + RMNETCTL_API_ERR_MESSAGE_RECEIVE = 4, + + RMNETCTL_INIT_FIRST_ERR = 5, + RMNETCTL_INIT_ERR_PROCESS_ID = RMNETCTL_INIT_FIRST_ERR, + RMNETCTL_INIT_ERR_NETLINK_FD = 6, + RMNETCTL_INIT_ERR_BIND = 7, + + RMNETCTL_API_SECOND_ERR = 9, + RMNETCTL_API_ERR_HNDL_INVALID = RMNETCTL_API_SECOND_ERR, + RMNETCTL_API_ERR_RETURN_TYPE = 13, +}; + +struct rmnetctl_hndl_s { + uint32_t pid; + uint32_t transaction_id; + int netlink_fd; + struct sockaddr_nl src_addr, dest_addr; +}; +typedef struct rmnetctl_hndl_s rmnetctl_hndl_t; + +#define NLMSG_TAIL(nmsg) \ + ((struct rtattr *) (((char *)(nmsg)) + NLMSG_ALIGN((nmsg)->nlmsg_len))) + +struct nlmsg { + struct nlmsghdr nl_addr; + struct ifinfomsg ifmsg; + char data[500]; +}; + +#define MIN_VALID_PROCESS_ID 0 +#define MIN_VALID_SOCKET_FD 0 +#define KERNEL_PROCESS_ID 0 +#define UNICAST 0 + +enum { + IFLA_RMNET_UL_AGG_PARAMS = __IFLA_RMNET_MAX, + __IFLA_RMNET_EXT_MAX, +}; + +struct rmnet_egress_agg_params { + uint16_t agg_size; + uint16_t agg_count; + uint32_t agg_time; +}; + +static int rmnet_get_ack(rmnetctl_hndl_t *hndl, uint16_t *error_code) +{ + struct nlack { + struct nlmsghdr ackheader; + struct nlmsgerr ackdata; + char 
data[256]; + + } ack; + int i; + + if (!hndl || !error_code) + return RMNETCTL_INVALID_ARG; + + if ((i = recv(hndl->netlink_fd, &ack, sizeof(ack), 0)) < 0) { + *error_code = errno; + return RMNETCTL_API_ERR_MESSAGE_RECEIVE; + } + + /*Ack should always be NLMSG_ERROR type*/ + if (ack.ackheader.nlmsg_type == NLMSG_ERROR) { + if (ack.ackdata.error == 0) { + *error_code = RMNETCTL_API_SUCCESS; + return RMNETCTL_SUCCESS; + } else { + *error_code = -ack.ackdata.error; + return RMNETCTL_KERNEL_ERR; + } + } + + *error_code = RMNETCTL_API_ERR_RETURN_TYPE; + return RMNETCTL_API_FIRST_ERR; +} + +static int rtrmnet_ctl_init(rmnetctl_hndl_t **hndl, uint16_t *error_code) +{ + struct sockaddr_nl __attribute__((__may_alias__)) *saddr_ptr; + int netlink_fd = -1; + pid_t pid = 0; + + if (!hndl || !error_code) + return RMNETCTL_INVALID_ARG; + + *hndl = (rmnetctl_hndl_t *)malloc(sizeof(rmnetctl_hndl_t)); + if (!*hndl) { + *error_code = RMNETCTL_API_ERR_HNDL_INVALID; + return RMNETCTL_LIB_ERR; + } + + memset(*hndl, 0, sizeof(rmnetctl_hndl_t)); + + pid = getpid(); + if (pid < MIN_VALID_PROCESS_ID) { + free(*hndl); + *error_code = RMNETCTL_INIT_ERR_PROCESS_ID; + return RMNETCTL_LIB_ERR; + } + (*hndl)->pid = KERNEL_PROCESS_ID; + netlink_fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE); + if (netlink_fd < MIN_VALID_SOCKET_FD) { + free(*hndl); + *error_code = RMNETCTL_INIT_ERR_NETLINK_FD; + return RMNETCTL_LIB_ERR; + } + + (*hndl)->netlink_fd = netlink_fd; + + memset(&(*hndl)->src_addr, 0, sizeof(struct sockaddr_nl)); + + (*hndl)->src_addr.nl_family = AF_NETLINK; + (*hndl)->src_addr.nl_pid = (*hndl)->pid; + + saddr_ptr = &(*hndl)->src_addr; + if (bind((*hndl)->netlink_fd, + (struct sockaddr *)saddr_ptr, + sizeof(struct sockaddr_nl)) < 0) { + close((*hndl)->netlink_fd); + free(*hndl); + *error_code = RMNETCTL_INIT_ERR_BIND; + return RMNETCTL_LIB_ERR; + } + + memset(&(*hndl)->dest_addr, 0, sizeof(struct sockaddr_nl)); + + (*hndl)->dest_addr.nl_family = AF_NETLINK; + (*hndl)->dest_addr.nl_pid = KERNEL_PROCESS_ID; + (*hndl)->dest_addr.nl_groups = UNICAST; + + return RMNETCTL_SUCCESS; +} + +static int rtrmnet_ctl_deinit(rmnetctl_hndl_t *hndl) +{ + if (!hndl) + return RMNETCTL_SUCCESS; + + close(hndl->netlink_fd); + free(hndl); + + return RMNETCTL_SUCCESS; +} + +static int rtrmnet_ctl_newvnd(rmnetctl_hndl_t *hndl, char *devname, char *vndname, + uint16_t *error_code, uint8_t index, + uint32_t flagconfig, uint32_t ul_agg_cnt, uint32_t ul_agg_size) +{ + struct rtattr *attrinfo, *datainfo, *linkinfo; + struct ifla_vlan_flags flags; + int devindex = 0, val = 0; + char *kind = "rmnet"; + struct nlmsg req; + short id; + + if (!hndl || !devname || !vndname || !error_code) + return RMNETCTL_INVALID_ARG; + + memset(&req, 0, sizeof(req)); + req.nl_addr.nlmsg_type = RTM_NEWLINK; + req.nl_addr.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)); + req.nl_addr.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL | + NLM_F_ACK; + req.nl_addr.nlmsg_seq = hndl->transaction_id; + hndl->transaction_id++; + + /* Get index of devname*/ + devindex = if_nametoindex(devname); + if (devindex < 0) { + *error_code = errno; + return RMNETCTL_KERNEL_ERR; + } + + /* Setup link attr with devindex as data */ + val = devindex; + attrinfo = (struct rtattr *)(((char *)&req) + + NLMSG_ALIGN(req.nl_addr.nlmsg_len)); + attrinfo->rta_type = IFLA_LINK; + attrinfo->rta_len = RTA_ALIGN(RTA_LENGTH(sizeof(val))); + memcpy(RTA_DATA(attrinfo), &val, sizeof(val)); + req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) + + RTA_ALIGN(RTA_LENGTH(sizeof(val))); 
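+    /*
+     * Every attribute below repeats the same append idiom: point a struct
+     * rtattr at the NLMSG_ALIGN'ed tail of the message, fill rta_type and
+     * rta_len = RTA_LENGTH(payload), copy the payload via RTA_DATA(), then
+     * advance nlmsg_len by the RTA_ALIGN'ed length. A hypothetical helper
+     * (not part of this file) that captures the pattern:
+     *
+     *   static void add_rtattr(struct nlmsg *req, unsigned short rta_type,
+     *                          const void *data, int len) {
+     *       struct rtattr *rta = (struct rtattr *)(((char *)req) +
+     *           NLMSG_ALIGN(req->nl_addr.nlmsg_len));
+     *       rta->rta_type = rta_type;
+     *       rta->rta_len = RTA_LENGTH(len);
+     *       memcpy(RTA_DATA(rta), data, len);
+     *       req->nl_addr.nlmsg_len = NLMSG_ALIGN(req->nl_addr.nlmsg_len) +
+     *           RTA_ALIGN(RTA_LENGTH(len));
+     *   }
+     */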
+ + /* Set up IFLA info kind RMNET that has linkinfo and type */ + attrinfo = (struct rtattr *)(((char *)&req) + + NLMSG_ALIGN(req.nl_addr.nlmsg_len)); + attrinfo->rta_type = IFLA_IFNAME; + attrinfo->rta_len = RTA_ALIGN(RTA_LENGTH(strlen(vndname) + 1)); + memcpy(RTA_DATA(attrinfo), vndname, strlen(vndname) + 1); + req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) + + RTA_ALIGN(RTA_LENGTH(strlen(vndname) + 1)); + + linkinfo = (struct rtattr *)(((char *)&req) + + NLMSG_ALIGN(req.nl_addr.nlmsg_len)); + linkinfo->rta_type = IFLA_LINKINFO; + linkinfo->rta_len = RTA_ALIGN(RTA_LENGTH(0)); + req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) + + RTA_ALIGN(RTA_LENGTH(0)); + + attrinfo = (struct rtattr *)(((char *)&req) + + NLMSG_ALIGN(req.nl_addr.nlmsg_len)); + attrinfo->rta_type = IFLA_INFO_KIND; + attrinfo->rta_len = RTA_ALIGN(RTA_LENGTH(strlen(kind))); + memcpy(RTA_DATA(attrinfo), kind, strlen(kind)); + req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) + + RTA_ALIGN(RTA_LENGTH(strlen(kind))); + + datainfo = (struct rtattr *)(((char *)&req) + + NLMSG_ALIGN(req.nl_addr.nlmsg_len)); + datainfo->rta_type = IFLA_INFO_DATA; + datainfo->rta_len = RTA_ALIGN(RTA_LENGTH(0)); + req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) + + RTA_ALIGN(RTA_LENGTH(0)); + + id = index; + attrinfo = (struct rtattr *)(((char *)&req) + + NLMSG_ALIGN(req.nl_addr.nlmsg_len)); + attrinfo->rta_type = IFLA_VLAN_ID; + attrinfo->rta_len = RTA_LENGTH(sizeof(id)); + memcpy(RTA_DATA(attrinfo), &id, sizeof(id)); + req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) + + RTA_ALIGN(RTA_LENGTH(sizeof(id))); + + if (flagconfig != 0) { + flags.mask = flagconfig; + flags.flags = flagconfig; + + attrinfo = (struct rtattr *)(((char *)&req) + + NLMSG_ALIGN(req.nl_addr.nlmsg_len)); + attrinfo->rta_type = IFLA_VLAN_FLAGS; + attrinfo->rta_len = RTA_LENGTH(sizeof(flags)); + memcpy(RTA_DATA(attrinfo), &flags, sizeof(flags)); + req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) + + RTA_ALIGN(RTA_LENGTH(sizeof(flags))); + } + + if (ul_agg_cnt > 1) { + struct rmnet_egress_agg_params agg_params; + + agg_params.agg_size = ul_agg_size; + agg_params.agg_count = ul_agg_cnt; + agg_params.agg_time = 3000000; + + attrinfo = (struct rtattr *)(((char *)&req) + + NLMSG_ALIGN(req.nl_addr.nlmsg_len)); + attrinfo->rta_type = IFLA_RMNET_UL_AGG_PARAMS; + attrinfo->rta_len = RTA_LENGTH(sizeof(agg_params)); + memcpy(RTA_DATA(attrinfo), &agg_params, sizeof(agg_params)); + req.nl_addr.nlmsg_len = NLMSG_ALIGN(req.nl_addr.nlmsg_len) + + RTA_ALIGN(RTA_LENGTH(sizeof(agg_params))); + } + + datainfo->rta_len = (char *)NLMSG_TAIL(&req.nl_addr) - (char *)datainfo; + + linkinfo->rta_len = (char *)NLMSG_TAIL(&req.nl_addr) - (char *)linkinfo; + + if (send(hndl->netlink_fd, &req, req.nl_addr.nlmsg_len, 0) < 0) { + *error_code = RMNETCTL_API_ERR_MESSAGE_SEND; + return RMNETCTL_LIB_ERR; + } + + return rmnet_get_ack(hndl, error_code); +} + +int rtrmnet_ctl_create_vnd(char *devname, char *vndname, uint8_t muxid, + uint32_t qmap_version, uint32_t ul_agg_cnt, uint32_t ul_agg_size) +{ + struct rmnetctl_hndl_s *handle; + uint16_t error_code; + int return_code; + uint32_t flagconfig = RMNET_FLAGS_INGRESS_DEAGGREGATION; + + printf("%s devname: %s, vndname: %s, muxid: %d, qmap_version: %d\n", + __func__, devname, vndname, muxid, qmap_version); + + ul_agg_cnt = 0; //TODO + + if (ul_agg_cnt > 1) + flagconfig |= RMNET_EGRESS_FORMAT_AGGREGATION; + + if (qmap_version == 9) { //QMAPV5 +#ifdef RMNET_FLAGS_INGRESS_MAP_CKSUMV5 + flagconfig |= 
RMNET_FLAGS_INGRESS_MAP_CKSUMV5; + flagconfig |= RMNET_FLAGS_EGRESS_MAP_CKSUMV5; +#else + return -1001; +#endif + } + else if (qmap_version == 8) { //QMAPV4 + flagconfig |= RMNET_FLAGS_INGRESS_MAP_CKSUMV4; + flagconfig |= RMNET_FLAGS_EGRESS_MAP_CKSUMV4; + } + else if (qmap_version == 5) { //QMAPV1 + } + else { + flagconfig = 0; + } + + return_code = rtrmnet_ctl_init(&handle, &error_code); + if (return_code) { + printf("rtrmnet_ctl_init error_code: %d, return_code: %d, errno: %d (%s)\n", + error_code, return_code, errno, strerror(errno)); + } + if (return_code == RMNETCTL_SUCCESS) { + return_code = rtrmnet_ctl_newvnd(handle, devname, vndname, &error_code, + muxid, flagconfig, ul_agg_cnt, ul_agg_size); + if (return_code) { + printf("rtrmnet_ctl_newvnd error_code: %d, return_code: %d, errno: %d (%s)\n", + error_code, return_code, errno, strerror(errno)); + } + rtrmnet_ctl_deinit(handle); + } + + return return_code; +} diff --git a/wwan/app/quectel_cm_5G/src/udhcpc.c b/wwan/app/quectel_cm_5G/src/udhcpc.c new file mode 100644 index 0000000..8ab381e --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/udhcpc.c @@ -0,0 +1,745 @@ +/****************************************************************************** + @file udhcpc.c + @brief call DHCP tools to obtain IP address. + + DESCRIPTION + Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. + + --------------------------------------------------------------------------- + Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved. + Quectel Wireless Solution Proprietary and Confidential. + --------------------------------------------------------------------------- +******************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "util.h" +#include "QMIThread.h" +extern int ql_get_netcard_carrier_state(const char *devname); + +static __inline in_addr_t qmi2addr(uint32_t __x) { + return (__x>>24) | (__x>>8&0xff00) | (__x<<8&0xff0000) | (__x<<24); +} + +static int ql_system(const char *shell_cmd) { + dbg_time("%s", shell_cmd); + return system(shell_cmd); +} + +static void ifc_init_ifr(const char *name, struct ifreq *ifr) +{ + memset(ifr, 0, sizeof(struct ifreq)); + no_trunc_strncpy(ifr->ifr_name, name, IFNAMSIZ); + ifr->ifr_name[IFNAMSIZ - 1] = 0; +} + +static void ql_set_mtu(const char *ifname, int ifru_mtu) { + int inet_sock; + struct ifreq ifr; + + inet_sock = socket(AF_INET, SOCK_DGRAM, 0); + + if (inet_sock > 0) { + ifc_init_ifr(ifname, &ifr); + + if (!ioctl(inet_sock, SIOCGIFMTU, &ifr)) { + if (ifr.ifr_ifru.ifru_mtu != ifru_mtu) { + dbg_time("change mtu %d -> %d", ifr.ifr_ifru.ifru_mtu , ifru_mtu); + ifr.ifr_ifru.ifru_mtu = ifru_mtu; + ioctl(inet_sock, SIOCSIFMTU, &ifr); + } + } + + close(inet_sock); + } +} + +static int ifc_get_addr(const char *name, in_addr_t *addr) +{ + int inet_sock; + struct ifreq ifr; + int ret = 0; + + inet_sock = socket(AF_INET, SOCK_DGRAM, 0); + + ifc_init_ifr(name, &ifr); + if (addr != NULL) { + ret = ioctl(inet_sock, SIOCGIFADDR, &ifr); + if (ret < 0) { + *addr = 0; + } else { + *addr = ((struct sockaddr_in*) &ifr.ifr_addr)->sin_addr.s_addr; + } + } + close(inet_sock); + return ret; +} + +static short ifc_get_flags(const char *ifname) +{ + int inet_sock; + struct ifreq ifr; + int ret = 0; + + inet_sock = socket(AF_INET, SOCK_DGRAM, 0); + + if (inet_sock > 0) { + ifc_init_ifr(ifname, &ifr); + + if 
(!ioctl(inet_sock, SIOCGIFFLAGS, &ifr)) { + ret = ifr.ifr_ifru.ifru_flags; + } + + close(inet_sock); + } + + return ret; +} + +static void ifc_set_state(const char *ifname, int state) { + char shell_cmd[128]; + + if (!access("/sbin/ip", X_OK)) { + snprintf(shell_cmd, sizeof(shell_cmd), "ip link set dev %s %s", ifname, state ? "up" : "down"); + } else { + snprintf(shell_cmd, sizeof(shell_cmd), "ifconfig %s %s", ifname, state ? "up" : "down"); + } + ql_system(shell_cmd); +} + +static int ql_netcard_ipv4_address_check(const char *ifname, in_addr_t ip) { + in_addr_t addr = 0; + + ifc_get_addr(ifname, &addr); + return addr == ip; +} + +static int ql_raw_ip_mode_check(const char *ifname, uint32_t ip) { + int fd; + char raw_ip[128]; + char mode[2] = "X"; + int mode_change = 0; + + if (ql_netcard_ipv4_address_check(ifname, qmi2addr(ip))) + return 0; + + snprintf(raw_ip, sizeof(raw_ip), "/sys/class/net/%s/qmi/raw_ip", ifname); + if (access(raw_ip, F_OK)) + return 0; + + fd = open(raw_ip, O_RDWR | O_NONBLOCK | O_NOCTTY); + if (fd < 0) { + dbg_time("%s %d fail to open(%s), errno:%d (%s)", __FILE__, __LINE__, raw_ip, errno, strerror(errno)); + return 0; + } + + if (read(fd, mode, 2) == -1) {}; + if (mode[0] == '0' || mode[0] == 'N') { + dbg_time("File:%s Line:%d udhcpc fail to get ip address, try next:", __func__, __LINE__); + ifc_set_state(ifname, 0); + dbg_time("echo Y > /sys/class/net/%s/qmi/raw_ip", ifname); + mode[0] = 'Y'; + if (write(fd, mode, 2) == -1) {}; + mode_change = 1; + ifc_set_state(ifname, 1); + } + + close(fd); + return mode_change; +} + +static void* udhcpc_thread_function(void* arg) { + FILE * udhcpc_fp; + char *udhcpc_cmd = (char *)arg; + + if (udhcpc_cmd == NULL) + return NULL; + + dbg_time("%s", udhcpc_cmd); + udhcpc_fp = popen(udhcpc_cmd, "r"); + free(udhcpc_cmd); + if (udhcpc_fp) { + char buf[0xff]; + + buf[sizeof(buf)-1] = '\0'; + while((fgets(buf, sizeof(buf)-1, udhcpc_fp)) != NULL) { + if ((strlen(buf) > 1) && (buf[strlen(buf) - 1] == '\n')) + buf[strlen(buf) - 1] = '\0'; + dbg_time("%s", buf); + } + + pclose(udhcpc_fp); + } + + return NULL; +} + +//#define USE_DHCLIENT +#ifdef USE_DHCLIENT +static int dhclient_alive = 0; +#endif +static int dibbler_client_alive = 0; + +void ql_set_driver_link_state(PROFILE_T *profile, int link_state) { + char link_file[128]; + int fd; + int new_state = 0; + + snprintf(link_file, sizeof(link_file), "/sys/class/net/%s/link_state", profile->usbnet_adapter); + fd = open(link_file, O_RDWR | O_NONBLOCK | O_NOCTTY); + if (fd == -1) { + if (errno != ENOENT) + dbg_time("Fail to access %s, errno: %d (%s)", link_file, errno, strerror(errno)); + return; + } + + if (profile->qmap_mode <= 1) + new_state = !!link_state; + else { + //0x80 means link off this pdp + new_state = (link_state ? 
0x00 : 0x80) + (profile->muxid & 0x7F); + } + + snprintf(link_file, sizeof(link_file), "%d\n", new_state); + if (write(fd, link_file, sizeof(link_file)) == -1) {}; + + if (link_state == 0 && profile->qmapnet_adapter[0] + && strcmp(profile->qmapnet_adapter, profile->usbnet_adapter)) { + size_t rc; + + lseek(fd, 0, SEEK_SET); + rc = read(fd, link_file, sizeof(link_file)); + if (rc > 1 && (!strncasecmp(link_file, "0\n", 2) || !strncasecmp(link_file, "0x0\n", 4))) { + ifc_set_state(profile->usbnet_adapter, 0); + } + } + + close(fd); +} + +static const char *ipv4Str(const uint32_t Address) { + static char str[] = {"255.225.255.255"}; + uint8_t *ip = (uint8_t *)&Address; + + snprintf(str, sizeof(str), "%d.%d.%d.%d", ip[3], ip[2], ip[1], ip[0]); + return str; +} + +static const char *ipv6Str(const UCHAR Address[16]) { + static char str[64]; + uint16_t ip[8]; + int i; + for (i = 0; i < 8; i++) { + ip[i] = (Address[i*2]<<8) + Address[i*2+1]; + } + + snprintf(str, sizeof(str), "%x:%x:%x:%x:%x:%x:%x:%x", + ip[0], ip[1], ip[2], ip[3], ip[4], ip[5], ip[6], ip[7]); + + return str; +} + +void update_ipv4_address(const char *ifname, const char *ip, const char *gw, unsigned prefix) +{ + char shell_cmd[128]; + + if (!ifname) + return; + + if (!access("/sbin/ip", X_OK)) { + snprintf(shell_cmd, sizeof(shell_cmd), "ip -%d address flush dev %s", 4, ifname); + ql_system(shell_cmd); + + snprintf(shell_cmd, sizeof(shell_cmd), "ip -%d address add %s/%u dev %s", 4, ip, prefix, ifname); + ql_system(shell_cmd); + + //ping6 www.qq.com + snprintf(shell_cmd, sizeof(shell_cmd), "ip -%d route add default via %s dev %s", 4, gw, ifname); + ql_system(shell_cmd); + } else { + unsigned n = (0xFFFFFFFF >> (32 - prefix)) << (32 - prefix); + // n = (n>>24) | (n>>8&0xff00) | (n<<8&0xff0000) | (n<<24); + + snprintf(shell_cmd, sizeof(shell_cmd), "ifconfig %s %s netmask %s", ifname, ip, ipv4Str(n)); + ql_system(shell_cmd); + + //Resetting default routes + snprintf(shell_cmd, sizeof(shell_cmd), "route del default dev %s", ifname); + while(!system(shell_cmd)); + + snprintf(shell_cmd, sizeof(shell_cmd), "route add default gw %s dev %s", gw, ifname); + ql_system(shell_cmd); + } +} + +void update_ipv6_address(const char *ifname, const char *ip, const char *gw, unsigned prefix) { + char shell_cmd[128]; + + (void)gw; + if (!access("/sbin/ip", X_OK)) { + snprintf(shell_cmd, sizeof(shell_cmd), "ip -%d address flush dev %s", 6, ifname); + ql_system(shell_cmd); + + snprintf(shell_cmd, sizeof(shell_cmd), "ip -%d address add %s/%u dev %s", 6, ip, prefix, ifname); + ql_system(shell_cmd); + + //ping6 www.qq.com + snprintf(shell_cmd, sizeof(shell_cmd), "ip -%d route add default dev %s", 6, ifname); + ql_system(shell_cmd); + } else { + snprintf(shell_cmd, sizeof(shell_cmd), "ifconfig %s %s/%d", ifname, ip, prefix); + ql_system(shell_cmd); + + snprintf(shell_cmd, sizeof(shell_cmd), "route -A inet6 add default dev %s", ifname); + ql_system(shell_cmd); + } +} + +static void update_ip_address_by_qmi(const char *ifname, const IPV4_T *ipv4, const IPV6_T *ipv6) { + char *d1, *d2; + + if (ipv4 && ipv4->Address) { + d1 = strdup(ipv4Str(ipv4->Address)); + d2 = strdup(ipv4Str(ipv4->Gateway)); + unsigned prefix = 0; + unsigned n = 0; + + for (n = 0; n < 32; n++) { + if (ipv4->SubnetMask&((unsigned)1<DnsPrimary) { + d1 = strdup(ipv4Str(ipv4->DnsPrimary)); + d2 = strdup(ipv4Str(ipv4->DnsSecondary ? 
ipv4->DnsSecondary : ipv4->DnsPrimary)); + update_resolv_conf(4, ifname, d1, d2); + free(d1); free(d2); + } + } + + if (ipv6 && ipv6->Address[0] && ipv6->PrefixLengthIPAddr) { + d1 = strdup(ipv6Str(ipv6->Address)); + d2 = strdup(ipv6Str(ipv6->Gateway)); + + update_ipv6_address(ifname, d1, d2, ipv6->PrefixLengthIPAddr); + free(d1); free(d2); + + //Adding DNS + if (ipv6->DnsPrimary[0]) { + d1 = strdup(ipv6Str(ipv6->DnsPrimary)); + d2 = strdup(ipv6Str(ipv6->DnsSecondary[0] ? ipv6->DnsSecondary : ipv6->DnsPrimary)); + update_resolv_conf(6, ifname, d1, d2); + free(d1); free(d2); + } + } +} + +//#define QL_OPENWER_NETWORK_SETUP +#ifdef QL_OPENWER_NETWORK_SETUP +static const char *openwrt_lan = "br-lan"; +static const char *openwrt_wan = "wwan0"; + +static int ql_openwrt_system(const char *cmd) { + int i; + int ret = 1; + char shell_cmd[128]; + + snprintf(shell_cmd, sizeof(shell_cmd), "%s 2>1 > /dev/null", cmd); + + for (i = 0; i < 15; i++) { + dbg_time("%s", cmd); + ret = system(shell_cmd); + if (!ret) + break; + sleep(1); + } + + return ret; +} + +static int ql_openwrt_is_wan(const char *ifname) { + if (openwrt_lan == NULL) { + system("uci show network.wan.ifname"); + } + + if (strcmp(ifname, openwrt_wan)) + return 0; + + return 1; +} + +static void ql_openwrt_setup_wan(const char *ifname, const IPV4_T *ipv4) { + FILE *fp = NULL; + char config[64]; + + snprintf(config, sizeof(config), "/tmp/rmnet_%s_ipv4config", ifname); + + if (ipv4 == NULL) { + if (ql_openwrt_is_wan(ifname)) + ql_openwrt_system("ifdown wan"); + return; + } + + fp = fopen(config, "w"); + if (fp == NULL) + return; + + fprintf(fp, "IFNAME=\"%s\"\n", ifname); + fprintf(fp, "PUBLIC_IP=\"%s\"\n", ipv4Str(ipv4->Address)); + fprintf(fp, "NETMASK=\"%s\"\n", ipv4Str(ipv4->SubnetMask)); + fprintf(fp, "GATEWAY=\"%s\"\n", ipv4Str(ipv4->Gateway)); + fprintf(fp, "DNSSERVERS=\"%s", ipv4Str(ipv4->DnsPrimary)); + if (ipv4->DnsSecondary != 0) + fprintf(fp, " %s", ipv4Str(ipv4->DnsSecondary)); + fprintf(fp, "\"\n"); + + fclose(fp); + + if (!ql_openwrt_is_wan(ifname)) + return; + + ql_openwrt_system("ifup wan"); +} + +static void ql_openwrt_setup_wan6(const char *ifname, const IPV6_T *ipv6) { + FILE *fp = NULL; + char config[64]; + int first_ifup; + + snprintf(config, sizeof(config), "/tmp/rmnet_%s_ipv6config", ifname); + + if (ipv6 == NULL) { + if (ql_openwrt_is_wan(ifname)) + ql_openwrt_system("ifdown wan6"); + return; + } + + first_ifup = (access(config, F_OK) != 0); + + fp = fopen(config, "w"); + if (fp == NULL) + return; + + fprintf(fp, "IFNAME=\"%s\"\n", ifname); + fprintf(fp, "PUBLIC_IP=\"%s\"\n", ipv6Str(ipv6->Address)); + fprintf(fp, "NETMASK=\"%s\"\n", ipv6Str(ipv6->SubnetMask)); + fprintf(fp, "GATEWAY=\"%s\"\n", ipv6Str(ipv6->Gateway)); + fprintf(fp, "PrefixLength=\"%d\"\n", ipv6->PrefixLengthIPAddr); + fprintf(fp, "DNSSERVERS=\"%s", ipv6Str(ipv6->DnsPrimary)); + if (ipv6->DnsSecondary[0]) + fprintf(fp, " %s", ipv6Str(ipv6->DnsSecondary)); + fprintf(fp, "\"\n"); + + fclose(fp); + + if (!ql_openwrt_is_wan(ifname)) + return; + + if (first_ifup) + ql_openwrt_system("ifup wan6"); + else + ql_openwrt_system("/etc/init.d/network restart"); //make PC to release old IPV6 address, and RS new IPV6 address + +#if 1 //TODO? why need this? 
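+//Likely answer to the TODO above (an assumption, not confirmed by the vendor):
+//the kernel installs the delegated IPv6 prefix as an on-link route on the WWAN
+//interface, while on OpenWrt the clients that actually use addresses from that
+//prefix sit behind br-lan. The block below re-points the prefix route from the
+//WWAN interface to br-lan so traffic for the delegated addresses reaches the LAN.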
+    if (openwrt_lan) {
+        int i;
+        char shell_cmd[128];
+        UCHAR Address[16] = {0};
+
+        ql_openwrt_system("ifstatus lan");
+
+        for (i = 0; i < (ipv6->PrefixLengthIPAddr/8); i++)
+            Address[i] = ipv6->Address[i];
+
+        snprintf(shell_cmd, sizeof(shell_cmd), "ip route del %s/%u dev %s", ipv6Str(Address), ipv6->PrefixLengthIPAddr, ifname);
+        ql_openwrt_system(shell_cmd);
+
+        snprintf(shell_cmd, sizeof(shell_cmd), "ip route add %s/%u dev %s", ipv6Str(Address), ipv6->PrefixLengthIPAddr, openwrt_lan);
+        ql_system(shell_cmd);
+    }
+#endif
+}
+#endif
+
+void udhcpc_start(PROFILE_T *profile) {
+    char *ifname = profile->usbnet_adapter;
+
+    ql_set_driver_link_state(profile, 1);
+
+    if (profile->qmapnet_adapter[0]) {
+        ifname = profile->qmapnet_adapter;
+    }
+
+    if (profile->rawIP && profile->ipv4.Address && profile->ipv4.Mtu) {
+        ql_set_mtu(ifname, (profile->ipv4.Mtu));
+    }
+
+    if (strcmp(ifname, profile->usbnet_adapter)) {
+        ifc_set_state(profile->usbnet_adapter, 1);
+        if (ifc_get_flags(ifname)&IFF_UP) {
+            ifc_set_state(ifname, 0);
+        }
+    }
+
+    ifc_set_state(ifname, 1);
+    if (profile->ipv4.Address) {
+        if (profile->PCSCFIpv4Addr1)
+            dbg_time("pcscf1: %s", ipv4Str(profile->PCSCFIpv4Addr1));
+        if (profile->PCSCFIpv4Addr2)
+            dbg_time("pcscf2: %s", ipv4Str(profile->PCSCFIpv4Addr2));
+    }
+
+    if (profile->ipv6.Address[0] && profile->ipv6.PrefixLengthIPAddr) {
+        if (profile->PCSCFIpv6Addr1[0])
+            dbg_time("pcscf1: %s", ipv6Str(profile->PCSCFIpv6Addr1));
+        if (profile->PCSCFIpv6Addr2[0])
+            dbg_time("pcscf2: %s", ipv6Str(profile->PCSCFIpv6Addr2));
+    }
+
+#if 1 //in bridge mode there is only one public IP, so leave DHCP to the attached host
+    if (ql_bridge_mode_detect(profile)) {
+        return;
+    }
+#endif
+
+//Because udhcpc must be used to obtain the IP when working in ETH mode,
+//it is better to also use udhcpc when working in IP mode:
+//use the same policy for all modules.
+#if 0
+    if (profile->rawIP != 0) //mdm9x07/ec25,ec20 R2.0
+    {
+        update_ip_address_by_qmi(ifname, &profile->ipv4, &profile->ipv6);
+        return;
+    }
+#endif
+
+    if (profile->ipv4.Address == 0)
+        goto set_ipv6;
+
+    if (profile->no_dhcp || profile->request_ops == &mbim_request_ops) { //many MBIM modems do not support DHCP
+        update_ip_address_by_qmi(ifname, &profile->ipv4, NULL);
+    }
+    else
+/* Do DHCP using busybox tools */
+    {
+        char udhcpc_cmd[128];
+        pthread_attr_t udhcpc_thread_attr;
+        pthread_t udhcpc_thread_id;
+
+        pthread_attr_init(&udhcpc_thread_attr);
+        pthread_attr_setdetachstate(&udhcpc_thread_attr, PTHREAD_CREATE_DETACHED);
+
+#ifdef USE_DHCLIENT
+        snprintf(udhcpc_cmd, sizeof(udhcpc_cmd), "dhclient -4 -d --no-pid %s", ifname);
+        dhclient_alive++;
+#else
+        if (access("/usr/share/udhcpc/default.script", X_OK)
+            && access("/etc/udhcpc/default.script", X_OK)) {
+            dbg_time("No default.script found; it should be in '/usr/share/udhcpc/' or '/etc/udhcpc/' depending on your udhcpc version!");
+        }
+
+        //-f,--foreground    Run in foreground
+        //-b,--background    Background if lease is not obtained
+        //-n,--now           Exit if lease is not obtained
+        //-q,--quit          Exit after obtaining lease
+        //-t,--retries N     Send up to N discover packets (default 3)
+        snprintf(udhcpc_cmd, sizeof(udhcpc_cmd), "busybox udhcpc -f -n -q -t 5 -i %s", ifname);
+#endif
+
+#if 1 //for OpenWrt
+        if (!access("/lib/netifd/dhcp.script", X_OK) && !access("/sbin/ifup", X_OK) && !access("/sbin/ifstatus", X_OK)) {
+#if 0 //20210415 do not print these messages
+            dbg_time("are you using OpenWrt?");
+            dbg_time("then udhcpc should not be called manually;");
+            dbg_time("instead, modify /etc/config/network as below:");
+            dbg_time("config interface wan");
+            dbg_time("\toption ifname %s", ifname);
+            dbg_time("\toption proto dhcp");
+            dbg_time("and use \"/sbin/ifstatus wan\" to check %s's status.", ifname);
+#endif
+        }
+#endif
+
+#ifdef USE_DHCLIENT
+        pthread_create(&udhcpc_thread_id, &udhcpc_thread_attr, udhcpc_thread_function, (void*)strdup(udhcpc_cmd));
+        sleep(1);
+#else
+        pthread_create(&udhcpc_thread_id, NULL, udhcpc_thread_function, (void*)strdup(udhcpc_cmd));
+        pthread_join(udhcpc_thread_id, NULL);
+
+        if (profile->request_ops == &atc_request_ops) {
+            profile->udhcpc_ip = 0;
+            ifc_get_addr(ifname, &profile->udhcpc_ip);
+            if (profile->udhcpc_ip != profile->ipv4.Address) {
+                unsigned char *l = (unsigned char *)&profile->udhcpc_ip;
+                unsigned char *r = (unsigned char *)&profile->ipv4.Address;
+                dbg_time("ERROR: IP from udhcpc (%d.%d.%d.%d) is different from IP from ATC (%d.%d.%d.%d)!",
+                    l[0], l[1], l[2], l[3], r[0], r[1], r[2], r[3]);
+                ql_get_netcard_carrier_state(ifname); //udhcpc default.script is missing, or the modem did not report usb-net-cdc-linkup
+            }
+        }
+
+        if (profile->request_ops != &qmi_request_ops) { //only QMI modems support the next fixup!
+            goto set_ipv6;
+        }
+
+        if (ql_raw_ip_mode_check(ifname, profile->ipv4.Address)) {
+            pthread_create(&udhcpc_thread_id, NULL, udhcpc_thread_function, (void*)strdup(udhcpc_cmd));
+            pthread_join(udhcpc_thread_id, NULL);
+        }
+
+        if (!ql_netcard_ipv4_address_check(ifname, qmi2addr(profile->ipv4.Address))) {
+            //no udhcpc default.script exists; set IP and DNS directly
+            update_ip_address_by_qmi(ifname, &profile->ipv4, NULL);
+        }
+        //Added by Demon: check the default route
+        FILE *rt_fp = NULL;
+        char rt_cmd[128] = {0};
+
+        //Check if there is a default route.
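+        //For example, with a hypothetical interface name "wwan0", the command
+        //built below expands to:
+        //    route -n | grep wwan0 | awk '{print $1}' | grep 0.0.0.0
+        //i.e. dump the kernel routing table, keep the lines for this interface,
+        //take the destination column, and look for 0.0.0.0 (the default route).
+        //Any output means a default route exists; no output triggers the warning.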
+ snprintf(rt_cmd, sizeof(rt_cmd), "route -n | grep %s | awk '{print $1}' | grep 0.0.0.0", ifname); + rt_fp = popen((const char *)rt_cmd, "r"); + if (rt_fp != NULL) { + char buf[20] = {0}; + int found_default_rt = 0; + + if (fgets(buf, sizeof(buf), rt_fp) != NULL) { + //Find the specified interface + found_default_rt = 1; + } + + if (1 == found_default_rt) { + //dbg_time("Route items found for %s", ifname); + } + else { + dbg_time("Warning: No route items found for %s", ifname); + } + + pclose(rt_fp); + } + //End by Demon. +#endif + } + +#ifdef QL_OPENWER_NETWORK_SETUP + ql_openwrt_setup_wan(ifname, &profile->ipv4); +#endif + +set_ipv6: + if (profile->ipv6.Address[0] && profile->ipv6.PrefixLengthIPAddr) { +#if 1 + //module do not support DHCPv6, only support 'Router Solicit' + //and it seem if enable /proc/sys/net/ipv6/conf/all/forwarding, Kernel do not send RS + const char *forward_file = "/proc/sys/net/ipv6/conf/all/forwarding"; + int forward_fd = open(forward_file, O_RDONLY); + if (forward_fd > 0) { + char forward_state[2]; + if (read(forward_fd, forward_state, 2) == -1) {}; + if (forward_state[0] == '1') { + //dbg_time("%s enabled, kernel maybe donot send 'Router Solicit'", forward_file); + } + close(forward_fd); + } + + update_ip_address_by_qmi(ifname, NULL, &profile->ipv6); + + if (profile->ipv6.DnsPrimary[0] || profile->ipv6.DnsSecondary[0]) { + char dns1str[64], dns2str[64]; + + if (profile->ipv6.DnsPrimary[0]) { + strcpy(dns1str, ipv6Str(profile->ipv6.DnsPrimary)); + } + + if (profile->ipv6.DnsSecondary[0]) { + strcpy(dns2str, ipv6Str(profile->ipv6.DnsSecondary)); + } + + update_resolv_conf(6, ifname, profile->ipv6.DnsPrimary[0] ? dns1str : NULL, + profile->ipv6.DnsSecondary[0] != '\0' ? dns2str : NULL); + } + +#ifdef QL_OPENWER_NETWORK_SETUP + ql_openwrt_setup_wan6(ifname, &profile->ipv6); +#endif +#else +#ifdef USE_DHCLIENT + snprintf(udhcpc_cmd, sizeof(udhcpc_cmd), "dhclient -6 -d --no-pid %s", ifname); + dhclient_alive++; +#else + /* + DHCPv6: Dibbler - a portable DHCPv6 + 1. download from http://klub.com.pl/dhcpv6/ + 2. cross-compile + 2.1 ./configure --host=arm-linux-gnueabihf + 2.2 copy dibbler-client to your board + 3. mkdir -p /var/log/dibbler/ /var/lib/ on your board + 4. create /etc/dibbler/client.conf on your board, the content is + log-mode short + log-level 7 + iface wwan0 { + ia + option dns-server + } + 5. run "dibbler-client start" to get ipV6 address + 6. 
run "route -A inet6 add default dev wwan0" to add default route + */ + snprintf(shell_cmd, sizeof(shell_cmd), "route -A inet6 add default %s", ifname); + ql_system(shell_cmd); + snprintf(udhcpc_cmd, sizeof(udhcpc_cmd), "dibbler-client run"); + dibbler_client_alive++; +#endif + + pthread_create(&udhcpc_thread_id, &udhcpc_thread_attr, udhcpc_thread_function, (void*)strdup(udhcpc_cmd)); +#endif + } +} + +void udhcpc_stop(PROFILE_T *profile) { + char *ifname = profile->usbnet_adapter; + char shell_cmd[128]; + + ql_set_driver_link_state(profile, 0); + + if (profile->qmapnet_adapter[0]) { + ifname = profile->qmapnet_adapter; + } + +#ifdef USE_DHCLIENT + if (dhclient_alive) { + system("killall dhclient"); + dhclient_alive = 0; + } +#endif + if (dibbler_client_alive) { + if (system("killall dibbler-client")) {}; + dibbler_client_alive = 0; + } + + profile->udhcpc_ip = 0; +//it seems when call netif_carrier_on(), and netcard 's IP is "0.0.0.0", will cause netif_queue_stopped() + if (!access("/sbin/ip", X_OK)) + snprintf(shell_cmd, sizeof(shell_cmd), "ip addr flush dev %s", ifname); + else + snprintf(shell_cmd, sizeof(shell_cmd), "ifconfig %s 0.0.0.0", ifname); + ql_system(shell_cmd); + ifc_set_state(ifname, 0); + +#ifdef QL_OPENWER_NETWORK_SETUP + ql_openwrt_setup_wan(ifname, NULL); + ql_openwrt_setup_wan6(ifname, NULL); +#endif +} diff --git a/wwan/app/quectel_cm_5G/src/udhcpc_netlink.c b/wwan/app/quectel_cm_5G/src/udhcpc_netlink.c new file mode 100644 index 0000000..5e05223 --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/udhcpc_netlink.c @@ -0,0 +1,179 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "libmnl/ifutils.h" +#include "libmnl/dhcp/dhcp.h" +#include "util.h" +#include "QMIThread.h" + +static int ql_raw_ip_mode_check(const char *ifname) +{ + int fd; + char raw_ip[128]; + char mode[2] = "X"; + int mode_change = 0; + + snprintf(raw_ip, sizeof(raw_ip), "/sys/class/net/%s/qmi/raw_ip", ifname); + if (access(raw_ip, F_OK)) + return 0; + + fd = open(raw_ip, O_RDWR | O_NONBLOCK | O_NOCTTY); + if (fd < 0) + { + dbg_time("%s %d fail to open(%s), errno:%d (%s)", __FILE__, __LINE__, raw_ip, errno, strerror(errno)); + return 0; + } + + read(fd, mode, 2); + if (mode[0] == '0' || mode[0] == 'N') + { + if_link_down(ifname); + dbg_time("echo Y > /sys/class/net/%s/qmi/raw_ip", ifname); + mode[0] = 'Y'; + write(fd, mode, 2); + mode_change = 1; + if_link_up(ifname); + } + + close(fd); + return mode_change; +} + +void ql_set_driver_link_state(PROFILE_T *profile, int link_state) +{ + char link_file[128]; + int fd; + int new_state = 0; + + snprintf(link_file, sizeof(link_file), "/sys/class/net/%s/link_state", profile->usbnet_adapter); + fd = open(link_file, O_RDWR | O_NONBLOCK | O_NOCTTY); + if (fd == -1) + { + if (errno != ENOENT) + dbg_time("Fail to access %s, errno: %d (%s)", link_file, errno, strerror(errno)); + return; + } + + if (profile->qmap_mode <= 1) + new_state = !!link_state; + else + { + //0x80 means link off this pdp + new_state = (link_state ? 
0x00 : 0x80) + profile->pdp; + } + + snprintf(link_file, sizeof(link_file), "%d\n", new_state); + write(fd, link_file, sizeof(link_file)); + + if (link_state == 0 && profile->qmap_mode > 1) + { + size_t rc; + + lseek(fd, 0, SEEK_SET); + rc = read(fd, link_file, sizeof(link_file)); + if (rc > 1 && (!strcasecmp(link_file, "0\n") || !strcasecmp(link_file, "0x0\n"))) + { + if_link_down(profile->usbnet_adapter); + } + } + + close(fd); +} + +void udhcpc_start(PROFILE_T *profile) +{ + char *ifname = profile->usbnet_adapter; + + ql_set_driver_link_state(profile, 1); + ql_raw_ip_mode_check(ifname); + + if (profile->qmapnet_adapter) + { + ifname = profile->qmapnet_adapter; + } + if (profile->rawIP && profile->ipv4.Address && profile->ipv4.Mtu) + { + if_set_mtu(ifname, (profile->ipv4.Mtu)); + } + + if (strcmp(ifname, profile->usbnet_adapter)) + { + if_link_up(profile->usbnet_adapter); + } + + if_link_up(ifname); + +#if 1 //for bridge mode, only one public IP, so do udhcpc manually + if (ql_bridge_mode_detect(profile)) + { + return; + } +#endif + // if use DHCP(should make with ${DHCP} src files) + // do_dhcp(ifname); + // return 0; + /* IPv4 Addr Info */ + if (profile->ipv4.Address) + { + dbg_time("IPv4 MTU: %d", profile->ipv4.Mtu); + dbg_time("IPv4 Address: %s", ipaddr_to_string_v4(ntohl(profile->ipv4.Address))); + dbg_time("IPv4 Netmask: %d", mask_to_prefix_v4(ntohl(profile->ipv4.SubnetMask))); + dbg_time("IPv4 Gateway: %s", ipaddr_to_string_v4(ntohl(profile->ipv4.Gateway))); + dbg_time("IPv4 DNS1: %s", ipaddr_to_string_v4(ntohl(profile->ipv4.DnsPrimary))); + dbg_time("IPv4 DNS2: %s", ipaddr_to_string_v4(ntohl(profile->ipv4.DnsSecondary))); + if_set_network_v4(ifname, ntohl(profile->ipv4.Address), + mask_to_prefix_v4(profile->ipv4.SubnetMask), + ntohl(profile->ipv4.Gateway), + ntohl(profile->ipv4.DnsPrimary), + ntohl(profile->ipv4.DnsSecondary)); + } + + if (profile->ipv6.Address[0] && profile->ipv6.PrefixLengthIPAddr) + { + //module do not support DHCPv6, only support 'Router Solicit' + //and it seem if enable /proc/sys/net/ipv6/conf/all/forwarding, Kernel do not send RS + const char *forward_file = "/proc/sys/net/ipv6/conf/all/forwarding"; + int forward_fd = open(forward_file, O_RDONLY); + if (forward_fd > 0) + { + char forward_state[2]; + read(forward_fd, forward_state, 2); + if (forward_state[0] == '1') + { + dbg_time("%s enabled, kernel maybe donot send 'Router Solicit'", forward_file); + } + close(forward_fd); + } + + dbg_time("IPv6 MTU: %d", profile->ipv6.Mtu); + dbg_time("IPv6 Address: %s", ipaddr_to_string_v6(profile->ipv6.Address)); + dbg_time("IPv6 Netmask: %d", profile->ipv6.PrefixLengthIPAddr); + dbg_time("IPv6 Gateway: %s", ipaddr_to_string_v6(profile->ipv6.Gateway)); + dbg_time("IPv6 DNS1: %s", ipaddr_to_string_v6(profile->ipv6.DnsPrimary)); + dbg_time("IPv6 DNS2: %s", ipaddr_to_string_v6(profile->ipv6.DnsSecondary)); + if_set_network_v6(ifname, profile->ipv6.Address, profile->ipv6.PrefixLengthIPAddr, + profile->ipv6.Gateway, profile->ipv6.DnsPrimary, profile->ipv6.DnsSecondary); + } +} + +void udhcpc_stop(PROFILE_T *profile) +{ + char *ifname = profile->usbnet_adapter; + + ql_set_driver_link_state(profile, 0); + + if (profile->qmapnet_adapter) + { + ifname = profile->qmapnet_adapter; + } + + if_link_down(ifname); + if_flush_v4_addr(ifname); + if_flush_v6_addr(ifname); +} diff --git a/wwan/app/quectel_cm_5G/src/udhcpc_script.c b/wwan/app/quectel_cm_5G/src/udhcpc_script.c new file mode 100644 index 0000000..3e164a4 --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/udhcpc_script.c @@ -0,0 
+1,132 @@ +/****************************************************************************** + @file udhcpc.c + @brief call DHCP tools to obtain IP address. + + DESCRIPTION + Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. + + --------------------------------------------------------------------------- + Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved. + Quectel Wireless Solution Proprietary and Confidential. + --------------------------------------------------------------------------- +******************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "util.h" +#include "QMIThread.h" + +#define IFDOWN_SCRIPT "/etc/quectel/ifdown.sh" +#define IFUP_SCRIPT "/etc/quectel/ifup.sh" + +static int ql_system(const char *shell_cmd) +{ + dbg_time("%s", shell_cmd); + return system(shell_cmd); +} + +uint32_t mask_to_prefix_v4(uint32_t mask) +{ + uint32_t prefix = 0; + while (mask) + { + mask = mask & (mask - 1); + prefix++; + } + return prefix; +} + +uint32_t mask_from_prefix_v4(uint32_t prefix) +{ + return ~((1 << (32 - prefix)) - 1); +} + +/* mask in int */ +uint32_t broadcast_from_mask(uint32_t ip, uint32_t mask) +{ + return (ip & mask) | (~mask); +} + +const char *ipaddr_to_string_v4(in_addr_t ipaddr, char *buf, size_t size) +{ + // static char buf[INET6_ADDRSTRLEN] = {'\0'}; + buf[0] = '\0'; + uint32_t addr = ipaddr; + return inet_ntop(AF_INET, &addr, buf, size); +} + +const char *ipaddr_to_string_v6(uint8_t *ipaddr, char *buf, size_t size) +{ + buf[0] = '\0'; + return inet_ntop(AF_INET6, ipaddr, buf, size); +} + +/** + * For more details see default.script + * + * The main aim of this function is offload ip management to script, CM has not interest in manage IP address. + * just tell script all the info about ip, mask, router, dns... + */ +void udhcpc_start(PROFILE_T *profile) +{ + char shell_cmd[1024]; + char ip[128]; + char subnet[128]; + char broadcast[128]; + char router[128]; + char domain1[128]; + char domain2[128]; + + if (NULL == getenv(IFUP_SCRIPT)) + return; + + // manage IPv4??? + // check rawip ??? + snprintf(shell_cmd, sizeof(shell_cmd), + " netiface=%s interface=%s mtu=%u ip=%s subnet=%s broadcast=%s router=%s" + " domain=\"%s %s\" %s", + profile->usbnet_adapter, + profile->qmapnet_adapter ? profile->qmapnet_adapter : profile->usbnet_adapter, + profile->ipv4.Mtu, + ipaddr_to_string_v4(ntohl(profile->ipv4.Address), ip, sizeof(ip)), + ipaddr_to_string_v4(ntohl(profile->ipv4.SubnetMask), subnet, sizeof(subnet)), + ipaddr_to_string_v4(ntohl(broadcast_from_mask(profile->ipv4.Address, profile->ipv4.SubnetMask)), + broadcast, sizeof(broadcast)), + ipaddr_to_string_v4(ntohl(profile->ipv4.Gateway), router, sizeof(router)), + ipaddr_to_string_v4(ntohl(profile->ipv4.DnsPrimary), domain1, sizeof(domain1)), + ipaddr_to_string_v4(ntohl(profile->ipv4.DnsSecondary), domain2, sizeof(domain2)), + getenv(IFUP_SCRIPT)); + ql_system(shell_cmd); + + // manage IPv6??? +} + +/** + * For more details see default.script + * + * The main aim of this function is offload ip management to script, CM has not interest in manage IP address. + * just tell script all the info about ip, mask, router, dns... 
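+ *
+ * Note the lookup contract (it mirrors udhcpc_start() above): the hook command is
+ * taken from getenv(IFDOWN_SCRIPT), i.e. from an environment variable whose name
+ * is the literal path string "/etc/quectel/ifdown.sh", so the caller must export
+ * that variable. A minimal hook honoring the exported variables might look like
+ * this (a sketch under that assumption, not a file shipped by this patch):
+ *
+ *     #!/bin/sh
+ *     # $netiface and $interface are set by udhcpc_stop()
+ *     ip -4 addr flush dev "$interface"
+ *     ip -6 addr flush dev "$interface"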
+ */ +void udhcpc_stop(PROFILE_T *profile) +{ + char shell_cmd[1024]; + + if (NULL == getenv(IFDOWN_SCRIPT)) + return; + + snprintf(shell_cmd, sizeof(shell_cmd), + "netiface=%s interface=%s %s", + profile->usbnet_adapter, + profile->qmapnet_adapter ? profile->qmapnet_adapter : profile->usbnet_adapter, + getenv(IFDOWN_SCRIPT)); + ql_system(shell_cmd); +} diff --git a/wwan/app/quectel_cm_5G/src/util.c b/wwan/app/quectel_cm_5G/src/util.c new file mode 100644 index 0000000..c84b076 --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/util.c @@ -0,0 +1,361 @@ +/****************************************************************************** + @file util.c + @brief some utils for this QCM tool. + + DESCRIPTION + Connectivity Management Tool for USB network adapter of Quectel wireless cellular modules. + + INITIALIZATION AND SEQUENCING REQUIREMENTS + None. + + --------------------------------------------------------------------------- + Copyright (c) 2016 - 2023 Quectel Wireless Solution, Co., Ltd. All Rights Reserved. + Quectel Wireless Solution Proprietary and Confidential. + --------------------------------------------------------------------------- +******************************************************************************/ + +#include +#include +typedef unsigned short sa_family_t; +#include + +#if defined(__STDC__) +#include +#define __V(x) x +#else +#include +#define __V(x) (va_alist) va_dcl +#define const +#define volatile +#endif + +#include + +#include "QMIThread.h" + +pthread_mutex_t cm_command_mutex = PTHREAD_MUTEX_INITIALIZER; +pthread_cond_t cm_command_cond = PTHREAD_COND_INITIALIZER; +unsigned int cm_recv_buf[1024]; + +int cm_open_dev(const char *dev) { + int fd; + + fd = open(dev, O_RDWR | O_NONBLOCK | O_NOCTTY); + if (fd != -1) { + fcntl(fd, F_SETFL, fcntl(fd,F_GETFL) | O_NONBLOCK); + fcntl(fd, F_SETFD, FD_CLOEXEC); + + if (!strncmp(dev, "/dev/tty", strlen("/dev/tty"))) + { + //disable echo on serial ports + struct termios ios; + + memset(&ios, 0, sizeof(ios)); + tcgetattr( fd, &ios ); + cfmakeraw(&ios); + cfsetispeed(&ios, B115200); + cfsetospeed(&ios, B115200); + tcsetattr( fd, TCSANOW, &ios ); + tcflush(fd, TCIOFLUSH); + } + } else { + dbg_time("Failed to open %s, errno: %d (%s)", dev, errno, strerror(errno)); + } + + return fd; +} + +int cm_open_proxy(const char *name) { + int sockfd = -1; + int reuse_addr = 1; + struct sockaddr_un sockaddr; + socklen_t alen; + + /*Create server socket*/ + sockfd = socket(AF_LOCAL, SOCK_STREAM, 0); + if (sockfd < 0) + return sockfd; + + memset(&sockaddr, 0, sizeof(sockaddr)); + sockaddr.sun_family = AF_LOCAL; + sockaddr.sun_path[0] = 0; + memcpy(sockaddr.sun_path + 1, name, strlen(name) ); + + alen = strlen(name) + offsetof(struct sockaddr_un, sun_path) + 1; + if(connect(sockfd, (struct sockaddr *)&sockaddr, alen) < 0) { + close(sockfd); + dbg_time("connect %s errno: %d (%s)", name, errno, strerror(errno)); + return -1; + } + setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &reuse_addr,sizeof(reuse_addr)); + fcntl(sockfd, F_SETFL, fcntl(sockfd,F_GETFL) | O_NONBLOCK); + fcntl(sockfd, F_SETFD, FD_CLOEXEC); + + dbg_time("connect to %s sockfd = %d", name, sockfd); + + return sockfd; +} + +static void setTimespecRelative(struct timespec *p_ts, long long msec) +{ + struct timeval tv; + + gettimeofday(&tv, (struct timezone *) NULL); + + /* what's really funny about this is that I know + pthread_cond_timedwait just turns around and makes this + a relative time again */ + p_ts->tv_sec = tv.tv_sec + (msec / 1000); + p_ts->tv_nsec = (tv.tv_usec + (msec % 1000) * 
1000L ) * 1000L; + if ((unsigned long)p_ts->tv_nsec >= 1000000000UL) { + p_ts->tv_sec += 1; + p_ts->tv_nsec -= 1000000000UL; + } +} + +int pthread_cond_timeout_np(pthread_cond_t *cond, pthread_mutex_t * mutex, unsigned msecs) { + if (msecs != 0) { + unsigned i; + unsigned t = msecs/4; + int ret = 0; + + if (t == 0) + t = 1; + + for (i = 0; i < msecs; i += t) { + struct timespec ts; + setTimespecRelative(&ts, t); +//very old uclibc do not support pthread_condattr_setclock(CLOCK_MONOTONIC) + ret = pthread_cond_timedwait(cond, mutex, &ts); //to advoid system time change + if (ret != ETIMEDOUT) { + if(ret) dbg_time("ret=%d, msecs=%u, t=%u", ret, msecs, t); + break; + } + } + + return ret; + } else { + return pthread_cond_wait(cond, mutex); + } +} + +const char * get_time(void) { + static char time_buf[128]; + struct timeval tv; + time_t time; + suseconds_t millitm; + struct tm *ti; + + gettimeofday (&tv, NULL); + + time= tv.tv_sec; + millitm = (tv.tv_usec + 500) / 1000; + + if (millitm == 1000) { + ++time; + millitm = 0; + } + + ti = localtime(&time); + sprintf(time_buf, "%02d-%02d_%02d:%02d:%02d:%03d", ti->tm_mon+1, ti->tm_mday, ti->tm_hour, ti->tm_min, ti->tm_sec, (int)millitm); + return time_buf; +} + +unsigned long clock_msec(void) +{ + struct timespec tm; + clock_gettime( CLOCK_MONOTONIC, &tm); + return (unsigned long)(tm.tv_sec*1000 + (tm.tv_nsec/1000000)); +} + +FILE *logfilefp = NULL; + +void update_resolv_conf(int iptype, const char *ifname, const char *dns1, const char *dns2) { + const char *dns_file = "/etc/resolv.conf"; + FILE *dns_fp; + char dns_line[256]; + #define MAX_DNS 16 + char *dns_info[MAX_DNS]; + char dns_tag[64]; + int dns_match = 0; + int i; + + snprintf(dns_tag, sizeof(dns_tag), "# IPV%d %s", iptype, ifname); + + for (i = 0; i < MAX_DNS; i++) + dns_info[i] = NULL; + + dns_fp = fopen(dns_file, "r"); + if (dns_fp) { + i = 0; + dns_line[sizeof(dns_line)-1] = '\0'; + + while((fgets(dns_line, sizeof(dns_line)-1, dns_fp)) != NULL) { + if ((strlen(dns_line) > 1) && (dns_line[strlen(dns_line) - 1] == '\n')) + dns_line[strlen(dns_line) - 1] = '\0'; + //dbg_time("%s", dns_line); + if (strstr(dns_line, dns_tag)) { + dns_match++; + continue; + } + dns_info[i++] = strdup(dns_line); + if (i == MAX_DNS) + break; + } + + fclose(dns_fp); + } + else if (errno != ENOENT) { + dbg_time("fopen %s fail, errno:%d (%s)", dns_file, errno, strerror(errno)); + return; + } + + if (dns1 == NULL && dns_match == 0) + return; + + dns_fp = fopen(dns_file, "w"); + if (dns_fp) { + if (dns1) + fprintf(dns_fp, "nameserver %s %s\n", dns1, dns_tag); + if (dns2) + fprintf(dns_fp, "nameserver %s %s\n", dns2, dns_tag); + + for (i = 0; i < MAX_DNS && dns_info[i]; i++) + fprintf(dns_fp, "%s\n", dns_info[i]); + fclose(dns_fp); + } + else { + dbg_time("fopen %s fail, errno:%d (%s)", dns_file, errno, strerror(errno)); + } + + for (i = 0; i < MAX_DNS && dns_info[i]; i++) + free(dns_info[i]); +} + +pid_t getpid_by_pdp(int pdp, const char* program_name) +{ + glob_t gt; + int ret; + char filter[16]; + pid_t pid; + + snprintf(filter, sizeof(filter), "-n %d", pdp); + ret = glob("/proc/*/cmdline", GLOB_NOSORT, NULL, >); + if (ret != 0) { + dbg_time("glob error, errno = %d(%s)", errno, strerror(errno)); + return -1; + } else { + int i = 0, fd = -1; + ssize_t nreads; + char cmdline[512] = {0}; + + for (i = 0; i < (int)gt.gl_pathc; i++) { + fd = open(gt.gl_pathv[i], O_RDONLY); + if (fd == -1) { + dbg_time("open %s failed, errno = %d(%s)", gt.gl_pathv[i], errno, strerror(errno)); + globfree(>); + return -1; + } + + nreads = 
read(fd, cmdline, sizeof(cmdline)); + if (nreads > 0) { + int pos = 0; + while (pos < nreads-1) { + if (cmdline[pos] == '\0') + cmdline[pos] = ' '; // space + pos++; + } + // printf("%s\n", cmdline); + } + + if (strstr(cmdline, program_name) && strstr(cmdline, filter)) { + char path[64] = {0}; + char pidstr[64] = {0}; + char *p; + + dbg_time("%s: %s", gt.gl_pathv[i], cmdline); + strcpy(path, gt.gl_pathv[i]); + p = strstr(gt.gl_pathv[i], "/cmdline"); + *p = '\0'; + while (*(--p) != '/') ; + + strcpy(pidstr, p+1); + pid = atoi(pidstr); + globfree(>); + + return pid; + } + } + } + + globfree(>); + return -1; +} + +void ql_get_driver_rmnet_info(PROFILE_T *profile, RMNET_INFO *rmnet_info) { + int ifc_ctl_sock; + struct ifreq ifr; + int rc; + int request = 0x89F3; + unsigned char data[512]; + + memset(rmnet_info, 0x00, sizeof(*rmnet_info)); + + ifc_ctl_sock = socket(AF_INET, SOCK_DGRAM, 0); + if (ifc_ctl_sock <= 0) { + dbg_time("socket() failed: %s\n", strerror(errno)); + return; + } + + memset(&ifr, 0, sizeof(struct ifreq)); + strncpy(ifr.ifr_name, profile->usbnet_adapter, IFNAMSIZ); + ifr.ifr_name[IFNAMSIZ - 1] = 0; + ifr.ifr_ifru.ifru_data = (void *)data; + + rc = ioctl(ifc_ctl_sock, request, &ifr); + if (rc < 0) { + if (errno != ENOTSUP) + dbg_time("ioctl(0x%x, qmap_settings) errno:%d (%s), rc=%d", request, errno, strerror(errno), rc); + } + else { + memcpy(rmnet_info, data, sizeof(*rmnet_info)); + } + + close(ifc_ctl_sock); +} + +void ql_set_driver_qmap_setting(PROFILE_T *profile, QMAP_SETTING *qmap_settings) { + int ifc_ctl_sock; + struct ifreq ifr; + int rc; + int request = 0x89F2; + + ifc_ctl_sock = socket(AF_INET, SOCK_DGRAM, 0); + if (ifc_ctl_sock <= 0) { + dbg_time("socket() failed: %s\n", strerror(errno)); + return; + } + + memset(&ifr, 0, sizeof(struct ifreq)); + strncpy(ifr.ifr_name, profile->usbnet_adapter, IFNAMSIZ); + ifr.ifr_name[IFNAMSIZ - 1] = 0; + ifr.ifr_ifru.ifru_data = (void *)qmap_settings; + + rc = ioctl(ifc_ctl_sock, request, &ifr); + if (rc < 0) { + dbg_time("ioctl(0x%x, qmap_settings) failed: %s, rc=%d", request, strerror(errno), rc); + } + + close(ifc_ctl_sock); +} + +void no_trunc_strncpy(char *dest, const char *src, size_t dest_size) +{ + size_t i = 0; + + for (i = 0; i < dest_size && *src; i++) { + *dest++ = *src++; + } + + *dest = 0; +} diff --git a/wwan/app/quectel_cm_5G/src/util.h b/wwan/app/quectel_cm_5G/src/util.h new file mode 100644 index 0000000..392d401 --- /dev/null +++ b/wwan/app/quectel_cm_5G/src/util.h @@ -0,0 +1,52 @@ +/** + @file + util.h + + @brief + This file provides the definitions, and declares some common APIs for list-algorithm. 
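+
+  Usage sketch for the intrusive list API declared below (illustrative only:
+  struct "item" and its field names are hypothetical; the macros and functions
+  are the ones declared in this header):
+
+      struct item { int value; struct listnode node; };
+      list_declare(queue);
+      struct item a = { .value = 1 };
+      list_add_tail(&queue, &a.node);
+      struct listnode *n;
+      list_for_each(n, &queue) {
+          struct item *p = node_to_item(n, struct item, node);
+          p->value++; // recover the containing struct from its embedded node
+      }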
+ + */ + +#ifndef _UTILS_H_ +#define _UTILS_H_ + +#include +#include + +struct listnode +{ + struct listnode *next; + struct listnode *prev; +}; + +#define node_to_item(node, container, member) \ + (container *) (((char*) (node)) - offsetof(container, member)) + +#define list_declare(name) \ + struct listnode name = { \ + .next = &name, \ + .prev = &name, \ + } + +#define list_for_each(node, list) \ + for (node = (list)->next; node != (list); node = node->next) + +#define list_for_each_reverse(node, list) \ + for (node = (list)->prev; node != (list); node = node->prev) + +void list_init(struct listnode *list); +void list_add_tail(struct listnode *list, struct listnode *item); +void list_add_head(struct listnode *head, struct listnode *item); +void list_remove(struct listnode *item); + +#define list_empty(list) ((list) == (list)->next) +#define list_head(list) ((list)->next) +#define list_tail(list) ((list)->prev) + +int epoll_register(int epoll_fd, int fd, unsigned int events); +int epoll_deregister(int epoll_fd, int fd); +const char * get_time(void); +unsigned long clock_msec(void); +pid_t getpid_by_pdp(int, const char*); + +#endif diff --git a/wwan/driver/quectel_MHI/Makefile b/wwan/driver/quectel_MHI/Makefile new file mode 100755 index 0000000..a0f7277 --- /dev/null +++ b/wwan/driver/quectel_MHI/Makefile @@ -0,0 +1,51 @@ +# +# Copyright (C) 2015 OpenWrt.org +# +# This is free software, licensed under the GNU General Public License v2. +# See /LICENSE for more information. +# + +include $(TOPDIR)/rules.mk + +PKG_NAME:=pcie_mhi +PKG_VERSION:=1.3.6 +PKG_RELEASE:=2 + +include $(INCLUDE_DIR)/kernel.mk +include $(INCLUDE_DIR)/package.mk + +PKG_BUILD_PARALLEL:=1 +PKG_BUILD_FLAGS:=gc-sections lto + +define KernelPackage/pcie_mhi + SUBMENU:=WWAN Support + TITLE:=Kernel pcie driver for MHI device + DEPENDS:=+pciids +pciutils +quectel-CM-5G +kmod-qmi_wwan_q + FILES:=$(PKG_BUILD_DIR)/pcie_mhi.ko + AUTOLOAD:=$(call AutoLoad,90,pcie_mhi) +endef + +define KernelPackage/pcie_mhi/description + Kernel module for register a custom pciemhi platform device. +endef + +EXTRA_CFLAGS+= \ + -I$(STAGING_DIR)/usr/include/qca-nss-drv \ + -Wno-unused-function + +MAKE_OPTS:= \ + ARCH="$(LINUX_KARCH)" \ + CROSS_COMPILE="$(TARGET_CROSS)" \ + EXTRA_CFLAGS="$(EXTRA_CFLAGS)" \ + CXXFLAGS="$(TARGET_CXXFLAGS)" \ + M="$(PKG_BUILD_DIR)" \ + $(EXTRA_KCONFIG) + +define Build/Compile + +$(MAKE) -C "$(LINUX_DIR)" $(strip $(MAKE_OPTS)) \ + $(KERNEL_MAKE_FLAGS) \ + $(PKG_JOBS) \ + modules +endef + +$(eval $(call KernelPackage,pcie_mhi)) diff --git a/wwan/driver/quectel_MHI/src/Makefile b/wwan/driver/quectel_MHI/src/Makefile new file mode 100644 index 0000000..1b2c1f5 --- /dev/null +++ b/wwan/driver/quectel_MHI/src/Makefile @@ -0,0 +1,34 @@ +#ccflags-y += -g +obj-m += pcie_mhi.o +pcie_mhi-objs := core/mhi_init.o core/mhi_main.o core/mhi_pm.o core/mhi_boot.o core/mhi_dtr.o controllers/mhi_qti.o +pcie_mhi-objs += devices/mhi_uci.o + +ifeq (1,1) +pcie_mhi-objs += devices/mhi_netdev_quectel.o +else +pcie_mhi-objs += devices/mhi_netdev.o +pcie_mhi-objs += devices/rmnet_handler.o +endif + +PWD := $(shell pwd) +ifeq ($(ARCH),) +ARCH := $(shell uname -m) +endif +ifeq ($(CROSS_COMPILE),) +CROSS_COMPILE := +endif +ifeq ($(KDIR),) +KDIR := /lib/modules/$(shell uname -r)/build +endif + +pcie_mhi: clean + $(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) modules + #cp pcie_mhi.ko /tftpboot/ + +clean: + $(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) clean + find . 
-name '*.o.ur-safe' | xargs rm -f
+
+install: pcie_mhi
+	sudo cp pcie_mhi.ko /lib/modules/${shell uname -r}/kernel/drivers/pci/
+	sudo depmod
diff --git a/wwan/driver/quectel_MHI/src/README b/wwan/driver/quectel_MHI/src/README
new file mode 100644
index 0000000..2bb6ff0
--- /dev/null
+++ b/wwan/driver/quectel_MHI/src/README
@@ -0,0 +1,36 @@
+1. port the pcie_mhi driver as follows
+
+$ git diff drivers/Makefile
+diff --git a/drivers/Makefile b/drivers/Makefile
+index 77fbc52..e45837e 100644
+--- a/drivers/Makefile
++++ b/drivers/Makefile
+@@ -184,3 +184,4 @@ obj-$(CONFIG_FPGA) += fpga/
+ obj-$(CONFIG_FSI) += fsi/
+ obj-$(CONFIG_TEE) += tee/
+ obj-$(CONFIG_MULTIPLEXER) += mux/
++obj-y += pcie_mhi/
+
+$ tree drivers/pcie_mhi/ -L 1
+drivers/pcie_mhi/
+ controllers
+ core
+ devices
+ Makefile
+
+2. check that the RG500 attached to the pcie_mhi driver successfully
+
+root@OpenWrt:/# lspci
+00:00.0 Class 0604: 17cb:0302
+01:00.0 Class ff00: 17cb:0306
+
+root@OpenWrt:~# dmesg | grep mhi
+[ 138.483252] mhi_init Quectel_Linux_PCIE_MHI_Driver_V1.3.0.6
+[ 138.492350] mhi_pci_probe pci_dev->name = 0000:01:00.0, domain=0, bus=1, slot=0, vendor=17CB, device=0306
+
+3. for usage, see the following logs
+
+log/QXDM_OVER_PCIE.txt
+log/AT_OVER_PCIE.txt
+log/MBIM_OVER_PCIE.txt
+log/QMI_OVER_PCIE.txt
diff --git a/wwan/driver/quectel_MHI/src/ReleaseNote.txt b/wwan/driver/quectel_MHI/src/ReleaseNote.txt
new file mode 100644
index 0000000..d923b98
--- /dev/null
+++ b/wwan/driver/quectel_MHI/src/ReleaseNote.txt
@@ -0,0 +1,103 @@
+Release Notes
+
+[V1.3.4]
+Date: 12/8/2022
+enhancement:
+  1. only allow enabling autosuspend when the module is in MHI_EE_AMSS
+  2. show PCIe link speed and width at driver probe
+  3. check the PCIe link status by reading the PCIe VID and PID at driver probe;
+     if the PCIe link is down, return -EIO
+  4. support RM520 (1eac:1004)
+  5. support QMAP command packets
+fix:
+  1. fix the tx queue being wrongly stopped during uplink throughput tests
+  2. fix the module failing to boot up after QFirehose, at very small probability
+  3. mhi uci: add a mutex lock for concurrent reads/writes
+
+[V1.3.3]
+Date: 30/6/2022
+enhancement:
+  1. remove one unnecessary kmalloc during QFirehose
+  2. support mhi monitor (like usbmon), usage: cat /sys/kernel/debug/mhi_q/0306_00\:01.00/mhimon
+  3. set the ring size of event 0 to 256 (from 1024), required by x6x
+  4. support the PCIE local network card mhi_swip0 (chan 46/47), disabled by default
+  5. port the IPQ5018 MHI rate control code from spf11.5
+  6. set the pcie rmnet download max qmap packet size to 15KB (same as the IPQ MHI Driver)
+  7. support setting different mac addresses for the rmnet net cards
+  8. when mhi netdev fails to malloc, use delayed work instead of work
+  9. optimize code for 'when the driver loads, the modem is still in MHI_EE_PTHRU'
+fix:
+  1. fix unsynchronized access to rp/wp when mhi_queue_xxx and mhi_process_xxx_ring run on different CPUs
+  2. set the dma mask at driver probe; some SOCs like rpi_4 need it
+
+[V1.3.2]
+Date: 12/16/2021
+enhancement:
+  1. support Linux Kernel V5.14
+  2. mhi_netdev_quectel.c: do not print logs in softirq context
+
+[V1.3.1]
+Date: 9/26/2021
+enhancement:
+fix:
+
+[V1.3.0.19]
+Date: 9/18/2021
+enhancement:
+  1. support sdx62 (17cb:0308)
+  2. support IPQ5018's NSS
+  3. use 'qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c' instead of my own rmnet_nss.c;
+     pcie_mhi.ko must then be loaded after rmnet_nss.ko
+  4. allow the bhi irq to be non-zero (for ipq5018)
+fix:
+
+[V1.3.0.18]
+Date: 4/14/2021
+enhancement:
+  1. 
support mbim multiple call, usage: + # insmod pcie_mhi.ko mhi_mbim_enabeld=1 qmap_mode=4 + # quectel-mbim-proxy -d /dev/mhi_MBIM & + # quectel-CM -n X +fix: + +[V1.3.0.17] +Date: 3/11/2021 +enhancement: +fix: + 1. fix CPU loading very high when TPUT test when only one MSI interrupt + 2. fix error on latest X24 modem + +[V1.3.0.16] +Date: 11/18/2020 +enhancement: +fix: + 1. add ring size to 32, for in-bound chan, if one ring is full, modem will not generate MSI interrupt for all chan + +[V1.3.0.15] +Date: 10/30/2020 +enhancement: + 1. support multi-modems, named as /dev/mhi_X +fix: + 1. fix compile error on kernel v5.8 + +[V1.3.0.14] +Date: 10/9/2020 +enhancement: + 1. suppport EM120&EM160 +fix: + 1. fix compile error on kernel v5.6 + 2. support runtime suspend + +[V1.3.0.13] +Date: 9/7/2020 +enhancement: + 1. suppport EM120&EM160 +fix: + 1. fix error on X55 + PCIE2.0(e.g IPQ4019) + 2. support runtime suspend + +[V1.3.0.12] +Date: 7/7/2020 +enhancement: + 1. suppport create only none netcard (enabled by marco MHI_NETDEV_ONE_CARD_MODE), +fix: \ No newline at end of file diff --git a/wwan/driver/quectel_MHI/src/controllers/Kconfig b/wwan/driver/quectel_MHI/src/controllers/Kconfig new file mode 100644 index 0000000..e18b38b --- /dev/null +++ b/wwan/driver/quectel_MHI/src/controllers/Kconfig @@ -0,0 +1,13 @@ +menu "MHI controllers" + +config MHI_QTI + tristate "MHI QTI" + depends on MHI_BUS + help + If you say yes to this option, MHI bus support for QTI modem chipsets + will be enabled. QTI PCIe based modems uses MHI as the communication + protocol. MHI control driver is the bus master for such modems. As the + bus master driver, it oversees power management operations such as + suspend, resume, powering on and off the device. + +endmenu diff --git a/wwan/driver/quectel_MHI/src/controllers/Makefile b/wwan/driver/quectel_MHI/src/controllers/Makefile new file mode 100644 index 0000000..ab9ec55 --- /dev/null +++ b/wwan/driver/quectel_MHI/src/controllers/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_MHI_QTI) += mhi_qti.o mhi_arch_qti.o diff --git a/wwan/driver/quectel_MHI/src/controllers/mhi_arch_qti.c b/wwan/driver/quectel_MHI/src/controllers/mhi_arch_qti.c new file mode 100644 index 0000000..de19d94 --- /dev/null +++ b/wwan/driver/quectel_MHI/src/controllers/mhi_arch_qti.c @@ -0,0 +1,275 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2018-2019, The Linux Foundation. 
All rights reserved.*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../core/mhi.h" +#include "mhi_qti.h" + +struct arch_info { + struct mhi_dev *mhi_dev; + struct msm_bus_scale_pdata *msm_bus_pdata; + u32 bus_client; + struct pci_saved_state *pcie_state; + struct pci_saved_state *ref_pcie_state; + struct dma_iommu_mapping *mapping; +}; + +struct mhi_bl_info { + struct mhi_device *mhi_device; + async_cookie_t cookie; + void *ipc_log; +}; + +/* ipc log markings */ +#define DLOG "Dev->Host: " +#define HLOG "Host: " + +#ifdef CONFIG_MHI_DEBUG + +#define MHI_IPC_LOG_PAGES (100) +enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_LVL_VERBOSE; + +#else + +#define MHI_IPC_LOG_PAGES (10) +enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_LVL_ERROR; + +#endif + +static int mhi_arch_set_bus_request(struct mhi_controller *mhi_cntrl, int index) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct arch_info *arch_info = mhi_dev->arch_info; + + MHI_LOG("Setting bus request to index %d\n", index); + + if (arch_info->bus_client) + return msm_bus_scale_client_update_request( + arch_info->bus_client, + index); + + /* default return success */ + return 0; +} + +static void mhi_bl_dl_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_bl_info *mhi_bl_info = mhi_device_get_devdata(mhi_dev); + char *buf = mhi_result->buf_addr; + + /* force a null at last character */ + buf[mhi_result->bytes_xferd - 1] = 0; + + ipc_log_string(mhi_bl_info->ipc_log, "%s %s", DLOG, buf); +} + +static void mhi_bl_dummy_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ +} + +static void mhi_bl_remove(struct mhi_device *mhi_dev) +{ + struct mhi_bl_info *mhi_bl_info = mhi_device_get_devdata(mhi_dev); + + ipc_log_string(mhi_bl_info->ipc_log, HLOG "Received Remove notif.\n"); + + /* wait for boot monitor to exit */ + async_synchronize_cookie(mhi_bl_info->cookie + 1); +} + +static void mhi_bl_boot_monitor(void *data, async_cookie_t cookie) +{ + struct mhi_bl_info *mhi_bl_info = data; + struct mhi_device *mhi_device = mhi_bl_info->mhi_device; + struct mhi_controller *mhi_cntrl = mhi_device->mhi_cntrl; + /* 15 sec timeout for booting device */ + const u32 timeout = msecs_to_jiffies(15000); + + /* wait for device to enter boot stage */ + wait_event_timeout(mhi_cntrl->state_event, mhi_cntrl->ee == MHI_EE_AMSS + || mhi_cntrl->ee == MHI_EE_DISABLE_TRANSITION, + timeout); + + if (mhi_cntrl->ee == MHI_EE_AMSS) { + ipc_log_string(mhi_bl_info->ipc_log, HLOG + "Device successfully booted to mission mode\n"); + + mhi_unprepare_from_transfer(mhi_device); + } else { + ipc_log_string(mhi_bl_info->ipc_log, HLOG + "Device failed to boot to mission mode, ee = %s\n", + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + } +} + +static int mhi_bl_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + char node_name[32]; + struct mhi_bl_info *mhi_bl_info; + + mhi_bl_info = devm_kzalloc(&mhi_dev->dev, sizeof(*mhi_bl_info), + GFP_KERNEL); + if (!mhi_bl_info) + return -ENOMEM; + + snprintf(node_name, sizeof(node_name), "mhi_bl_%04x_%02u.%02u.%02u", + mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus, mhi_dev->slot); + + mhi_bl_info->ipc_log = ipc_log_context_create(MHI_IPC_LOG_PAGES, + node_name, 0); + if (!mhi_bl_info->ipc_log) + return -EINVAL; + + mhi_bl_info->mhi_device = mhi_dev; + mhi_device_set_devdata(mhi_dev, mhi_bl_info); + + ipc_log_string(mhi_bl_info->ipc_log, HLOG + "Entered SBL, Session ID:0x%x\n", + 
mhi_dev->mhi_cntrl->session_id); + + /* start a thread to monitor entering mission mode */ + mhi_bl_info->cookie = async_schedule(mhi_bl_boot_monitor, mhi_bl_info); + + return 0; +} + +static const struct mhi_device_id mhi_bl_match_table[] = { + { .chan = "BL" }, + {}, +}; + +static struct mhi_driver mhi_bl_driver = { + .id_table = mhi_bl_match_table, + .remove = mhi_bl_remove, + .probe = mhi_bl_probe, + .ul_xfer_cb = mhi_bl_dummy_cb, + .dl_xfer_cb = mhi_bl_dl_cb, + .driver = { + .name = "MHI_BL", + .owner = THIS_MODULE, + }, +}; + +int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct arch_info *arch_info = mhi_dev->arch_info; + char node[32]; + + if (!arch_info) { + arch_info = devm_kzalloc(&mhi_dev->pci_dev->dev, + sizeof(*arch_info), GFP_KERNEL); + if (!arch_info) + return -ENOMEM; + + mhi_dev->arch_info = arch_info; + + snprintf(node, sizeof(node), "mhi_%04x_%02u.%02u.%02u", + mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus, + mhi_cntrl->slot); + mhi_cntrl->log_buf = ipc_log_context_create(MHI_IPC_LOG_PAGES, + node, 0); + mhi_cntrl->log_lvl = mhi_ipc_log_lvl; + + /* save reference state for pcie config space */ + arch_info->ref_pcie_state = pci_store_saved_state( + mhi_dev->pci_dev); + + mhi_driver_register(&mhi_bl_driver); + } + + return mhi_arch_set_bus_request(mhi_cntrl, 1); +} + +void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl) +{ + mhi_arch_set_bus_request(mhi_cntrl, 0); +} + +int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, bool graceful) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct arch_info *arch_info = mhi_dev->arch_info; + struct pci_dev *pci_dev = mhi_dev->pci_dev; + int ret; + + MHI_LOG("Entered\n"); + + if (graceful) { + pci_clear_master(pci_dev); + ret = pci_save_state(mhi_dev->pci_dev); + if (ret) { + MHI_ERR("Failed with pci_save_state, ret:%d\n", ret); + return ret; + } + + arch_info->pcie_state = pci_store_saved_state(pci_dev); + pci_disable_device(pci_dev); + } + + /* + * We will always attempt to put link into D3hot, however + * link down may have happened due to error fatal, so + * ignoring the return code + */ + pci_set_power_state(pci_dev, PCI_D3hot); + + /* release the resources */ + mhi_arch_set_bus_request(mhi_cntrl, 0); + + MHI_LOG("Exited\n"); + + return 0; +} + +int mhi_arch_link_on(struct mhi_controller *mhi_cntrl) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct arch_info *arch_info = mhi_dev->arch_info; + struct pci_dev *pci_dev = mhi_dev->pci_dev; + int ret; + + MHI_LOG("Entered\n"); + + /* request resources and establish link trainning */ + ret = mhi_arch_set_bus_request(mhi_cntrl, 1); + if (ret) + MHI_LOG("Could not set bus frequency, ret:%d\n", ret); + + ret = pci_set_power_state(pci_dev, PCI_D0); + if (ret) { + MHI_ERR("Failed to set PCI_D0 state, ret:%d\n", ret); + return ret; + } + + ret = pci_enable_device(pci_dev); + if (ret) { + MHI_ERR("Failed to enable device, ret:%d\n", ret); + return ret; + } + + ret = pci_load_and_free_saved_state(pci_dev, &arch_info->pcie_state); + if (ret) + MHI_LOG("Failed to load saved cfg state\n"); + + pci_restore_state(pci_dev); + pci_set_master(pci_dev); + + MHI_LOG("Exited\n"); + + return 0; +} diff --git a/wwan/driver/quectel_MHI/src/controllers/mhi_qcom.c b/wwan/driver/quectel_MHI/src/controllers/mhi_qcom.c new file mode 100644 index 0000000..df6ce19 --- /dev/null +++ b/wwan/driver/quectel_MHI/src/controllers/mhi_qcom.c @@ -0,0 +1,715 @@ +/* 
Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../core/mhi.h" +#include "mhi_qcom.h" + +#if 1 +#ifndef PCI_IRQ_MSI +#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,53 )) +int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msi_block(dev, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +#endif + +static int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags) +{ + return pci_enable_msi_range(dev, min_vecs, max_vecs); +} + +static void pci_free_irq_vectors(struct pci_dev *dev) +{ + pci_disable_msi(dev); +} + +static int pci_irq_vector(struct pci_dev *dev, unsigned int nr) +{ + return dev->irq + nr; +} +#endif +#endif + +static struct pci_device_id mhi_pcie_device_id[] = { + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0303)}, //SDX20 + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0304)}, //SDX24 + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0305)}, + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0306)}, //SDX55 + {PCI_DEVICE(0x2C7C, 0x0512)}, + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, MHI_PCIE_DEBUG_ID)}, + {0}, +}; + +MODULE_DEVICE_TABLE(pci, mhi_pcie_device_id); + +static struct pci_driver mhi_pcie_driver; + +void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct pci_dev *pci_dev = mhi_dev->pci_dev; + + pci_free_irq_vectors(pci_dev); + iounmap(mhi_cntrl->regs); + mhi_cntrl->regs = NULL; + pci_clear_master(pci_dev); + pci_release_region(pci_dev, mhi_dev->resn); + pci_disable_device(pci_dev); +} + +static int mhi_init_pci_dev(struct mhi_controller *mhi_cntrl) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct pci_dev *pci_dev = mhi_dev->pci_dev; + int ret; + resource_size_t start, len; + int i; + + mhi_dev->resn = MHI_PCI_BAR_NUM; + ret = pci_assign_resource(pci_dev, mhi_dev->resn); + if (ret) { + MHI_ERR("Error assign pci resources, ret:%d\n", ret); + return ret; + } + + ret = pci_enable_device(pci_dev); + if (ret) { + MHI_ERR("Error enabling device, ret:%d\n", ret); + goto error_enable_device; + } + + ret = pci_request_region(pci_dev, mhi_dev->resn, "mhi"); + if (ret) { + MHI_ERR("Error pci_request_region, ret:%d\n", ret); + goto error_request_region; + } + + pci_set_master(pci_dev); + + start = pci_resource_start(pci_dev, mhi_dev->resn); + len = pci_resource_len(pci_dev, mhi_dev->resn); + mhi_cntrl->regs = ioremap_nocache(start, len); + MHI_LOG("mhi_cntrl->regs = %p\n", mhi_cntrl->regs); + if (!mhi_cntrl->regs) { + MHI_ERR("Error ioremap region\n"); + goto error_ioremap; + } + + ret = pci_alloc_irq_vectors(pci_dev, 1, mhi_cntrl->msi_required, PCI_IRQ_MSI); + if 
(IS_ERR_VALUE((ulong)ret) || ret < mhi_cntrl->msi_required) { + if (ret == -ENOSPC) { + /* imx_3.14.52_1.1.0_ga + diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c + index f06e8f0..6a9614f 100644 + --- a/drivers/pci/host/pcie-designware.c + +++ b/drivers/pci/host/pcie-designware.c + @@ -376,6 +376,13 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, + if (msgvec > 5) + msgvec = 0; + + +#if 1 //Add by Quectel 20190419 + + if (msgvec > 0 && pdev->vendor == 0x17cb) { + + dev_info(&pdev->dev, "%s quectel fixup pos=%d, msg_ctr=%04x, msgvec=%d\n", __func__, desc->msi_attrib.pos, msg_ctr, msgvec); + + msgvec = 0; + + } + +#endif + + + irq = assign_irq((1 << msgvec), desc, &pos); + if (irq < 0) + return irq; + */ + } + //imx_4.1.15_2.0.0_ga & DELL_OPTIPLEX_7010 only alloc one msi interrupt for one pcie device + if (ret != 1) { + MHI_ERR("Failed to enable MSI, ret=%d, msi_required=%d\n", ret, mhi_cntrl->msi_required); + goto error_req_msi; + } + } + + mhi_cntrl->msi_allocated = ret; + MHI_LOG("msi_required = %d, msi_allocated = %d, msi_irq = %u\n", mhi_cntrl->msi_required, mhi_cntrl->msi_allocated, pci_dev->irq); + + for (i = 0; i < mhi_cntrl->msi_allocated; i++) { + mhi_cntrl->irq[i] = pci_irq_vector(pci_dev, i); + if (mhi_cntrl->irq[i] < 0) { + ret = mhi_cntrl->irq[i]; + goto error_get_irq_vec; + } + } + +#if 0 + /* configure runtime pm */ + pm_runtime_set_autosuspend_delay(&pci_dev->dev, MHI_RPM_SUSPEND_TMR_MS); + pm_runtime_dont_use_autosuspend(&pci_dev->dev); + pm_suspend_ignore_children(&pci_dev->dev, true); + + /* + * pci framework will increment usage count (twice) before + * calling local device driver probe function. + * 1st pci.c pci_pm_init() calls pm_runtime_forbid + * 2nd pci-driver.c local_pci_probe calls pm_runtime_get_sync + * Framework expect pci device driver to call + * pm_runtime_put_noidle to decrement usage count after + * successful probe and and call pm_runtime_allow to enable + * runtime suspend. + */ + pm_runtime_mark_last_busy(&pci_dev->dev); + pm_runtime_put_noidle(&pci_dev->dev); +#endif + + return 0; + +error_get_irq_vec: + pci_free_irq_vectors(pci_dev); + +error_req_msi: + iounmap(mhi_cntrl->regs); + +error_ioremap: + pci_clear_master(pci_dev); + +error_request_region: + pci_disable_device(pci_dev); + +error_enable_device: + pci_release_region(pci_dev, mhi_dev->resn); + + return ret; +} + +#ifdef CONFIG_PM +static int mhi_runtime_idle(struct device *dev) +{ + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + + MHI_LOG("Entered returning -EBUSY\n"); + + /* + * RPM framework during runtime resume always calls + * rpm_idle to see if device ready to suspend. + * If dev.power usage_count count is 0, rpm fw will call + * rpm_idle cb to see if device is ready to suspend. + * if cb return 0, or cb not defined the framework will + * assume device driver is ready to suspend; + * therefore, fw will schedule runtime suspend. + * In MHI power management, MHI host shall go to + * runtime suspend only after entering MHI State M2, even if + * usage count is 0. Return -EBUSY to disable automatic suspend. 
+ */ + return -EBUSY; +} + +static int mhi_runtime_suspend(struct device *dev) +{ + int ret = 0; + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + + MHI_LOG("Enter\n"); + + mutex_lock(&mhi_cntrl->pm_mutex); + + ret = mhi_pm_suspend(mhi_cntrl); + if (ret) { + MHI_LOG("Abort due to ret:%d\n", ret); + goto exit_runtime_suspend; + } + + ret = mhi_arch_link_off(mhi_cntrl, true); + if (ret) + MHI_ERR("Failed to Turn off link ret:%d\n", ret); + +exit_runtime_suspend: + mutex_unlock(&mhi_cntrl->pm_mutex); + MHI_LOG("Exited with ret:%d\n", ret); + + return ret; +} + +static int mhi_runtime_resume(struct device *dev) +{ + int ret = 0; + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + MHI_LOG("Enter\n"); + + mutex_lock(&mhi_cntrl->pm_mutex); + + if (!mhi_dev->powered_on) { + MHI_LOG("Not fully powered, return success\n"); + mutex_unlock(&mhi_cntrl->pm_mutex); + return 0; + } + + /* turn on link */ + ret = mhi_arch_link_on(mhi_cntrl); + if (ret) + goto rpm_resume_exit; + + /* enter M0 state */ + ret = mhi_pm_resume(mhi_cntrl); + +rpm_resume_exit: + mutex_unlock(&mhi_cntrl->pm_mutex); + MHI_LOG("Exited with :%d\n", ret); + + return ret; +} + +static int mhi_system_resume(struct device *dev) +{ + int ret = 0; + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + + ret = mhi_runtime_resume(dev); + if (ret) { + MHI_ERR("Failed to resume link\n"); + } else { + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + } + + return ret; +} + +int mhi_system_suspend(struct device *dev) +{ + struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev); + + MHI_LOG("Entered\n"); + + /* if rpm status still active then force suspend */ + if (!pm_runtime_status_suspended(dev)) + return mhi_runtime_suspend(dev); + + pm_runtime_set_suspended(dev); + pm_runtime_disable(dev); + + MHI_LOG("Exit\n"); + return 0; +} +#endif + +/* checks if link is down */ +static int mhi_link_status(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + u16 dev_id; + int ret; + + /* try reading device id, if dev id don't match, link is down */ + ret = pci_read_config_word(mhi_dev->pci_dev, PCI_DEVICE_ID, &dev_id); + + return (ret || dev_id != mhi_cntrl->dev_id) ? 
-EIO : 0; +} + +static int mhi_runtime_get(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + return pm_runtime_get(dev); +} + +static void mhi_runtime_put(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + pm_runtime_put_noidle(dev); +} + +static void mhi_status_cb(struct mhi_controller *mhi_cntrl, + void *priv, + enum MHI_CB reason) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + if (reason == MHI_CB_IDLE) { + MHI_LOG("Schedule runtime suspend 1\n"); + pm_runtime_mark_last_busy(dev); + pm_request_autosuspend(dev); + } +} + +int mhi_debugfs_trigger_m0(void *data, u64 val) +{ + struct mhi_controller *mhi_cntrl = data; + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + MHI_LOG("Trigger M3 Exit\n"); + pm_runtime_get(&mhi_dev->pci_dev->dev); + pm_runtime_put(&mhi_dev->pci_dev->dev); + + return 0; +} + +int mhi_debugfs_trigger_m3(void *data, u64 val) +{ + struct mhi_controller *mhi_cntrl = data; + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + MHI_LOG("Trigger M3 Entry\n"); + pm_runtime_mark_last_busy(&mhi_dev->pci_dev->dev); + pm_request_autosuspend(&mhi_dev->pci_dev->dev); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m0_fops, NULL, + mhi_debugfs_trigger_m0, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m3_fops, NULL, + mhi_debugfs_trigger_m3, "%llu\n"); + +static int mhi_init_debugfs_trigger_go(void *data, u64 val) +{ + struct mhi_controller *mhi_cntrl = data; + + MHI_LOG("Trigger power up sequence\n"); + + mhi_async_power_up(mhi_cntrl); + + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(mhi_init_debugfs_trigger_go_fops, NULL, + mhi_init_debugfs_trigger_go, "%llu\n"); + + +int mhi_init_debugfs_debug_show(struct seq_file *m, void *d) +{ + seq_puts(m, "Enable debug mode to debug external soc\n"); + seq_puts(m, + "Usage: echo 'devid,timeout,domain,smmu_cfg' > debug_mode\n"); + seq_puts(m, "No spaces between parameters\n"); + seq_puts(m, "\t1. devid : 0 or pci device id to register\n"); + seq_puts(m, "\t2. timeout: mhi cmd/state transition timeout\n"); + seq_puts(m, "\t3. domain: Rootcomplex\n"); + seq_puts(m, "\t4. 
smmu_cfg: smmu configuration mask:\n"); + seq_puts(m, "\t\t- BIT0: ATTACH\n"); + seq_puts(m, "\t\t- BIT1: S1 BYPASS\n"); + seq_puts(m, "\t\t-BIT2: FAST_MAP\n"); + seq_puts(m, "\t\t-BIT3: ATOMIC\n"); + seq_puts(m, "\t\t-BIT4: FORCE_COHERENT\n"); + seq_puts(m, "\t\t-BIT5: GEOMETRY\n"); + seq_puts(m, "\tAll timeout are in ms, enter 0 to keep default\n"); + seq_puts(m, "Examples inputs: '0x307,10000'\n"); + seq_puts(m, "\techo '0,10000,1'\n"); + seq_puts(m, "\techo '0x307,10000,0,0x3d'\n"); + seq_puts(m, "firmware image name will be changed to debug.mbn\n"); + + return 0; +} + +static int mhi_init_debugfs_debug_open(struct inode *node, struct file *file) +{ + return single_open(file, mhi_init_debugfs_debug_show, NULL); +} + +static ssize_t mhi_init_debugfs_debug_write(struct file *fp, + const char __user *ubuf, + size_t count, + loff_t *pos) +{ + char *buf = kmalloc(count + 1, GFP_KERNEL); + /* #,devid,timeout,domain,smmu-cfg */ + int args[5] = {0}; + static char const *dbg_fw = "debug.mbn"; + int ret; + struct mhi_controller *mhi_cntrl = fp->f_inode->i_private; + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct pci_device_id *id; + + if (!buf) + return -ENOMEM; + + ret = copy_from_user(buf, ubuf, count); + if (ret) + goto error_read; + buf[count] = 0; + get_options(buf, ARRAY_SIZE(args), args); + kfree(buf); + + /* override default parameters */ + mhi_cntrl->fw_image = dbg_fw; + mhi_cntrl->edl_image = dbg_fw; + + if (args[0] >= 2 && args[2]) + mhi_cntrl->timeout_ms = args[2]; + + if (args[0] >= 3 && args[3]) + mhi_cntrl->domain = args[3]; + + if (args[0] >= 4 && args[4]) + mhi_dev->smmu_cfg = args[4]; + + /* If it's a new device id register it */ + if (args[0] && args[1]) { + /* find the debug_id and overwrite it */ + for (id = mhi_pcie_device_id; id->vendor; id++) + if (id->device == MHI_PCIE_DEBUG_ID) { + id->device = args[1]; + pci_unregister_driver(&mhi_pcie_driver); + ret = pci_register_driver(&mhi_pcie_driver); + } + } + + mhi_dev->debug_mode = true; + debugfs_create_file("go", 0444, mhi_cntrl->parent, mhi_cntrl, + &mhi_init_debugfs_trigger_go_fops); + pr_info( + "%s: ret:%d pcidev:0x%x smm_cfg:%u timeout:%u\n", + __func__, ret, args[1], mhi_dev->smmu_cfg, + mhi_cntrl->timeout_ms); + return count; + +error_read: + kfree(buf); + return ret; +} + +static const struct file_operations debugfs_debug_ops = { + .open = mhi_init_debugfs_debug_open, + .release = single_release, + .read = seq_read, + .write = mhi_init_debugfs_debug_write, +}; + +static struct mhi_controller * mhi_platform_probe(struct pci_dev *pci_dev) +{ + struct mhi_controller *mhi_cntrl; + struct mhi_dev *mhi_dev; + u64 addr_win[2]; + int ret; + + mhi_cntrl = mhi_alloc_controller(sizeof(*mhi_dev)); + if (!mhi_cntrl) { + pr_err("mhi_alloc_controller fail\n"); + return NULL; + } + + mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + mhi_cntrl->dev_id = pci_dev->device; + mhi_cntrl->domain = pci_domain_nr(pci_dev->bus); + mhi_cntrl->bus = pci_dev->bus->number; + mhi_cntrl->slot = PCI_SLOT(pci_dev->devfn); + mhi_dev->smmu_cfg = 0; + #if 0 //def CONFIG_HAVE_MEMBLOCK + addr_win[0] = memblock_start_of_DRAM(); + addr_win[1] = memblock_end_of_DRAM(); + #else +#define MHI_MEM_BASE_DEFAULT 0x000000000 +#define MHI_MEM_SIZE_DEFAULT 0x2000000000 + addr_win[0] = MHI_MEM_BASE_DEFAULT; + addr_win[1] = MHI_MEM_SIZE_DEFAULT; + if (sizeof(dma_addr_t) == 4) { + addr_win[1] = 0xFFFFFFFF; + } + #endif + + mhi_cntrl->iova_start = addr_win[0]; + mhi_cntrl->iova_stop = addr_win[1]; + + mhi_dev->pci_dev = pci_dev; + 
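+	/*
+	 * Illustrative sketch only (kept disabled): how the debug_mode
+	 * string accepted by mhi_init_debugfs_debug_write() above is
+	 * tokenized. get_options() stores the number of integers parsed
+	 * in args[0], so args[1] is the first value; the sample input
+	 * here is an assumed example, not a driver default.
+	 */
+#if 0
+	{
+		int args[5] = {0};
+		char input[] = "0x307,10000,0,0x3d";
+
+		get_options(input, ARRAY_SIZE(args), args);
+		/* args[0]=4: devid=0x307, timeout=10000ms, domain=0, smmu_cfg=0x3d */
+	}
+#endif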
mhi_cntrl->pci_dev = pci_dev; + + /* setup power management apis */ + mhi_cntrl->status_cb = mhi_status_cb; + mhi_cntrl->runtime_get = mhi_runtime_get; + mhi_cntrl->runtime_put = mhi_runtime_put; + mhi_cntrl->link_status = mhi_link_status; + + ret = mhi_arch_platform_init(mhi_dev); + if (ret) + goto error_probe; + + ret = mhi_register_mhi_controller(mhi_cntrl); + if (ret) + goto error_register; + + if (mhi_cntrl->parent) + debugfs_create_file("debug_mode", 0444, mhi_cntrl->parent, + mhi_cntrl, &debugfs_debug_ops); + + return mhi_cntrl; + +error_register: + mhi_arch_platform_deinit(mhi_dev); + +error_probe: + mhi_free_controller(mhi_cntrl); + + return NULL; +} + +int mhi_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *device_id) +{ + struct mhi_controller *mhi_cntrl = NULL; + u32 domain = pci_domain_nr(pci_dev->bus); + u32 bus = pci_dev->bus->number; + u32 slot = PCI_SLOT(pci_dev->devfn); + struct mhi_dev *mhi_dev; + int ret; + + pr_info("%s pci_dev->name = %s, domain=%d, bus=%d, slot=%d, vendor=%04X, device=%04X\n", + __func__, dev_name(&pci_dev->dev), domain, bus, slot, pci_dev->vendor, pci_dev->device); + + mhi_cntrl = mhi_platform_probe(pci_dev); + if (!mhi_cntrl) { + pr_err("mhi_platform_probe fail\n"); + return -EPROBE_DEFER; + } + + mhi_cntrl->dev_id = pci_dev->device; + mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + mhi_dev->pci_dev = pci_dev; + mhi_dev->powered_on = true; + + ret = mhi_arch_pcie_init(mhi_cntrl); + if (ret) { + MHI_ERR("Error mhi_arch_pcie_init, ret:%d\n", ret); + return ret; + } + + ret = mhi_arch_iommu_init(mhi_cntrl); + if (ret) { + MHI_ERR("Error mhi_arch_iommu_init, ret:%d\n", ret); + goto error_iommu_init; + } + + ret = mhi_init_pci_dev(mhi_cntrl); + if (ret) { + MHI_ERR("Error mhi_init_pci_dev, ret:%d\n", ret); + goto error_init_pci; + } + + /* start power up sequence if not in debug mode */ + if (!mhi_dev->debug_mode) { + ret = mhi_async_power_up(mhi_cntrl); + if (ret) { + MHI_ERR("Error mhi_async_power_up, ret:%d\n", ret); + goto error_power_up; + } + } + +#if 0 + pm_runtime_mark_last_busy(&pci_dev->dev); + pm_runtime_allow(&pci_dev->dev); + pm_runtime_disable(&pci_dev->dev); +#endif + + if (mhi_cntrl->dentry) { + debugfs_create_file("m0", 0444, mhi_cntrl->dentry, mhi_cntrl, + &debugfs_trigger_m0_fops); + debugfs_create_file("m3", 0444, mhi_cntrl->dentry, mhi_cntrl, + &debugfs_trigger_m3_fops); + } + + dev_set_drvdata(&pci_dev->dev, mhi_cntrl); + MHI_LOG("Return successful\n"); + + return 0; + +error_power_up: + mhi_deinit_pci_dev(mhi_cntrl); + +error_init_pci: + mhi_arch_iommu_deinit(mhi_cntrl); + +error_iommu_init: + mhi_arch_pcie_deinit(mhi_cntrl); + + return ret; +} + +static void mhi_pci_remove(struct pci_dev *pci_dev) +{ + struct mhi_controller *mhi_cntrl = (struct mhi_controller *)dev_get_drvdata(&pci_dev->dev); + + if (mhi_cntrl && mhi_cntrl->pci_dev == pci_dev) { + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + MHI_LOG("%s\n", dev_name(&pci_dev->dev)); + if (!mhi_dev->debug_mode) { + mhi_power_down(mhi_cntrl, 1); + } + mhi_deinit_pci_dev(mhi_cntrl); + mhi_arch_iommu_deinit(mhi_cntrl); + mhi_arch_pcie_deinit(mhi_cntrl); + mhi_unregister_mhi_controller(mhi_cntrl); + } +} + +static const struct dev_pm_ops pm_ops = { + SET_RUNTIME_PM_OPS(mhi_runtime_suspend, + mhi_runtime_resume, + mhi_runtime_idle) + SET_SYSTEM_SLEEP_PM_OPS(mhi_system_suspend, mhi_system_resume) +}; + +static struct pci_driver mhi_pcie_driver = { + .name = "mhi", + .id_table = mhi_pcie_device_id, + .probe = mhi_pci_probe, + .remove = 
mhi_pci_remove, + .driver = { + .pm = &pm_ops + } +}; + +int __init mhi_controller_qcom_init(void) +{ + return pci_register_driver(&mhi_pcie_driver); +}; + +void mhi_controller_qcom_exit(void) +{ + pr_info("%s enter\n", __func__); + pci_unregister_driver(&mhi_pcie_driver); + pr_info("%s exit\n", __func__); +} diff --git a/wwan/driver/quectel_MHI/src/controllers/mhi_qcom.h b/wwan/driver/quectel_MHI/src/controllers/mhi_qcom.h new file mode 100644 index 0000000..bced45b --- /dev/null +++ b/wwan/driver/quectel_MHI/src/controllers/mhi_qcom.h @@ -0,0 +1,92 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _MHI_QCOM_ +#define _MHI_QCOM_ + +/* iova cfg bitmask */ +#define MHI_SMMU_ATTACH BIT(0) +#define MHI_SMMU_S1_BYPASS BIT(1) +#define MHI_SMMU_FAST BIT(2) +#define MHI_SMMU_ATOMIC BIT(3) +#define MHI_SMMU_FORCE_COHERENT BIT(4) + +#define MHI_PCIE_VENDOR_ID (0x17cb) +#define MHI_PCIE_DEBUG_ID (0xffff) +#define MHI_RPM_SUSPEND_TMR_MS (3000) +#define MHI_PCI_BAR_NUM (0) + +struct mhi_dev { + struct pci_dev *pci_dev; + u32 smmu_cfg; + int resn; + void *arch_info; + bool powered_on; + bool debug_mode; +}; + +void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl); +int mhi_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *device_id); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,65 )) +static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask) +{ + int rc = dma_set_mask(dev, mask); + if (rc == 0) + dma_set_coherent_mask(dev, mask); + return rc; +} +#endif + +static inline int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + mhi_cntrl->dev = &mhi_dev->pci_dev->dev; + + return dma_set_mask_and_coherent(mhi_cntrl->dev, DMA_BIT_MASK(64)); +} + +static inline void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl) +{ +} + +static inline int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl) +{ + return 0; +} + +static inline void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl) +{ +} + +static inline int mhi_arch_platform_init(struct mhi_dev *mhi_dev) +{ + return 0; +} + +static inline void mhi_arch_platform_deinit(struct mhi_dev *mhi_dev) +{ +} + +static inline int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, + bool graceful) +{ + return 0; +} + +static inline int mhi_arch_link_on(struct mhi_controller *mhi_cntrl) +{ + return 0; +} + +#endif /* _MHI_QCOM_ */ diff --git a/wwan/driver/quectel_MHI/src/controllers/mhi_qti.c b/wwan/driver/quectel_MHI/src/controllers/mhi_qti.c new file mode 100644 index 0000000..4a064a7 --- /dev/null +++ b/wwan/driver/quectel_MHI/src/controllers/mhi_qti.c @@ -0,0 +1,1306 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2018-2019, The Linux Foundation. 
All rights reserved.*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#define MAX_MHI 8 +#ifdef CONFIG_PCI_MSM +#define QCOM_AP_AND_EFUSE_PCIE_SLEEP +#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP +#include +#include +#endif +#endif +//#define QCOM_AP_SDM845_IOMMU_MAP +#ifdef QCOM_AP_SDM845_IOMMU_MAP +#include +#include +#include +#endif +#include "../core/mhi.h" +#include "../core/mhi_internal.h" +#include "mhi_qti.h" + +#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP +extern int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val); +struct arch_info { + struct mhi_dev *mhi_dev; + struct msm_bus_scale_pdata *msm_bus_pdata; + u32 bus_client; + struct pci_saved_state *pcie_state; + struct pci_saved_state *ref_pcie_state; + struct dma_iommu_mapping *mapping; +}; +#endif + +#if 1 +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,65 )) +static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask) +{ + int rc = dma_set_mask(dev, mask); + if (rc == 0) + dma_set_coherent_mask(dev, mask); + return rc; +} +#endif + +#ifdef PCI_IRQ_NOMSIX +#define PCI_IRQ_MSI PCI_IRQ_NOMSIX +#endif + +#ifndef PCI_IRQ_MSI +#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */ + +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,53 )) +int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) +{ + int nvec = maxvec; + int rc; + + if (maxvec < minvec) + return -ERANGE; + + do { + rc = pci_enable_msi_block(dev, nvec); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + + return nvec; +} +#endif + +static int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, + unsigned int max_vecs, unsigned int flags) +{ + return pci_enable_msi_range(dev, min_vecs, max_vecs); +} + +static void pci_free_irq_vectors(struct pci_dev *dev) +{ + pci_disable_msi(dev); +} + +static int pci_irq_vector(struct pci_dev *dev, unsigned int nr) +{ +#if 0//defined(CONFIG_PINCTRL_IPQ5018) + struct pcie_port *pp = dev->bus->sysdata; + pp->msi[nr]; //msi maybe not continuous +#endif + return dev->irq + nr; +} +#endif +#endif + +struct firmware_info { + unsigned int dev_id; + const char *fw_image; + const char *edl_image; +}; + +static const struct firmware_info firmware_table[] = { + {.dev_id = 0x306, .fw_image = "sdx55m/sbl1.mbn"}, + {.dev_id = 0x305, .fw_image = "sdx50m/sbl1.mbn"}, + {.dev_id = 0x304, .fw_image = "sbl.mbn", .edl_image = "edl.mbn"}, + /* default, set to debug.mbn */ + {.fw_image = "debug.mbn"}, +}; + +static int debug_mode; +module_param_named(debug_mode, debug_mode, int, 0644); + +int mhi_debugfs_trigger_m0(void *data, u64 val) +{ + struct mhi_controller *mhi_cntrl = data; + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + MHI_LOG("Trigger M3 Exit\n"); + pm_runtime_get(&mhi_dev->pci_dev->dev); + pm_runtime_put(&mhi_dev->pci_dev->dev); + + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m0_fops, NULL, + mhi_debugfs_trigger_m0, "%llu\n"); + +int mhi_debugfs_trigger_m3(void *data, u64 val) +{ + struct mhi_controller *mhi_cntrl = data; + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + MHI_LOG("Trigger M3 Entry\n"); + pm_runtime_mark_last_busy(&mhi_dev->pci_dev->dev); + pm_request_autosuspend(&mhi_dev->pci_dev->dev); + + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_m3_fops, NULL, + mhi_debugfs_trigger_m3, "%llu\n"); + +void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl) +{ + struct 
mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct pci_dev *pci_dev = mhi_dev->pci_dev; + + pm_runtime_mark_last_busy(&pci_dev->dev); + pm_runtime_dont_use_autosuspend(&pci_dev->dev); + pm_runtime_disable(&pci_dev->dev); + pci_free_irq_vectors(pci_dev); + kfree(mhi_cntrl->irq); + mhi_cntrl->irq = NULL; + iounmap(mhi_cntrl->regs); + mhi_cntrl->regs = NULL; + pci_clear_master(pci_dev); + pci_release_region(pci_dev, mhi_dev->resn); + pci_disable_device(pci_dev); +} + +static int mhi_init_pci_dev(struct mhi_controller *mhi_cntrl) +{ + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct pci_dev *pci_dev = mhi_dev->pci_dev; + int ret; + resource_size_t len; + int i; + + mhi_dev->resn = MHI_PCI_BAR_NUM; + ret = pci_assign_resource(pci_dev, mhi_dev->resn); + if (ret) { + MHI_ERR("Error assign pci resources, ret:%d\n", ret); + return ret; + } + + ret = pci_enable_device(pci_dev); + if (ret) { + MHI_ERR("Error enabling device, ret:%d\n", ret); + goto error_enable_device; + } + + ret = pci_request_region(pci_dev, mhi_dev->resn, "mhi"); + if (ret) { + MHI_ERR("Error pci_request_region, ret:%d\n", ret); + goto error_request_region; + } + + pci_set_master(pci_dev); + +#if 1 //some SOC like rpi_4b need next codes + ret = -EIO; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) + if (!dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64))) { + ret = dma_set_coherent_mask(&pci_dev->dev, DMA_BIT_MASK(64)); + } else if (!dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) { + ret = dma_set_coherent_mask(&pci_dev->dev, DMA_BIT_MASK(32)); + } +#else + if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) { + ret = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64)); + } else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) { + ret = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(32)); + } +#endif + if (ret) { + MHI_ERR("Error dma mask\n"); + } +#endif + + mhi_cntrl->base_addr = pci_resource_start(pci_dev, mhi_dev->resn); + len = pci_resource_len(pci_dev, mhi_dev->resn); +#ifndef ioremap_nocache //4bdc0d676a643140bdf17dbf7eafedee3d496a3c +#define ioremap_nocache ioremap +#endif + mhi_cntrl->regs = ioremap_nocache(mhi_cntrl->base_addr, len); + if (!mhi_cntrl->regs) { + MHI_ERR("Error ioremap region\n"); + goto error_ioremap; + } + +#if 0 + ret = pci_alloc_irq_vectors(pci_dev, mhi_cntrl->msi_required, + mhi_cntrl->msi_required, PCI_IRQ_NOMSIX); + if (IS_ERR_VALUE((ulong)ret) || ret < mhi_cntrl->msi_required) { + MHI_ERR("Failed to enable MSI, ret:%d\n", ret); + goto error_req_msi; + } +#else + ret = pci_alloc_irq_vectors(pci_dev, 1, mhi_cntrl->msi_required, PCI_IRQ_MSI); + if (IS_ERR_VALUE((ulong)ret) || ret < mhi_cntrl->msi_required) { + if (ret == -ENOSPC) { + /* imx_3.14.52_1.1.0_ga + diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c + index f06e8f0..6a9614f 100644 + --- a/drivers/pci/host/pcie-designware.c + +++ b/drivers/pci/host/pcie-designware.c + @@ -376,6 +376,13 @@ static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, + if (msgvec > 5) + msgvec = 0; + + +#if 1 //Add by Quectel 20190419 + + if (msgvec > 0 && pdev->vendor == 0x17cb) { + + dev_info(&pdev->dev, "%s quectel fixup pos=%d, msg_ctr=%04x, msgvec=%d\n", __func__, desc->msi_attrib.pos, msg_ctr, msgvec); + + msgvec = 0; + + } + +#endif + + + irq = assign_irq((1 << msgvec), desc, &pos); + if (irq < 0) + return irq; + */ + } + //imx_4.1.15_2.0.0_ga & DELL_OPTIPLEX_7010 only alloc one msi interrupt for one pcie device + if (ret != 1) { + MHI_ERR("Failed to enable MSI, 
ret=%d, msi_required=%d\n", ret, mhi_cntrl->msi_required);
+			goto error_req_msi;
+		}
+	}
+	MHI_LOG("msi_required = %d, msi_allocated = %d, msi_irq = %u\n", mhi_cntrl->msi_required, ret, pci_dev->irq);
+#endif
+
+	mhi_cntrl->msi_allocated = ret;
+	mhi_cntrl->irq = kmalloc_array(mhi_cntrl->msi_allocated,
+				       sizeof(*mhi_cntrl->irq), GFP_KERNEL);
+	if (!mhi_cntrl->irq) {
+		ret = -ENOMEM;
+		goto error_alloc_msi_vec;
+	}
+
+	for (i = 0; i < mhi_cntrl->msi_allocated; i++) {
+		mhi_cntrl->irq[i] = pci_irq_vector(pci_dev, i);
+		if (mhi_cntrl->irq[i] < 0) {
+			ret = mhi_cntrl->irq[i];
+			goto error_get_irq_vec;
+		}
+	}
+
+	dev_set_drvdata(&pci_dev->dev, mhi_cntrl);
+
+	/* configure runtime pm */
+	pm_runtime_set_autosuspend_delay(&pci_dev->dev, MHI_RPM_SUSPEND_TMR_MS);
+	pm_runtime_use_autosuspend(&pci_dev->dev);
+	pm_suspend_ignore_children(&pci_dev->dev, true);
+
+	/*
+	 * The PCI framework increments the usage count (twice) before
+	 * calling the local device driver probe function:
+	 * 1st, pci.c pci_pm_init() calls pm_runtime_forbid;
+	 * 2nd, pci-driver.c local_pci_probe calls pm_runtime_get_sync.
+	 * The framework expects the PCI device driver to call
+	 * pm_runtime_put_noidle to decrement the usage count after a
+	 * successful probe, and to call pm_runtime_allow to enable
+	 * runtime suspend.
+	 */
+	pm_runtime_mark_last_busy(&pci_dev->dev);
+	pm_runtime_put_noidle(&pci_dev->dev);
+
+	return 0;
+
+	/* unwind in reverse order of acquisition */
+error_get_irq_vec:
+	kfree(mhi_cntrl->irq);
+	mhi_cntrl->irq = NULL;
+
+error_alloc_msi_vec:
+	pci_free_irq_vectors(pci_dev);
+
+error_req_msi:
+	iounmap(mhi_cntrl->regs);
+
+error_ioremap:
+	pci_clear_master(pci_dev);
+	pci_release_region(pci_dev, mhi_dev->resn);
+
+error_request_region:
+	pci_disable_device(pci_dev);
+
+error_enable_device:
+	return ret;
+}
+
+#ifdef CONFIG_PM
+static int mhi_runtime_suspend(struct device *dev)
+{
+	int ret = 0;
+	struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+
+	MHI_LOG("Enter\n");
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+
+	if (!mhi_dev->powered_on) {
+		MHI_LOG("Not fully powered, return success\n");
+		mutex_unlock(&mhi_cntrl->pm_mutex);
+		return 0;
+	}
+
+	if (mhi_cntrl->ee != MHI_EE_AMSS) {
+		MHI_LOG("Not AMSS, return busy\n");
+		mutex_unlock(&mhi_cntrl->pm_mutex);
+		return -EBUSY;
+	}
+
+	ret = mhi_pm_suspend(mhi_cntrl);
+	if (ret) {
+		MHI_LOG("Abort due to ret:%d\n", ret);
+		goto exit_runtime_suspend;
+	}
+
+	ret = mhi_arch_link_off(mhi_cntrl, true);
+	if (ret)
+		MHI_ERR("Failed to turn off link, ret:%d\n", ret);
+
+exit_runtime_suspend:
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+	MHI_LOG("Exited with ret:%d\n", ret);
+
+	return ret;
+}
+
+static int mhi_runtime_idle(struct device *dev)
+{
+	struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
+
+	if ((mhi_cntrl->dev_state == MHI_STATE_M0 || mhi_cntrl->dev_state == MHI_STATE_M3)
+		&& mhi_cntrl->ee == MHI_EE_AMSS) {
+		return 0;
+	}
+	MHI_LOG("Entered returning -EBUSY, mhi_state:%s exec_env:%s\n",
+		TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)), TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)));
+
+	/*
+	 * During runtime resume, the RPM framework always calls rpm_idle
+	 * to see if the device is ready to suspend: if dev.power
+	 * usage_count is 0, the framework invokes the rpm_idle callback,
+	 * and if the callback returns 0 (or none is defined) it assumes
+	 * the driver is ready and schedules runtime suspend.
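+	 *
+	 * Roughly, the sequence being described is:
+	 *   pm_runtime_put() -> usage_count reaches 0 -> rpm_idle() ->
+	 *   this callback -> (only if 0 is returned) runtime suspend.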
+	 * In MHI power management, the MHI host shall go to runtime
+	 * suspend only after entering MHI state M2, even if the usage
+	 * count is 0. Return -EBUSY here to disable automatic suspend.
+	 */
+	return -EBUSY;
+}
+
+static int mhi_runtime_resume(struct device *dev)
+{
+	int ret = 0;
+	struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
+	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+
+	MHI_LOG("Enter\n");
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+
+	if (!mhi_dev->powered_on) {
+		MHI_LOG("Not fully powered, return success\n");
+		mutex_unlock(&mhi_cntrl->pm_mutex);
+		return 0;
+	}
+
+	/* turn on link */
+	ret = mhi_arch_link_on(mhi_cntrl);
+	if (ret)
+		goto rpm_resume_exit;
+
+	/* enter M0 state */
+	ret = mhi_pm_resume(mhi_cntrl);
+
+rpm_resume_exit:
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+	MHI_LOG("Exited with ret:%d\n", ret);
+
+	return ret;
+}
+
+static int mhi_system_resume(struct device *dev)
+{
+	int ret = 0;
+	struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
+
+	ret = mhi_runtime_resume(dev);
+	if (ret) {
+		MHI_ERR("Failed to resume link\n");
+	} else {
+		//pm_runtime_set_active(dev);
+		//pm_runtime_enable(dev);
+	}
+
+	return ret;
+}
+
+int mhi_system_suspend(struct device *dev)
+{
+	struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
+	int ret;
+
+	MHI_LOG("Entered\n");
+
+	if (atomic_read(&mhi_cntrl->pending_pkts)) {
+		MHI_LOG("Abort due to pending_pkts:%d\n", atomic_read(&mhi_cntrl->pending_pkts));
+		return -EBUSY;
+	}
+
+	/* if rpm status is still active then force suspend */
+	if (!pm_runtime_status_suspended(dev)) {
+		ret = mhi_runtime_suspend(dev);
+		if (ret) {
+			MHI_LOG("suspend failed ret:%d\n", ret);
+			return ret;
+		}
+	}
+
+	//pm_runtime_set_suspended(dev);
+	//pm_runtime_disable(dev);
+
+	MHI_LOG("Exit\n");
+	return 0;
+}
+#endif
+
+/* checks if link is down */
+static int mhi_link_status(struct mhi_controller *mhi_cntrl, void *priv)
+{
+	struct mhi_dev *mhi_dev = priv;
+	u16 dev_id;
+	int ret;
+
+	/* try reading the device ID; if it does not match, the link is down */
+	ret = pci_read_config_word(mhi_dev->pci_dev, PCI_DEVICE_ID, &dev_id);
+
+	return (ret || dev_id != mhi_cntrl->dev_id) ? 
-EIO : 0;
+}
+
+/* disable PCIe L1 */
+static int mhi_lpm_disable(struct mhi_controller *mhi_cntrl, void *priv)
+{
+	struct mhi_dev *mhi_dev = priv;
+	struct pci_dev *pci_dev = mhi_dev->pci_dev;
+	int lnkctl = pci_dev->pcie_cap + PCI_EXP_LNKCTL;
+	u8 val;
+	int ret;
+
+	/* the ASPM control bits live in the low byte of LNKCTL, so byte access suffices */
+	ret = pci_read_config_byte(pci_dev, lnkctl, &val);
+	if (ret) {
+		MHI_ERR("Error reading LNKCTL, ret:%d\n", ret);
+		return ret;
+	}
+
+	/* L1 is not supported or already disabled */
+	if (!(val & PCI_EXP_LNKCTL_ASPM_L1))
+		return 0;
+
+	val &= ~PCI_EXP_LNKCTL_ASPM_L1;
+	ret = pci_write_config_byte(pci_dev, lnkctl, val);
+	if (ret) {
+		MHI_ERR("Error writing LNKCTL to disable LPM, ret:%d\n", ret);
+		return ret;
+	}
+
+	mhi_dev->lpm_disabled = true;
+
+	return ret;
+}
+
+/* enable PCIe L1 */
+static int mhi_lpm_enable(struct mhi_controller *mhi_cntrl, void *priv)
+{
+	struct mhi_dev *mhi_dev = priv;
+	struct pci_dev *pci_dev = mhi_dev->pci_dev;
+	int lnkctl = pci_dev->pcie_cap + PCI_EXP_LNKCTL;
+	u8 val;
+	int ret;
+
+	/* L1 was never disabled by us, so there is nothing to re-enable */
+	if (!mhi_dev->lpm_disabled)
+		return 0;
+
+	ret = pci_read_config_byte(pci_dev, lnkctl, &val);
+	if (ret) {
+		MHI_ERR("Error reading LNKCTL, ret:%d\n", ret);
+		return ret;
+	}
+
+	val |= PCI_EXP_LNKCTL_ASPM_L1;
+	ret = pci_write_config_byte(pci_dev, lnkctl, val);
+	if (ret) {
+		MHI_ERR("Error writing LNKCTL to enable LPM, ret:%d\n", ret);
+		return ret;
+	}
+
+	mhi_dev->lpm_disabled = false;
+
+	return ret;
+}
+
+static int mhi_power_up(struct mhi_controller *mhi_cntrl)
+{
+	enum mhi_dev_state dev_state = mhi_get_mhi_state(mhi_cntrl);
+	const u32 delayus = 10;
+	int itr = DIV_ROUND_UP(mhi_cntrl->timeout_ms * 1000, delayus);
+	int ret;
+
+	MHI_LOG("dev_state:%s\n", TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)));
+
+	/*
+	 * It's possible the device did not go through a cold reset before
+	 * power up and is still in error state. 
If device in error state, + * we need to trigger a soft reset before continue with power + * up + */ + if (dev_state == MHI_STATE_SYS_ERR) { + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); + while (itr--) { + dev_state = mhi_get_mhi_state(mhi_cntrl); + if (dev_state != MHI_STATE_SYS_ERR) + break; + usleep_range(delayus, delayus << 1); + } + MHI_LOG("dev_state:%s\n", TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl))); + + /* device still in error state, abort power up */ + if (dev_state == MHI_STATE_SYS_ERR) + return -EIO; + } + + ret = mhi_async_power_up(mhi_cntrl); + + /* power up create the dentry */ + if (mhi_cntrl->dentry) { + debugfs_create_file("m0", 0444, mhi_cntrl->dentry, mhi_cntrl, + &debugfs_trigger_m0_fops); + debugfs_create_file("m3", 0444, mhi_cntrl->dentry, mhi_cntrl, + &debugfs_trigger_m3_fops); + } + + return ret; +} + +static int mhi_runtime_get(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + return pm_runtime_get(dev); +} + +static void mhi_runtime_put(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + pm_runtime_mark_last_busy(dev); + pm_runtime_put(dev); +} + +static void mhi_runtime_mark_last_busy(struct mhi_controller *mhi_cntrl, void *priv) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + pm_runtime_mark_last_busy(dev); +} + +#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP +static void mhi_pci_event_cb(struct msm_pcie_notify *notify) +{ + struct pci_dev *pci_dev = notify->user; + struct device *dev = &pci_dev->dev; + + dev_info(&pci_dev->dev, "Received PCIe event %d", notify->event); + switch (notify->event) { + case MSM_PCIE_EVENT_WAKEUP: + if (dev && pm_runtime_status_suspended(dev)) { + pm_request_resume(dev); + pm_runtime_mark_last_busy(dev); + } + break; + default: + break; + } +} + +static struct msm_pcie_register_event mhi_pcie_events[MAX_MHI]; +#endif + +static void mhi_status_cb(struct mhi_controller *mhi_cntrl, + void *priv, + enum MHI_CB reason) +{ + struct mhi_dev *mhi_dev = priv; + struct device *dev = &mhi_dev->pci_dev->dev; + + switch (reason) { + case MHI_CB_FATAL_ERROR: + case MHI_CB_SYS_ERROR: + pm_runtime_forbid(dev); + break; + case MHI_CB_EE_MISSION_MODE: + //pm_runtime_allow(dev); + break; + default: + break; + } +} + +/* capture host SoC XO time in ticks */ +static u64 mhi_time_get(struct mhi_controller *mhi_cntrl, void *priv) +{ + return 0; +} + +static ssize_t timeout_ms_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + /* buffer provided by sysfs has a minimum size of PAGE_SIZE */ + return snprintf(buf, PAGE_SIZE, "%u\n", mhi_cntrl->timeout_ms); +} + +static ssize_t timeout_ms_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + u32 timeout_ms; + + if (kstrtou32(buf, 0, &timeout_ms) < 0) + return -EINVAL; + + mhi_cntrl->timeout_ms = timeout_ms; + + return count; +} +static DEVICE_ATTR_RW(timeout_ms); + +static ssize_t power_up_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + int ret; + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + ret = 
mhi_power_up(mhi_cntrl); + if (ret) + return ret; + + return count; +} +static DEVICE_ATTR_WO(power_up); + +static struct attribute *mhi_attrs[] = { + &dev_attr_timeout_ms.attr, + &dev_attr_power_up.attr, + NULL +}; + +static const struct attribute_group mhi_group = { + .attrs = mhi_attrs, +}; + +static struct mhi_controller *mhi_register_controller(struct pci_dev *pci_dev) +{ + struct mhi_controller *mhi_cntrl; + struct mhi_dev *mhi_dev; + struct device_node *of_node = pci_dev->dev.of_node; + const struct firmware_info *firmware_info; + bool use_bb; + u64 addr_win[2]; + int ret, i; + + //if (!of_node) + // return ERR_PTR(-ENODEV); + + mhi_cntrl = mhi_alloc_controller(sizeof(*mhi_dev)); + if (!mhi_cntrl) + return ERR_PTR(-ENOMEM); + + mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + mhi_cntrl->dev = &pci_dev->dev; + mhi_cntrl->domain = pci_domain_nr(pci_dev->bus); + mhi_cntrl->vendor = pci_dev->vendor; + mhi_cntrl->dev_id = pci_dev->device; + mhi_cntrl->bus = pci_dev->bus->number; + mhi_cntrl->slot = PCI_SLOT(pci_dev->devfn); + +#if 0 + use_bb = of_property_read_bool(of_node, "mhi,use-bb"); + + /* + * if s1 translation enabled or using bounce buffer pull iova addr + * from dt + */ + if (use_bb || (mhi_dev->smmu_cfg & MHI_SMMU_ATTACH && + !(mhi_dev->smmu_cfg & MHI_SMMU_S1_BYPASS))) { + ret = of_property_count_elems_of_size(of_node, "qti,addr-win", + sizeof(addr_win)); + if (ret != 1) + goto error_register; + ret = of_property_read_u64_array(of_node, "qti,addr-win", + addr_win, 2); + if (ret) + goto error_register; + } else { + addr_win[0] = memblock_start_of_DRAM(); + addr_win[1] = memblock_end_of_DRAM(); + } +#else + use_bb = false; + (void)use_bb; + addr_win[0] = 0x000000000; + addr_win[1] = 0x2000000000; //MHI_MEM_SIZE_DEFAULT + if (sizeof(dma_addr_t) == 4) { + addr_win[1] = 0xFFFFFFFF; + } +#endif + + mhi_dev->iova_start = addr_win[0]; + mhi_dev->iova_stop = addr_win[1]; + + /* + * If S1 is enabled, set MHI_CTRL start address to 0 so we can use low + * level mapping api to map buffers outside of smmu domain + */ + if (mhi_dev->smmu_cfg & MHI_SMMU_ATTACH && + !(mhi_dev->smmu_cfg & MHI_SMMU_S1_BYPASS)) + mhi_cntrl->iova_start = 0; + else + mhi_cntrl->iova_start = addr_win[0]; + + mhi_cntrl->iova_stop = mhi_dev->iova_stop; + mhi_cntrl->of_node = of_node; + + mhi_dev->pci_dev = pci_dev; + + /* setup power management apis */ + mhi_cntrl->status_cb = mhi_status_cb; + mhi_cntrl->runtime_get = mhi_runtime_get; + mhi_cntrl->runtime_put = mhi_runtime_put; + mhi_cntrl->runtime_mark_last_busy = mhi_runtime_mark_last_busy; + mhi_cntrl->link_status = mhi_link_status; + + mhi_cntrl->lpm_disable = mhi_lpm_disable; + mhi_cntrl->lpm_enable = mhi_lpm_enable; + mhi_cntrl->time_get = mhi_time_get; + + ret = of_register_mhi_controller(mhi_cntrl); + if (ret) + goto error_register; + + for (i = 0; i < ARRAY_SIZE(firmware_table); i++) { + firmware_info = firmware_table + i; + + /* debug mode always use default */ + if (!debug_mode && mhi_cntrl->dev_id == firmware_info->dev_id) + break; + } + +#if 0 + mhi_cntrl->fw_image = firmware_info->fw_image; + mhi_cntrl->edl_image = firmware_info->edl_image; +#endif + + if (sysfs_create_group(&mhi_cntrl->mhi_dev->dev.kobj, &mhi_group)) + MHI_ERR("Error while creating the sysfs group\n"); + + return mhi_cntrl; + +error_register: + mhi_free_controller(mhi_cntrl); + + return ERR_PTR(-EINVAL); +} + +static bool mhi_pci_is_alive(struct pci_dev *pdev) +{ + u16 vendor = 0; + + if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor)) + return false; + + if (vendor == (u16) ~0 
|| vendor == 0)
+		return false;
+
+	return true;
+}
+
+static void mhi_pci_show_link(struct mhi_controller *mhi_cntrl, struct pci_dev *pci_dev)
+{
+	int pcie_cap_reg;
+	u16 stat;
+	u32 caps;
+	const char *speed;
+
+	pcie_cap_reg = pci_find_capability(pci_dev, PCI_CAP_ID_EXP);
+
+	if (!pcie_cap_reg)
+		return;
+
+	pci_read_config_word(pci_dev,
+			     pcie_cap_reg + PCI_EXP_LNKSTA,
+			     &stat);
+	pci_read_config_dword(pci_dev,
+			      pcie_cap_reg + PCI_EXP_LNKCAP,
+			      &caps);
+
+	switch (caps & PCI_EXP_LNKCAP_SLS) {
+	case PCI_EXP_LNKCAP_SLS_2_5GB: speed = "2.5"; break;
+	case PCI_EXP_LNKCAP_SLS_5_0GB: speed = "5"; break;
+	case 3: speed = "8"; break;
+	case 4: speed = "16"; break;
+	case 5: speed = "32"; break;
+	case 6: speed = "64"; break;
+	default: speed = "0"; break;
+	}
+
+	MHI_LOG("LnkCap: Speed %sGT/s, Width x%d\n", speed,
+		(caps & PCI_EXP_LNKCAP_MLW) >> 4);
+
+	switch (stat & PCI_EXP_LNKSTA_CLS) {
+	case PCI_EXP_LNKSTA_CLS_2_5GB: speed = "2.5"; break;
+	case PCI_EXP_LNKSTA_CLS_5_0GB: speed = "5"; break;
+	case 3: speed = "8"; break;
+	case 4: speed = "16"; break;
+	case 5: speed = "32"; break;
+	case 6: speed = "64"; break;
+	default: speed = "0"; break;
+	}
+
+	MHI_LOG("LnkSta: Speed %sGT/s, Width x%d\n", speed,
+		(stat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
+}
+
+int mhi_pci_probe(struct pci_dev *pci_dev,
+		  const struct pci_device_id *device_id)
+{
+	struct mhi_controller *mhi_cntrl;
+	u32 domain = pci_domain_nr(pci_dev->bus);
+	u32 bus = pci_dev->bus->number;
+	u32 dev_id = pci_dev->device;
+	u32 slot = PCI_SLOT(pci_dev->devfn);
+	struct mhi_dev *mhi_dev;
+	int ret;
+
+	pr_info("%s pci_dev->name = %s, domain=%d, bus=%d, slot=%d, vendor=%04X, device=%04X\n",
+		__func__, dev_name(&pci_dev->dev), domain, bus, slot, pci_dev->vendor, pci_dev->device);
+
+#if !defined(CONFIG_PCI_MSI)
+	/* MT7621 RTL8198D EcoNet-EN7565 */
+	#error "PCIe MSI is not supported by this SoC, and this driver does not support INTx (SW1SDX55-2688)"
+#endif
+
+	if (!mhi_pci_is_alive(pci_dev)) {
+		/*
+		root@OpenWrt:~# hexdump /sys/bus/pci/devices/0000:01:00.0/config
+		0000000 ffff ffff ffff ffff ffff ffff ffff ffff
+		*
+		0001000
+		*/
+		pr_err("mhi_pci is not alive! 
pcie link is down\n"); + pr_err("double check by 'hexdump /sys/bus/pci/devices/%s/config'\n", dev_name(&pci_dev->dev)); + return -EIO; + } + + /* see if we already registered */ + mhi_cntrl = mhi_bdf_to_controller(domain, bus, slot, dev_id); + if (!mhi_cntrl) + mhi_cntrl = mhi_register_controller(pci_dev); + + if (IS_ERR(mhi_cntrl)) + return PTR_ERR(mhi_cntrl); + + mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + mhi_dev->powered_on = true; + + mhi_arch_iommu_init(mhi_cntrl); + + ret = mhi_arch_pcie_init(mhi_cntrl); + if (ret) + goto error_init_pci_arch; + + mhi_cntrl->dev = &pci_dev->dev; + ret = mhi_init_pci_dev(mhi_cntrl); + if (ret) + goto error_init_pci; + + /* start power up sequence */ + if (!debug_mode) { + ret = mhi_power_up(mhi_cntrl); + if (ret) + goto error_power_up; + } + + pm_runtime_mark_last_busy(&pci_dev->dev); + + mhi_pci_show_link(mhi_cntrl, pci_dev); + +#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP + { + struct msm_pcie_register_event *pcie_event = &mhi_pcie_events[mhi_cntrl->cntrl_idx]; + + pcie_event->events = MSM_PCIE_EVENT_WAKEUP; +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,14,117 )) + pcie_event->pcie_event.user = pci_dev; + pcie_event->pcie_event.mode = MSM_PCIE_TRIGGER_CALLBACK; + pcie_event->pcie_event.callback = mhi_pci_event_cb; +#else + pcie_event->user = pci_dev; + pcie_event->mode = MSM_PCIE_TRIGGER_CALLBACK; + pcie_event->callback = mhi_pci_event_cb; +#endif + + ret = msm_pcie_register_event(pcie_event); + if (ret) { + MHI_LOG("Failed to register for PCIe event"); + } + } +#endif + + MHI_LOG("Return successful\n"); + + return 0; + + mhi_unregister_mhi_controller(mhi_cntrl); +error_power_up: + mhi_deinit_pci_dev(mhi_cntrl); + +error_init_pci: + mhi_arch_pcie_deinit(mhi_cntrl); +error_init_pci_arch: + mhi_arch_iommu_deinit(mhi_cntrl); + + return ret; +} + +void mhi_pci_device_removed(struct pci_dev *pci_dev) +{ + struct mhi_controller *mhi_cntrl; + u32 domain = pci_domain_nr(pci_dev->bus); + u32 bus = pci_dev->bus->number; + u32 dev_id = pci_dev->device; + u32 slot = PCI_SLOT(pci_dev->devfn); + + mhi_cntrl = mhi_bdf_to_controller(domain, bus, slot, dev_id); + + if (mhi_cntrl) { + + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + +#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP + { + struct msm_pcie_register_event *pcie_event = &mhi_pcie_events[mhi_cntrl->cntrl_idx]; + + msm_pcie_deregister_event(pcie_event); + } +#endif + + pm_stay_awake(&mhi_cntrl->mhi_dev->dev); + + /* if link is in drv suspend, wake it up */ + pm_runtime_get_sync(&pci_dev->dev); + + mutex_lock(&mhi_cntrl->pm_mutex); + if (!mhi_dev->powered_on) { + MHI_LOG("Not in active state\n"); + mutex_unlock(&mhi_cntrl->pm_mutex); + pm_runtime_put_noidle(&pci_dev->dev); + return; + } + mhi_dev->powered_on = false; + mutex_unlock(&mhi_cntrl->pm_mutex); + + pm_runtime_put_noidle(&pci_dev->dev); + + MHI_LOG("Triggering shutdown process\n"); + mhi_power_down(mhi_cntrl, false); + + /* turn the link off */ + mhi_deinit_pci_dev(mhi_cntrl); + mhi_arch_link_off(mhi_cntrl, false); + + mhi_arch_pcie_deinit(mhi_cntrl); + mhi_arch_iommu_deinit(mhi_cntrl); + + pm_relax(&mhi_cntrl->mhi_dev->dev); + + mhi_unregister_mhi_controller(mhi_cntrl); + } +} + +static const struct dev_pm_ops pm_ops = { + SET_RUNTIME_PM_OPS(mhi_runtime_suspend, + mhi_runtime_resume, + mhi_runtime_idle) + SET_SYSTEM_SLEEP_PM_OPS(mhi_system_suspend, mhi_system_resume) +}; + +static struct pci_device_id mhi_pcie_device_id[] = { + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0303)}, + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0304)}, //SDX20 + 
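+	/*
+	 * Note: the 0x17cb entries are Qualcomm reference endpoints and
+	 * the 0x1eac entries are Quectel-assigned IDs; support for a new
+	 * SDX-family module is added by appending another PCI_DEVICE()
+	 * entry before the terminating {0}.
+	 */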
{PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0305)}, //SDX24 + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0306)}, //SDX55 + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, 0x0308)}, //SDX62 + {PCI_DEVICE(0x1eac, 0x1001)}, //EM120 + {PCI_DEVICE(0x1eac, 0x1002)}, //EM160 + {PCI_DEVICE(0x1eac, 0x1004)}, //RM520 + {PCI_DEVICE(MHI_PCIE_VENDOR_ID, MHI_PCIE_DEBUG_ID)}, + {0}, +}; + +MODULE_DEVICE_TABLE(pci, mhi_pcie_device_id); + +static struct pci_driver mhi_pcie_driver = { + .name = "mhi_q", + .id_table = mhi_pcie_device_id, + .probe = mhi_pci_probe, + .remove = mhi_pci_device_removed, + .driver = { + .pm = &pm_ops + } +}; + +#if 0 +module_pci_driver(mhi_pcie_driver); +#else +int __init mhi_controller_qcom_init(void) +{ + return pci_register_driver(&mhi_pcie_driver); +}; + +void mhi_controller_qcom_exit(void) +{ + pr_info("%s enter\n", __func__); + pci_unregister_driver(&mhi_pcie_driver); + pr_info("%s exit\n", __func__); +} + +#ifdef QCOM_AP_SDM845_IOMMU_MAP +struct dma_iommu_mapping *mhi_smmu_mapping[MAX_MHI]; + +#define SMMU_BASE 0x10000000 +#define SMMU_SIZE 0x40000000 +static struct dma_iommu_mapping * sdm845_smmu_init(struct pci_dev *pdev) { + int ret = 0; + int atomic_ctx = 1; + int s1_bypass = 1; + struct dma_iommu_mapping *mapping; + + mapping = arm_iommu_create_mapping(&platform_bus_type, SMMU_BASE, SMMU_SIZE); + if (IS_ERR(mapping)) { + ret = PTR_ERR(mapping); + dev_err(&pdev->dev, "Create mapping failed, err = %d\n", ret); + return NULL; + } + + ret = iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_ATOMIC, &atomic_ctx); + if (ret < 0) { + dev_err(&pdev->dev, "Set atomic_ctx attribute failed, err = %d\n", ret); + goto set_attr_fail; + } + + ret = iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS, &s1_bypass); + if (ret < 0) { + dev_err(&pdev->dev, "Set s1_bypass attribute failed, err = %d\n", ret); + arm_iommu_release_mapping(mapping); + goto set_attr_fail; + } + + ret = arm_iommu_attach_device(&pdev->dev, mapping); + if (ret < 0) { + dev_err(&pdev->dev, "Attach device failed, err = %d\n", ret); + goto attach_fail; + } + + return mapping; + +attach_fail: +set_attr_fail: + arm_iommu_release_mapping(mapping); + return NULL; +} +#endif + +int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl) +{ +#ifdef QCOM_AP_SDM845_IOMMU_MAP + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + mhi_smmu_mapping[mhi_cntrl->cntrl_idx] = sdm845_smmu_init(mhi_dev->pci_dev); +#endif + + return 0; +} + +void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl) +{ +#ifdef QCOM_AP_SDM845_IOMMU_MAP + if (mhi_smmu_mapping[mhi_cntrl->cntrl_idx]) { + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + + arm_iommu_detach_device(&mhi_dev->pci_dev->dev); + arm_iommu_release_mapping(mhi_smmu_mapping[mhi_cntrl->cntrl_idx]); + mhi_smmu_mapping[mhi_cntrl->cntrl_idx] = NULL; + } +#endif +} + +static int mhi_arch_set_bus_request(struct mhi_controller *mhi_cntrl, int index) +{ + MHI_LOG("Setting bus request to index %d\n", index); + return 0; +} + +int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl) +{ +#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct arch_info *arch_info = mhi_dev->arch_info; + + if (!arch_info) { + arch_info = devm_kzalloc(&mhi_dev->pci_dev->dev, + sizeof(*arch_info), GFP_KERNEL); + if (!arch_info) + return -ENOMEM; + + mhi_dev->arch_info = arch_info; + + /* save reference state for pcie config space */ + arch_info->ref_pcie_state = pci_store_saved_state( + mhi_dev->pci_dev); + } +#endif + + return 
mhi_arch_set_bus_request(mhi_cntrl, 1); +} + +void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl) +{ + mhi_arch_set_bus_request(mhi_cntrl, 0); +} + +int mhi_arch_platform_init(struct mhi_dev *mhi_dev) +{ + return 0; +} + +void mhi_arch_platform_deinit(struct mhi_dev *mhi_dev) +{ +} + +int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, + bool graceful) +{ +#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct arch_info *arch_info = mhi_dev->arch_info; + struct pci_dev *pci_dev = mhi_dev->pci_dev; + int ret; + + MHI_LOG("Entered\n"); + + if (graceful) { + pci_clear_master(pci_dev); + ret = pci_save_state(mhi_dev->pci_dev); + if (ret) { + MHI_ERR("Failed with pci_save_state, ret:%d\n", ret); + return ret; + } + + arch_info->pcie_state = pci_store_saved_state(pci_dev); + pci_disable_device(pci_dev); + } + + /* + * We will always attempt to put link into D3hot, however + * link down may have happened due to error fatal, so + * ignoring the return code + */ + pci_set_power_state(pci_dev, PCI_D3hot); + + ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, mhi_cntrl->bus, pci_dev, + NULL, 0); + MHI_ERR("msm_pcie_pm_control(MSM_PCIE_SUSPEND), ret:%d\n", ret); + + /* release the resources */ + mhi_arch_set_bus_request(mhi_cntrl, 0); + + MHI_LOG("Exited\n"); +#endif + + return 0; +} + +int mhi_arch_link_on(struct mhi_controller *mhi_cntrl) +{ +#ifdef QCOM_AP_AND_EFUSE_PCIE_SLEEP + struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl); + struct arch_info *arch_info = mhi_dev->arch_info; + struct pci_dev *pci_dev = mhi_dev->pci_dev; + int ret; + + MHI_LOG("Entered\n"); + + /* request resources and establish link trainning */ + ret = mhi_arch_set_bus_request(mhi_cntrl, 1); + if (ret) + MHI_LOG("Could not set bus frequency, ret:%d\n", ret); + + ret = msm_pcie_pm_control(MSM_PCIE_RESUME, mhi_cntrl->bus, pci_dev, + NULL, 0); + MHI_LOG("msm_pcie_pm_control(MSM_PCIE_RESUME), ret:%d\n", ret); + if (ret) { + MHI_ERR("Link training failed, ret:%d\n", ret); + return ret; + } + + ret = pci_set_power_state(pci_dev, PCI_D0); + if (ret) { + MHI_ERR("Failed to set PCI_D0 state, ret:%d\n", ret); + return ret; + } + + ret = pci_enable_device(pci_dev); + if (ret) { + MHI_ERR("Failed to enable device, ret:%d\n", ret); + return ret; + } + + ret = pci_load_and_free_saved_state(pci_dev, &arch_info->pcie_state); + if (ret) + MHI_LOG("Failed to load saved cfg state\n"); + + pci_restore_state(pci_dev); + pci_set_master(pci_dev); + + MHI_LOG("Exited\n"); +#endif + + return 0; +} +#endif diff --git a/wwan/driver/quectel_MHI/src/controllers/mhi_qti.h b/wwan/driver/quectel_MHI/src/controllers/mhi_qti.h new file mode 100644 index 0000000..7ac021a --- /dev/null +++ b/wwan/driver/quectel_MHI/src/controllers/mhi_qti.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2018-2019, The Linux Foundation. 
All rights reserved.*/ + +#ifndef _MHI_QTI_ +#define _MHI_QTI_ + +/* iova cfg bitmask */ +#define MHI_SMMU_ATTACH BIT(0) +#define MHI_SMMU_S1_BYPASS BIT(1) +#define MHI_SMMU_FAST BIT(2) +#define MHI_SMMU_ATOMIC BIT(3) +#define MHI_SMMU_FORCE_COHERENT BIT(4) + +#define MHI_PCIE_VENDOR_ID (0x17cb) +#define MHI_PCIE_DEBUG_ID (0xffff) + +/* runtime suspend timer */ +#define MHI_RPM_SUSPEND_TMR_MS (2000) +#define MHI_PCI_BAR_NUM (0) + +struct mhi_dev { + struct pci_dev *pci_dev; + u32 smmu_cfg; + int resn; + void *arch_info; + bool powered_on; + dma_addr_t iova_start; + dma_addr_t iova_stop; + bool lpm_disabled; +}; + +void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl); +int mhi_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *device_id); + +void mhi_pci_device_removed(struct pci_dev *pci_dev); +int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl); +void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl); +int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl); +void mhi_arch_iommu_deinit(struct mhi_controller *mhi_cntrl); +int mhi_arch_link_off(struct mhi_controller *mhi_cntrl, bool graceful); +int mhi_arch_link_on(struct mhi_controller *mhi_cntrl); + +#endif /* _MHI_QTI_ */ diff --git a/wwan/driver/quectel_MHI/src/core/Makefile b/wwan/driver/quectel_MHI/src/core/Makefile new file mode 100644 index 0000000..a743fbf --- /dev/null +++ b/wwan/driver/quectel_MHI/src/core/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_MHI_BUS) +=mhi_init.o mhi_main.o mhi_pm.o mhi_boot.o mhi_dtr.o diff --git a/wwan/driver/quectel_MHI/src/core/mhi.h b/wwan/driver/quectel_MHI/src/core/mhi.h new file mode 100644 index 0000000..fad6504 --- /dev/null +++ b/wwan/driver/quectel_MHI/src/core/mhi.h @@ -0,0 +1,908 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
*/ + +#ifndef _MHI_H_ +#define _MHI_H_ + +#define PCIE_MHI_DRIVER_VERSION "V1.3.6" +#define ENABLE_MHI_MON +//#define ENABLE_IP_SW0 + +// #define ENABLE_ADPL + +// #define ENABLE_QDSS + +#include +typedef enum +{ + MHI_CLIENT_LOOPBACK_OUT = 0, + MHI_CLIENT_LOOPBACK_IN = 1, + MHI_CLIENT_SAHARA_OUT = 2, + MHI_CLIENT_SAHARA_IN = 3, + MHI_CLIENT_DIAG_OUT = 4, + MHI_CLIENT_DIAG_IN = 5, + MHI_CLIENT_SSR_OUT = 6, + MHI_CLIENT_SSR_IN = 7, + MHI_CLIENT_QDSS_OUT = 8, + MHI_CLIENT_QDSS_IN = 9, + MHI_CLIENT_EFS_OUT = 10, + MHI_CLIENT_EFS_IN = 11, + MHI_CLIENT_MBIM_OUT = 12, + MHI_CLIENT_MBIM_IN = 13, + MHI_CLIENT_QMI_OUT = 14, + MHI_CLIENT_QMI_IN = 15, + MHI_CLIENT_QMI_2_OUT = 16, + MHI_CLIENT_QMI_2_IN = 17, + MHI_CLIENT_IP_CTRL_1_OUT = 18, + MHI_CLIENT_IP_CTRL_1_IN = 19, + MHI_CLIENT_IPCR_OUT = 20, + MHI_CLIENT_IPCR_IN = 21, + MHI_CLIENT_TEST_FW_OUT = 22, + MHI_CLIENT_TEST_FW_IN = 23, + MHI_CLIENT_RESERVED_0 = 24, + MHI_CLIENT_BOOT_LOG_IN = 25, + MHI_CLIENT_DCI_OUT = 26, + MHI_CLIENT_DCI_IN = 27, + MHI_CLIENT_QBI_OUT = 28, + MHI_CLIENT_QBI_IN = 29, + MHI_CLIENT_RESERVED_1_LOWER = 30, + MHI_CLIENT_RESERVED_1_UPPER = 31, + MHI_CLIENT_DUN_OUT = 32, + MHI_CLIENT_DUN_IN = 33, + MHI_CLIENT_EDL_OUT = 34, + MHI_CLIENT_EDL_IN = 35, + MHI_CLIENT_ADB_FB_OUT = 36, + MHI_CLIENT_ADB_FB_IN = 37, + MHI_CLIENT_RESERVED_2_LOWER = 38, + MHI_CLIENT_RESERVED_2_UPPER = 41, + MHI_CLIENT_CSVT_OUT = 42, + MHI_CLIENT_CSVT_IN = 43, + MHI_CLIENT_SMCT_OUT = 44, + MHI_CLIENT_SMCT_IN = 45, + MHI_CLIENT_IP_SW_0_OUT = 46, + MHI_CLIENT_IP_SW_0_IN = 47, + MHI_CLIENT_IP_SW_1_OUT = 48, + MHI_CLIENT_IP_SW_1_IN = 49, + MHI_CLIENT_RESERVED_3_LOWER = 50, + MHI_CLIENT_RESERVED_3_UPPER = 59, + MHI_CLIENT_TEST_0_OUT = 60, + MHI_CLIENT_TEST_0_IN = 61, + MHI_CLIENT_TEST_1_OUT = 62, + MHI_CLIENT_TEST_1_IN = 63, + MHI_CLIENT_TEST_2_OUT = 64, + MHI_CLIENT_TEST_2_IN = 65, + MHI_CLIENT_TEST_3_OUT = 66, + MHI_CLIENT_TEST_3_IN = 67, + MHI_CLIENT_RESERVED_4_LOWER = 68, + MHI_CLIENT_RESERVED_4_UPPER = 91, + MHI_CLIENT_OEM_0_OUT = 92, + MHI_CLIENT_OEM_0_IN = 93, + MHI_CLIENT_OEM_1_OUT = 94, + MHI_CLIENT_OEM_1_IN = 95, + MHI_CLIENT_OEM_2_OUT = 96, + MHI_CLIENT_OEM_2_IN = 97, + MHI_CLIENT_OEM_3_OUT = 98, + MHI_CLIENT_OEM_3_IN = 99, + MHI_CLIENT_IP_HW_0_OUT = 100, + MHI_CLIENT_IP_HW_0_IN = 101, + MHI_CLIENT_ADPL = 102, + MHI_CLIENT_IP_HW_QDSS = 103, + // MHI_CLIENT_RESERVED_5_LOWER = 103, + MHI_CLIENT_RESERVED_5_UPPER = 127, + MHI_MAX_CHANNELS = 128 +}MHI_CLIENT_CHANNEL_TYPE; + +/* Event Ring Index */ +typedef enum +{ + SW_EVT_RING = 0, + PRIMARY_EVENT_RING = SW_EVT_RING, +#ifdef ENABLE_IP_SW0 + SW_0_OUT_EVT_RING, + SW_0_IN_EVT_RING, +#endif + IPA_OUT_EVENT_RING, + IPA_IN_EVENT_RING, +#ifdef ENABLE_ADPL + ADPL_EVT_RING, +#endif +#ifdef ENABLE_QDSS + QDSS_EVT_RING, +#endif + + MAX_EVT_RING_IDX +}MHI_EVT_RING_IDX; + +#define MHI_VERSION 0x01000000 +#define MHIREGLEN_VALUE 0x100 /* **** WRONG VALUE *** */ +#define MHI_MSI_INDEX 1 +#define MAX_NUM_MHI_DEVICES 1 +#define NUM_MHI_XFER_RINGS 128 +#define NUM_MHI_EVT_RINGS MAX_EVT_RING_IDX +#define NUM_MHI_HW_EVT_RINGS 4 +#define NUM_MHI_XFER_RING_ELEMENTS 16 +#define NUM_MHI_EVT_RING_ELEMENTS (NUM_MHI_IPA_IN_RING_ELEMENTS*2) //must *2, event ring full will make x55 dump +#define NUM_MHI_IPA_IN_RING_ELEMENTS 512 +#define NUM_MHI_IPA_OUT_RING_ELEMENTS 512 //donot use ul agg, so increase +#define NUM_MHI_DIAG_IN_RING_ELEMENTS 128 +#define NUM_MHI_SW_IP_RING_ELEMENTS 512 + +#ifdef ENABLE_ADPL +#define NUM_MHI_ADPL_RING_ELEMENTS 256 +#endif + +#ifdef ENABLE_QDSS +#define NUM_MHI_QDSS_RING_ELEMENTS 256 +#endif + +/* +* 
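Ring sizing vs. interrupt moderation, in brief: the device coalesces
+* completion events and raises at most one MSI per moderation window,
+* so a long burst can complete with far fewer interrupts than there
+* are ring elements.
+*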
+* For example: if the interrupt moderation time is set to 1 ms, and
+* more than NUM_MHI_CHAN_RING_ELEMENTS transfers are sent to the modem
+* within that 1 ms (e.g. a firehose upgrade), the modem will not
+* trigger an irq for each of those transfers.
+*/
+#define NUM_MHI_CHAN_RING_ELEMENTS 32 //8
+#define MHI_EVT_CMD_QUEUE_SIZE 160
+#define MHI_EVT_STATE_QUEUE_SIZE 128
+#define MHI_EVT_XFER_QUEUE_SIZE 1024
+
+#define CHAN_INBOUND(_x) ((_x)%2)
+
+#define CHAN_SBL(_x) (((_x) == MHI_CLIENT_SAHARA_OUT) || \
+		      ((_x) == MHI_CLIENT_SAHARA_IN) || \
+		      ((_x) == MHI_CLIENT_BOOT_LOG_IN))
+
+#define CHAN_EDL(_x) (((_x) == MHI_CLIENT_EDL_OUT) || \
+		      ((_x) == MHI_CLIENT_EDL_IN))
+
+struct mhi_chan;
+struct mhi_event;
+struct mhi_ctxt;
+struct mhi_cmd;
+struct image_info;
+struct bhi_vec_entry;
+struct mhi_timesync;
+struct mhi_buf_info;
+
+/**
+ * enum MHI_CB - MHI callback
+ * @MHI_CB_IDLE: MHI entered idle state
+ * @MHI_CB_PENDING_DATA: New data available for client to process
+ * @MHI_CB_LPM_ENTER: MHI host entered low power mode
+ * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
+ * @MHI_CB_EE_RDDM: MHI device entered RDDM execution environment
+ * @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode exec env
+ * @MHI_CB_SYS_ERROR: MHI device entered error state (may recover)
+ * @MHI_CB_FATAL_ERROR: MHI device entered fatal error
+ */
+enum MHI_CB {
+	MHI_CB_IDLE,
+	MHI_CB_PENDING_DATA,
+	MHI_CB_LPM_ENTER,
+	MHI_CB_LPM_EXIT,
+	MHI_CB_EE_RDDM,
+	MHI_CB_EE_MISSION_MODE,
+	MHI_CB_SYS_ERROR,
+	MHI_CB_FATAL_ERROR,
+};
+
+/**
+ * enum MHI_DEBUG_LEVEL - various debugging levels
+ */
+enum MHI_DEBUG_LEVEL {
+	MHI_MSG_LVL_VERBOSE,
+	MHI_MSG_LVL_INFO,
+	MHI_MSG_LVL_ERROR,
+	MHI_MSG_LVL_CRITICAL,
+	MHI_MSG_LVL_MASK_ALL,
+};
+
+/*
+GSI_XFER_FLAG_BEI: Block event interrupt
+1: Event generated by this ring element must not assert an interrupt to the host
+0: Event generated by this ring element must assert an interrupt to the host
+
+GSI_XFER_FLAG_EOT: Interrupt on end of transfer
+1: If an EOT condition is encountered when processing this ring element, an event is generated by the device with its completion code set to EOT. 
+0: If an EOT condition is encountered for this ring element, a completion event is not be generated by the device, unless IEOB is 1 + +GSI_XFER_FLAG_EOB: Interrupt on end of block +1: Device notifies host after processing this ring element by sending a completion event +0: Completion event is not required after processing this ring element + +GSI_XFER_FLAG_CHAIN: Chain bit that identifies the ring elements in a TD +*/ + +/** + * enum MHI_FLAGS - Transfer flags + * @MHI_EOB: End of buffer for bulk transfer + * @MHI_EOT: End of transfer + * @MHI_CHAIN: Linked transfer + */ +enum MHI_FLAGS { + MHI_EOB, + MHI_EOT, + MHI_CHAIN, +}; + +/** + * enum mhi_device_type - Device types + * @MHI_XFER_TYPE: Handles data transfer + * @MHI_TIMESYNC_TYPE: Use for timesync feature + * @MHI_CONTROLLER_TYPE: Control device + */ +enum mhi_device_type { + MHI_XFER_TYPE, + MHI_TIMESYNC_TYPE, + MHI_CONTROLLER_TYPE, +}; + +/** + * enum mhi_ee - device current execution enviornment + * @MHI_EE_PBL - device in PBL + * @MHI_EE_SBL - device in SBL + * @MHI_EE_AMSS - device in mission mode (firmware fully loaded) + * @MHI_EE_RDDM - device in ram dump collection mode + * @MHI_EE_WFW - device in WLAN firmware mode + * @MHI_EE_PTHRU - device in PBL but configured in pass thru mode + * @MHI_EE_EDL - device in emergency download mode + */ +enum mhi_ee { + MHI_EE_PBL = 0x0, + MHI_EE_SBL = 0x1, + MHI_EE_AMSS = 0x2, + MHI_EE_RDDM = 0x3, + MHI_EE_WFW = 0x4, + MHI_EE_PTHRU = 0x5, + MHI_EE_EDL = 0x6, + MHI_EE_FP = 0x7, /* FlashProg, Flash Programmer Environment */ + MHI_EE_MAX_SUPPORTED = MHI_EE_FP, + MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */ + MHI_EE_MAX, +}; + +/** + * enum mhi_dev_state - device current MHI state + */ +enum mhi_dev_state { + MHI_STATE_RESET = 0x0, + MHI_STATE_READY = 0x1, + MHI_STATE_M0 = 0x2, + MHI_STATE_M1 = 0x3, + MHI_STATE_M2 = 0x4, + MHI_STATE_M3 = 0x5, + MHI_STATE_BHI = 0x7, + MHI_STATE_SYS_ERR = 0xFF, + MHI_STATE_MAX, +}; + +extern const char * const mhi_ee_str[MHI_EE_MAX]; +#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? 
\ + "INVALID_EE" : mhi_ee_str[ee]) + +/** + * struct image_info - firmware and rddm table table + * @mhi_buf - Contain device firmware and rddm table + * @entries - # of entries in table + */ +struct image_info { + struct mhi_buf *mhi_buf; + struct bhi_vec_entry *bhi_vec; + u32 entries; +}; + +/** + * struct mhi_controller - Master controller structure for external modem + * @dev: Device associated with this controller + * @of_node: DT that has MHI configuration information + * @regs: Points to base of MHI MMIO register space + * @bhi: Points to base of MHI BHI register space + * @bhie: Points to base of MHI BHIe register space + * @wake_db: MHI WAKE doorbell register address + * @dev_id: PCIe device id of the external device + * @domain: PCIe domain the device connected to + * @bus: PCIe bus the device assigned to + * @slot: PCIe slot for the modem + * @iova_start: IOMMU starting address for data + * @iova_stop: IOMMU stop address for data + * @fw_image: Firmware image name for normal booting + * @edl_image: Firmware image name for emergency download mode + * @fbc_download: MHI host needs to do complete image transfer + * @rddm_size: RAM dump size that host should allocate for debugging purpose + * @sbl_size: SBL image size + * @seg_len: BHIe vector size + * @fbc_image: Points to firmware image buffer + * @rddm_image: Points to RAM dump buffer + * @max_chan: Maximum number of channels controller support + * @mhi_chan: Points to channel configuration table + * @lpm_chans: List of channels that require LPM notifications + * @total_ev_rings: Total # of event rings allocated + * @hw_ev_rings: Number of hardware event rings + * @sw_ev_rings: Number of software event rings + * @msi_required: Number of msi required to operate + * @msi_allocated: Number of msi allocated by bus master + * @irq: base irq # to request + * @mhi_event: MHI event ring configurations table + * @mhi_cmd: MHI command ring configurations table + * @mhi_ctxt: MHI device context, shared memory between host and device + * @timeout_ms: Timeout in ms for state transitions + * @pm_state: Power management state + * @ee: MHI device execution environment + * @dev_state: MHI STATE + * @status_cb: CB function to notify various power states to but master + * @link_status: Query link status in case of abnormal value read from device + * @runtime_get: Async runtime resume function + * @runtimet_put: Release votes + * @time_get: Return host time in us + * @lpm_disable: Request controller to disable link level low power modes + * @lpm_enable: Controller may enable link level low power modes again + * @priv_data: Points to bus master's private data + */ +struct mhi_controller { + struct list_head node; + struct mhi_device *mhi_dev; + + /* device node for iommu ops */ + struct device *dev; + struct device_node *of_node; + + /* mmio base */ + phys_addr_t base_addr; + void __iomem *regs; + void __iomem *bhi; + void __iomem *bhie; + void __iomem *wake_db; + + /* device topology */ + u32 vendor; + u32 dev_id; + u32 domain; + u32 bus; + u32 slot; + u32 cntrl_idx; + struct device *cntrl_dev; + + /* addressing window */ + dma_addr_t iova_start; + dma_addr_t iova_stop; + + /* fw images */ + const char *fw_image; + const char *edl_image; + + /* mhi host manages downloading entire fbc images */ + bool fbc_download; + size_t rddm_size; + size_t sbl_size; + size_t seg_len; + u32 session_id; + u32 sequence_id; + struct image_info *fbc_image; + struct image_info *rddm_image; + + /* physical channel config data */ + u32 max_chan; + struct mhi_chan 
+
+#ifdef ENABLE_MHI_MON
+struct mhi_tre;
+struct mon_reader {
+	struct list_head r_link;
+	struct mhi_controller *m_bus;
+	void *r_data; /* Use container_of instead? */
+
+	void (*rnf_submit)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len);
+	void (*rnf_receive)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len);
+	void (*rnf_complete)(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre);
+};
+#endif
+
+/**
+ * struct mhi_device - mhi device structure bound to a channel
+ * @dev: Device associated with the channels
+ * @mtu: Maximum # of bytes the controller supports
+ * @ul_chan_id: MHI channel id for UL transfer
+ * @dl_chan_id: MHI channel id for DL transfer
+ * @tiocm: Device current terminal settings
+ * @priv_data: Driver private data
+ */
+struct mhi_device {
+	struct device dev;
+	u32 vendor;
+	u32 dev_id;
+	u32 domain;
+	u32 bus;
+	u32 slot;
+	size_t mtu;
+	int ul_chan_id;
+	int dl_chan_id;
+	int ul_event_id;
+	int dl_event_id;
+	u32 tiocm;
+	const struct mhi_device_id *id;
+	const char *chan_name;
+	struct mhi_controller *mhi_cntrl;
+	struct mhi_chan *ul_chan;
+	struct mhi_chan *dl_chan;
+	atomic_t dev_wake;
+	enum mhi_device_type dev_type;
+	void *priv_data;
+	int (*ul_xfer)(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
+		       void *buf, size_t len, enum MHI_FLAGS flags);
+	int (*dl_xfer)(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
+		       void *buf, size_t size, enum MHI_FLAGS flags);
+	void (*status_cb)(struct mhi_device *mhi_dev, enum MHI_CB reason);
+};
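+
+/*
+ * Illustrative sketch (hypothetical struct my_priv): client drivers
+ * typically stash per-channel state on the mhi_device in probe() and fetch
+ * it from their transfer callbacks, using the devdata helpers below:
+ *
+ *	struct my_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ *
+ *	mhi_device_set_devdata(mhi_dev, priv);
+ *	...
+ *	priv = mhi_device_get_devdata(mhi_dev);
+ */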
+
+/**
+ * struct mhi_result - Completed buffer information
+ * @buf_addr: Address of data buffer
+ * @dir: Channel direction
+ * @bytes_xferd: # of bytes transferred
+ * @transaction_status: Status of the last transaction
+ */
+struct mhi_result {
+	void *buf_addr;
+	enum dma_data_direction dir;
+	size_t bytes_xferd;
+	int transaction_status;
+};
+
+/**
+ * struct mhi_buf - Describes the buffer
+ * @page: buffer as a page
+ * @buf: cpu address for the buffer
+ * @phys_addr: physical address of the buffer
+ * @dma_addr: iommu address for the buffer
+ * @skb: skb of ip packet
+ * @len: # of bytes
+ * @name: Buffer label, for offload channel configurations name must be:
+ * ECA - Event context array data
+ * CCA - Channel context array data
+ */
+struct mhi_buf {
+	struct list_head node;
+	struct page *page;
+	void *buf;
+	phys_addr_t phys_addr;
+	dma_addr_t dma_addr;
+	struct sk_buff *skb;
+	size_t len;
+	const char *name; /* ECA, CCA */
+};
+
+/**
+ * struct mhi_driver - mhi driver information
+ * @id_table: NULL terminated channel ID names
+ * @ul_xfer_cb: UL data transfer callback
+ * @dl_xfer_cb: DL data transfer callback
+ * @status_cb: Asynchronous status callback
+ */
+struct mhi_driver {
+	const struct mhi_device_id *id_table;
+	int (*probe)(struct mhi_device *mhi_dev,
+		     const struct mhi_device_id *id);
+	void (*remove)(struct mhi_device *mhi_dev);
+	void (*ul_xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *res);
+	void (*dl_xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *res);
+	void (*status_cb)(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb);
+	struct device_driver driver;
+};
+
+#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver)
+#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
+
+static inline void mhi_device_set_devdata(struct mhi_device *mhi_dev,
+					  void *priv)
+{
+	mhi_dev->priv_data = priv;
+}
+
+static inline void *mhi_device_get_devdata(struct mhi_device *mhi_dev)
+{
+	return mhi_dev->priv_data;
+}
+
+/**
+ * mhi_queue_transfer - Queue a buffer to hardware
+ * All transfers are asynchronous
+ * @mhi_dev: Device associated with the channels
+ * @dir: Data direction
+ * @buf: Data buffer (skb for hardware channels)
+ * @len: Size in bytes
+ * @mflags: Interrupt flags for the device
+ */
+static inline int mhi_queue_transfer(struct mhi_device *mhi_dev,
+				     enum dma_data_direction dir,
+				     void *buf,
+				     size_t len,
+				     enum MHI_FLAGS mflags)
+{
+	if (dir == DMA_TO_DEVICE)
+		return mhi_dev->ul_xfer(mhi_dev, mhi_dev->ul_chan, buf, len,
+					mflags);
+	else
+		return mhi_dev->dl_xfer(mhi_dev, mhi_dev->dl_chan, buf, len,
+					mflags);
+}
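+
+/*
+ * Illustrative sketch (mirrors the inbound-queue pattern used by the DTR
+ * channel in mhi_dtr.c; buf is hypothetical): queue a receive buffer and
+ * reclaim it only on failure, since on success the core hands it back
+ * through dl_xfer_cb:
+ *
+ *	void *buf = kmalloc(mhi_dev->mtu, GFP_KERNEL);
+ *	int ret = -ENOMEM;
+ *
+ *	if (buf)
+ *		ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf,
+ *					 mhi_dev->mtu, MHI_EOT);
+ *	if (ret)
+ *		kfree(buf);
+ */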
+
+static inline void *mhi_controller_get_devdata(struct mhi_controller *mhi_cntrl)
+{
+	return mhi_cntrl->priv_data;
+}
+
+static inline void mhi_free_controller(struct mhi_controller *mhi_cntrl)
+{
+	kfree(mhi_cntrl);
+}
+
+/**
+ * mhi_driver_register - Register driver with MHI framework
+ * @mhi_drv: mhi_driver structure
+ */
+int mhi_driver_register(struct mhi_driver *mhi_drv);
+
+/**
+ * mhi_driver_unregister - Unregister a driver for mhi_devices
+ * @mhi_drv: mhi_driver structure
+ */
+void mhi_driver_unregister(struct mhi_driver *mhi_drv);
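+
+/*
+ * Illustrative registration sketch (my_probe, my_remove, my_dl_cb and the
+ * channel name are hypothetical):
+ *
+ *	static const struct mhi_device_id my_id_table[] = {
+ *		{ .chan = "DUN" },
+ *		{},
+ *	};
+ *
+ *	static struct mhi_driver my_driver = {
+ *		.id_table = my_id_table,
+ *		.probe = my_probe,
+ *		.remove = my_remove,
+ *		.dl_xfer_cb = my_dl_cb,
+ *		.ul_xfer_cb = my_dl_cb,
+ *		.driver = { .name = "my_mhi_client" },
+ *	};
+ *
+ * Call mhi_driver_register(&my_driver) from module init and
+ * mhi_driver_unregister(&my_driver) from module exit.
+ */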
+
+/**
+ * mhi_device_configure - configure ECA or CCA context
+ * For offload channels that the client manages, call this
+ * function to configure channel context or event context
+ * array associated with the channel
+ * @mhi_dev: Device associated with the channels
+ * @dir: Direction of the channel
+ * @mhi_buf: Configuration data
+ * @elements: # of configuration elements
+ */
+int mhi_device_configure(struct mhi_device *mhi_dev,
+			 enum dma_data_direction dir,
+			 struct mhi_buf *mhi_buf,
+			 int elements);
+
+/**
+ * mhi_device_get - disable all low power modes
+ * Only disables lpm, does not immediately exit low power mode
+ * if the controller is already in a low power mode
+ * @mhi_dev: Device associated with the channels
+ */
+void mhi_device_get(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_device_get_sync - disable all low power modes
+ * Synchronously disable all low power modes, exit low power mode if
+ * the controller is already in a low power state
+ * @mhi_dev: Device associated with the channels
+ */
+int mhi_device_get_sync(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_device_put - re-enable low power modes
+ * @mhi_dev: Device associated with the channels
+ */
+void mhi_device_put(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_prepare_for_transfer - setup channel for data transfer
+ * Moves both UL and DL channel from RESET to START state
+ * @mhi_dev: Device associated with the channels
+ */
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_unprepare_from_transfer - unprepare the channels
+ * Moves both UL and DL channels to RESET state
+ * @mhi_dev: Device associated with the channels
+ */
+void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);
+
+/**
+ * mhi_get_no_free_descriptors - Get transfer ring length
+ * Get # of TD available to queue buffers
+ * @mhi_dev: Device associated with the channels
+ * @dir: Direction of the channel
+ */
+int mhi_get_no_free_descriptors(struct mhi_device *mhi_dev,
+				enum dma_data_direction dir);
+
+/**
+ * mhi_poll - poll for any available data to consume
+ * This is only applicable for DL direction
+ * @mhi_dev: Device associated with the channels
+ * @budget: Max # of descriptors to service before returning
+ */
+int mhi_poll(struct mhi_device *mhi_dev, u32 budget);
+
+/**
+ * mhi_ioctl - user space IOCTL support for MHI channels
+ * Native support for setting TIOCM
+ * @mhi_dev: Device associated with the channels
+ * @cmd: IOCTL cmd
+ * @arg: Optional parameter, ioctl cmd specific
+ */
+long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg);
+
+/**
+ * mhi_alloc_controller - Allocate mhi_controller structure
+ * Allocate controller structure and additional data for controller
+ * private data. You may get the private data pointer by calling
+ * mhi_controller_get_devdata
+ * @size: # of additional bytes to allocate
+ */
+struct mhi_controller *mhi_alloc_controller(size_t size);
+
+/**
+ * of_register_mhi_controller - Register MHI controller
+ * Registers MHI controller with MHI bus framework. DT must be supported
+ * @mhi_cntrl: MHI controller to register
+ */
+int of_register_mhi_controller(struct mhi_controller *mhi_cntrl);
+
+void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_bdf_to_controller - Look up a registered controller
+ * Search for controller based on device identification
+ * @domain: RC domain of the device
+ * @bus: Bus device connected to
+ * @slot: Slot device assigned to
+ * @dev_id: Device Identification
+ */
+struct mhi_controller *mhi_bdf_to_controller(u32 domain, u32 bus, u32 slot,
+					     u32 dev_id);
+
+/**
+ * mhi_prepare_for_power_up - Do pre-initialization before power up
+ * This is optional, call this before power up if the controller does not
+ * want the bus framework to automatically free any allocated memory during
+ * the shutdown process.
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_async_power_up - Starts MHI power up sequence
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_async_power_up(struct mhi_controller *mhi_cntrl);
+int mhi_sync_power_up(struct mhi_controller *mhi_cntrl);
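+
+/*
+ * Illustrative bring-up/tear-down order for the controller calls above and
+ * below (a sketch; error handling elided):
+ *
+ *	mhi_prepare_for_power_up(mhi_cntrl);	// optional
+ *	mhi_sync_power_up(mhi_cntrl);
+ *	...
+ *	mhi_power_down(mhi_cntrl, true);
+ *	mhi_unprepare_after_power_down(mhi_cntrl);
+ */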
+
+/**
+ * mhi_power_down - Start MHI power down sequence
+ * @mhi_cntrl: MHI controller
+ * @graceful: if the link is still accessible, do a graceful shutdown;
+ * otherwise the host shuts down w/o putting the device into RESET state
+ */
+void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);
+
+/**
+ * mhi_unprepare_after_power_down - free any allocated memory for power up
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_pm_suspend - Move MHI into a suspended state
+ * Transition to MHI state M3 from M0/M1/M2
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_pm_resume - Resume MHI from suspended state
+ * Transition to MHI state M0 from M3
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_download_rddm_img - Download ramdump image from device for
+ * debugging purpose.
+ * @mhi_cntrl: MHI controller
+ * @in_panic: If we are trying to capture the image while in kernel panic
+ */
+int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic);
+
+/**
+ * mhi_force_rddm_mode - Force external device into rddm mode
+ * to collect device ramdump. This is useful if the host driver asserts
+ * and we need to see the device state as well.
+ * @mhi_cntrl: MHI controller
+ */
+int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_get_remote_time_sync - Get external soc time relative to local soc time
+ * using MMIO method.
+ * @mhi_dev: Device associated with the channels
+ * @t_host: Pointer to output local soc time
+ * @t_dev: Pointer to output remote soc time
+ */
+int mhi_get_remote_time_sync(struct mhi_device *mhi_dev,
+			     u64 *t_host,
+			     u64 *t_dev);
+
+/**
+ * mhi_get_mhi_state - Return MHI state of device
+ * @mhi_cntrl: MHI controller
+ */
+enum mhi_dev_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl);
+
+/**
+ * mhi_set_mhi_state - Set device state
+ * @mhi_cntrl: MHI controller
+ * @state: state to set
+ */
+void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
+		       enum mhi_dev_state state);
+
+
+/**
+ * mhi_is_active - helper function to determine if MHI is in active state
+ * @mhi_dev: client device
+ */
+static inline bool mhi_is_active(struct mhi_device *mhi_dev)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+	return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
+		mhi_cntrl->dev_state <= MHI_STATE_M3);
+}
+
+/**
+ * mhi_debug_reg_dump - dump MHI registers for debug purpose
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl);
+
+#ifdef CONFIG_MHI_DEBUG
+
+#define MHI_VERB(fmt, ...) do { \
+	if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_VERBOSE) \
+		pr_debug("[D][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\
+} while (0)
+
+#else
+
+#define MHI_VERB(fmt, ...)
+
+#endif
+
+#define MHI_LOG(fmt, ...) do { \
+	if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_INFO) \
+		pr_info("[I][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\
+	else if (!mhi_cntrl->klog_slient) \
+		printk(KERN_DEBUG "[I][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__);\
+} while (0)
+
+#define MHI_ERR(fmt, ...) do { \
+	if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_ERROR) \
+		pr_err("[E][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__); \
+} while (0)
+
+#define MHI_CRITICAL(fmt, ...) do { \
+	if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_CRITICAL) \
+		pr_alert("[C][mhi%d][%s] " fmt, mhi_cntrl->cntrl_idx, __func__, ##__VA_ARGS__); \
+} while (0)
+
+int mhi_register_mhi_controller(struct mhi_controller *mhi_cntrl);
+void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl);
+
+#ifndef MHI_NAME_SIZE
+#define MHI_NAME_SIZE 32
+/**
+ * struct mhi_device_id - MHI device identification
+ * @chan: MHI channel name
+ * @driver_data: driver data
+ */
+struct mhi_device_id {
+	const char chan[MHI_NAME_SIZE];
+	unsigned long driver_data;
+};
+#endif
+
+#endif /* _MHI_H_ */
diff --git a/wwan/driver/quectel_MHI/src/core/mhi_boot.c b/wwan/driver/quectel_MHI/src/core/mhi_boot.c
new file mode 100644
index 0000000..8f1924f
--- /dev/null
+++ b/wwan/driver/quectel_MHI/src/core/mhi_boot.c
@@ -0,0 +1,860 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "mhi.h"
+#include "mhi_internal.h"
+
+/* Software defines */
+/* BHI Version */
+#define BHI_MAJOR_VERSION 0x1
+#define BHI_MINOR_VERSION 0x1
+
+#define MSMHWID_NUMDWORDS 6 /* Number of dwords that make the MSMHWID */
+#define OEMPKHASH_NUMDWORDS 48 /* Number of dwords that make the OEM PK HASH */
+
+#define IsPBLExecEnv(ExecEnv) ((ExecEnv == MHI_EE_PBL) || (ExecEnv == MHI_EE_EDL))
+
+typedef u32 ULONG;
+
+typedef struct _bhi_info_type
+{
+	ULONG bhi_ver_minor;
+	ULONG bhi_ver_major;
+	ULONG bhi_image_address_low;
+	ULONG bhi_image_address_high;
+	ULONG bhi_image_size;
+	ULONG bhi_rsvd1;
+	ULONG bhi_imgtxdb;
+	ULONG bhi_rsvd2;
+	ULONG bhi_msivec;
+	ULONG bhi_rsvd3;
+	ULONG bhi_ee;
+	ULONG bhi_status;
+	ULONG bhi_errorcode;
+	ULONG bhi_errdbg1;
+	ULONG bhi_errdbg2;
+	ULONG bhi_errdbg3;
+	ULONG bhi_sernum;
+	ULONG bhi_sblantirollbackver;
+	ULONG bhi_numsegs;
+	ULONG bhi_msmhwid[MSMHWID_NUMDWORDS];
+	ULONG bhi_oempkhash[OEMPKHASH_NUMDWORDS];
+	ULONG bhi_rsvd5;
+} BHI_INFO_TYPE, *PBHI_INFO_TYPE;
+
+static void PrintBhiInfo(struct mhi_controller *mhi_cntrl, BHI_INFO_TYPE *bhi_info)
+{
+	ULONG index;
+	char str[128];
+
+	MHI_LOG("BHI Device Info...\n");
+	MHI_LOG("BHI Version = { Major = 0x%X Minor = 0x%X}\n", bhi_info->bhi_ver_major, bhi_info->bhi_ver_minor);
+	MHI_LOG("BHI Execution Environment = 0x%X\n", bhi_info->bhi_ee);
+	MHI_LOG("BHI Status = 0x%X\n", bhi_info->bhi_status);
+	MHI_LOG("BHI Error code = 0x%X { Dbg1 = 0x%X Dbg2 = 0x%X Dbg3 = 0x%X }\n", bhi_info->bhi_errorcode, bhi_info->bhi_errdbg1, bhi_info->bhi_errdbg2, bhi_info->bhi_errdbg3);
+	MHI_LOG("BHI Serial Number = 0x%X\n", bhi_info->bhi_sernum);
+	MHI_LOG("BHI SBL Anti-Rollback Ver = 0x%X\n", bhi_info->bhi_sblantirollbackver);
+	MHI_LOG("BHI Number of Segments = 0x%X\n", bhi_info->bhi_numsegs);
+	for (index = 0; index < MSMHWID_NUMDWORDS; index++)
+	{
+		snprintf(str+3*index, sizeof(str)-3*index, "%02x ", bhi_info->bhi_msmhwid[index]);
+	}
+	MHI_LOG("BHI MSM HW-Id = %s\n", str);
+
+	for (index = 0; index < 24; index++)
+	{
+		snprintf(str+3*index, sizeof(str)-3*index, "%02x ", bhi_info->bhi_oempkhash[index]);
+	}
+	MHI_LOG("BHI OEM PK Hash = %s\n", str);
+}
+
+static u32 bhi_read_reg(struct mhi_controller *mhi_cntrl, u32 offset)
+{
+	u32 out = 0;
+	/* read the requested BHI register, not a hard-coded one */
+	int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, offset, &out);
+
+	return (ret) ? 0 : out;
+}
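+
+/*
+ * BhiRead below snapshots every BHI information register into a
+ * BHI_INFO_TYPE; the IOCTL_BHI_GETDEVINFO path uses it to report device
+ * details to user space.
+ */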
+static int BhiRead(struct mhi_controller *mhi_cntrl, BHI_INFO_TYPE *bhi_info)
+{
+	ULONG index;
+
+	memset(bhi_info, 0x00, sizeof(BHI_INFO_TYPE));
+
+	/* bhi_ver */
+	bhi_info->bhi_ver_minor = bhi_read_reg(mhi_cntrl, BHI_BHIVERSION_MINOR);
+	bhi_info->bhi_ver_major = bhi_read_reg(mhi_cntrl, BHI_BHIVERSION_MAJOR);
+	bhi_info->bhi_image_address_low = bhi_read_reg(mhi_cntrl, BHI_IMGADDR_LOW);
+	bhi_info->bhi_image_address_high = bhi_read_reg(mhi_cntrl, BHI_IMGADDR_HIGH);
+	bhi_info->bhi_image_size = bhi_read_reg(mhi_cntrl, BHI_IMGSIZE);
+	bhi_info->bhi_rsvd1 = bhi_read_reg(mhi_cntrl, BHI_RSVD1);
+	bhi_info->bhi_imgtxdb = bhi_read_reg(mhi_cntrl, BHI_IMGTXDB);
+	bhi_info->bhi_rsvd2 = bhi_read_reg(mhi_cntrl, BHI_RSVD2);
+	bhi_info->bhi_msivec = bhi_read_reg(mhi_cntrl, BHI_INTVEC);
+	bhi_info->bhi_rsvd3 = bhi_read_reg(mhi_cntrl, BHI_RSVD3);
+	bhi_info->bhi_ee = bhi_read_reg(mhi_cntrl, BHI_EXECENV);
+	bhi_info->bhi_status = bhi_read_reg(mhi_cntrl, BHI_STATUS);
+	bhi_info->bhi_errorcode = bhi_read_reg(mhi_cntrl, BHI_ERRCODE);
+	bhi_info->bhi_errdbg1 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG1);
+	bhi_info->bhi_errdbg2 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG2);
+	bhi_info->bhi_errdbg3 = bhi_read_reg(mhi_cntrl, BHI_ERRDBG3);
+	bhi_info->bhi_sernum = bhi_read_reg(mhi_cntrl, BHI_SERIALNU);
+	bhi_info->bhi_sblantirollbackver = bhi_read_reg(mhi_cntrl, BHI_SBLANTIROLLVER);
+	bhi_info->bhi_numsegs = bhi_read_reg(mhi_cntrl, BHI_NUMSEG);
+	for (index = 0; index < MSMHWID_NUMDWORDS; index++)
+	{
+		bhi_info->bhi_msmhwid[index] = bhi_read_reg(mhi_cntrl, BHI_MSMHWID(index));
+	}
+	for (index = 0; index < OEMPKHASH_NUMDWORDS; index++)
+	{
+		bhi_info->bhi_oempkhash[index] = bhi_read_reg(mhi_cntrl, BHI_OEMPKHASH(index));
+	}
+	bhi_info->bhi_rsvd5 = bhi_read_reg(mhi_cntrl, BHI_RSVD5);
+	PrintBhiInfo(mhi_cntrl, bhi_info);
+	/* Check the Execution Environment */
+	if (!IsPBLExecEnv(bhi_info->bhi_ee))
+	{
+		MHI_LOG("E - EE: 0x%X Expected PBL/EDL\n", bhi_info->bhi_ee);
+	}
+
+	/* success */
+	return 0;
+}
+
+/* setup rddm vector table for rddm transfer */
+static void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
+			     struct image_info *img_info)
+{
+	struct mhi_buf *mhi_buf = img_info->mhi_buf;
+	struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
+	int i = 0;
+
+	for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
+		MHI_VERB("Setting vector:%pad size:%zu\n",
+			 &mhi_buf->dma_addr, mhi_buf->len);
+		bhi_vec->dma_addr = mhi_buf->dma_addr;
+		bhi_vec->size = mhi_buf->len;
+	}
+}
+
+/* collect rddm during kernel panic */
+static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
+{
+	int ret;
+	struct mhi_buf *mhi_buf;
+	u32 sequence_id;
+	u32 rx_status;
+	enum mhi_ee ee;
+	struct image_info *rddm_image = mhi_cntrl->rddm_image;
+	const u32 delayus = 2000;
+	u32 retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
+	const u32 rddm_timeout_us = 200000;
+	int rddm_retry = rddm_timeout_us / delayus; /* time to enter rddm */
+	void __iomem *base = mhi_cntrl->bhie;
+
+	MHI_LOG("Entered with pm_state:%s dev_state:%s ee:%s\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+		TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+	/*
+	 * This should only be executing during a kernel panic, we expect all
+	 * other cores to shutdown while we're collecting rddm buffer. After
+	 * returning from this function, we expect device to reset.
+	 *
+	 * Normally, we would read/write pm_state only after grabbing
+	 * pm_lock; since we're in a panic, skipping it. Also there is no
+	 * guarantee this state change would take effect since
+	 * we're setting it w/o grabbing pm_lock, it's best effort
+	 */
+	mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
+	/* update should take the effect immediately */
+	smp_wmb();
+
+	/* setup the RX vector table */
+	mhi_rddm_prepare(mhi_cntrl, rddm_image);
+	mhi_buf = &rddm_image->mhi_buf[rddm_image->entries - 1];
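+
+	/*
+	 * BHIe RX programming: hand the device the DMA address and length of
+	 * the vector table, then ring the RX doorbell with a non-zero
+	 * sequence id; the device reports progress via BHIE_RXVECSTATUS.
+	 */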
+	MHI_LOG("Starting BHIe programming for RDDM\n");
+
+	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
+		      upper_32_bits(mhi_buf->dma_addr));
+
+	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
+		      lower_32_bits(mhi_buf->dma_addr));
+
+	mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
+	sequence_id = get_random_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
+#else
+	sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
+#endif
+	if (unlikely(!sequence_id))
+		sequence_id = 1;
+
+	mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS,
+			    BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
+			    sequence_id);
+
+	MHI_LOG("Trigger device into RDDM mode\n");
+	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+
+	MHI_LOG("Waiting for device to enter RDDM\n");
+	while (rddm_retry--) {
+		ee = mhi_get_exec_env(mhi_cntrl);
+		if (ee == MHI_EE_RDDM)
+			break;
+
+		udelay(delayus);
+	}
+
+	if (rddm_retry <= 0) {
+		/* This is a hardware reset, will force device to enter rddm */
+		MHI_LOG(
+			"Did not enter RDDM, triggering host req. reset to force rddm\n");
+		mhi_write_reg(mhi_cntrl, mhi_cntrl->regs,
+			      MHI_SOC_RESET_REQ_OFFSET, MHI_SOC_RESET_REQ);
+		udelay(delayus);
+	}
+
+	ee = mhi_get_exec_env(mhi_cntrl);
+	MHI_LOG("Waiting for image download completion, current EE:%s\n",
+		TO_MHI_EXEC_STR(ee));
+	while (retry--) {
+		ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
+					 BHIE_RXVECSTATUS_STATUS_BMSK,
+					 BHIE_RXVECSTATUS_STATUS_SHFT,
+					 &rx_status);
+		if (ret)
+			return -EIO;
+
+		if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) {
+			MHI_LOG("RDDM successfully collected\n");
+			return 0;
+		}
+
+		udelay(delayus);
+	}
+
+	ee = mhi_get_exec_env(mhi_cntrl);
+	ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);
+
+	MHI_ERR("Did not complete RDDM transfer\n");
+	MHI_ERR("Current EE:%s\n", TO_MHI_EXEC_STR(ee));
+	MHI_ERR("RXVEC_STATUS:0x%x, ret:%d\n", rx_status, ret);
+
+	return -EIO;
+}
+
+/* download ramdump image from device */
+int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
+{
+	void __iomem *base = mhi_cntrl->bhie;
+	rwlock_t *pm_lock = &mhi_cntrl->pm_lock;
+	struct image_info *rddm_image = mhi_cntrl->rddm_image;
+	struct mhi_buf *mhi_buf;
+	int ret;
+	u32 rx_status;
+	u32 sequence_id;
+
+	if (!rddm_image)
+		return -ENOMEM;
+
+	if (in_panic)
+		return __mhi_download_rddm_in_panic(mhi_cntrl);
+
+	MHI_LOG("Waiting for device to enter RDDM state from EE:%s\n",
+		TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+	ret = wait_event_timeout(mhi_cntrl->state_event,
+				 mhi_cntrl->ee == MHI_EE_RDDM ||
+				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
+
+	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		MHI_ERR("MHI is not in valid state, pm_state:%s ee:%s\n",
+			to_mhi_pm_state_str(mhi_cntrl->pm_state),
+			TO_MHI_EXEC_STR(mhi_cntrl->ee));
+		return -EIO;
+	}
+
+	mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
+
+	/* vector table is the last entry */
+	mhi_buf = &rddm_image->mhi_buf[rddm_image->entries - 1];
+
+	read_lock_bh(pm_lock);
+	if
(!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + read_unlock_bh(pm_lock); + return -EIO; + } + + MHI_LOG("Starting BHIe Programming for RDDM\n"); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS, + upper_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS, + lower_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) + sequence_id = get_random_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK; +#else + sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK; +#endif + mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS, + BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT, + sequence_id); + read_unlock_bh(pm_lock); + + MHI_LOG("Upper:0x%x Lower:0x%x len:0x%zx sequence:%u\n", + upper_32_bits(mhi_buf->dma_addr), + lower_32_bits(mhi_buf->dma_addr), + mhi_buf->len, sequence_id); + MHI_LOG("Waiting for image download completion\n"); + + /* waiting for image download completion */ + wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, + BHIE_RXVECSTATUS_OFFS, + BHIE_RXVECSTATUS_STATUS_BMSK, + BHIE_RXVECSTATUS_STATUS_SHFT, + &rx_status) || rx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO; +} +EXPORT_SYMBOL(mhi_download_rddm_img); + +static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl, + const struct mhi_buf *mhi_buf) +{ + void __iomem *base = mhi_cntrl->bhie; + rwlock_t *pm_lock = &mhi_cntrl->pm_lock; + u32 tx_status; + + read_lock_bh(pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + read_unlock_bh(pm_lock); + return -EIO; + } + + MHI_LOG("Starting BHIe Programming\n"); + + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS, + upper_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS, + lower_32_bits(mhi_buf->dma_addr)); + + mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) + mhi_cntrl->sequence_id = get_random_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK; +#else + mhi_cntrl->sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK; +#endif + mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS, + BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT, + mhi_cntrl->sequence_id); + read_unlock_bh(pm_lock); + + MHI_LOG("Upper:0x%x Lower:0x%x len:0x%zx sequence:%u\n", + upper_32_bits(mhi_buf->dma_addr), + lower_32_bits(mhi_buf->dma_addr), + mhi_buf->len, mhi_cntrl->sequence_id); + MHI_LOG("Waiting for image transfer completion\n"); + + /* waiting for image download completion */ + wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, + BHIE_TXVECSTATUS_OFFS, + BHIE_TXVECSTATUS_STATUS_BMSK, + BHIE_TXVECSTATUS_STATUS_SHFT, + &tx_status) || tx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + return (tx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) ? 
0 : -EIO; +} + +static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl, + dma_addr_t dma_addr, + size_t size) +{ + u32 tx_status, val; + u32 ImgTxDb = 0x1; + int i, ret; + void __iomem *base = mhi_cntrl->bhi; + rwlock_t *pm_lock = &mhi_cntrl->pm_lock; + struct { + char *name; + u32 offset; + } error_reg[] = { + { "ERROR_CODE", BHI_ERRCODE }, + { "ERROR_DBG1", BHI_ERRDBG1 }, + { "ERROR_DBG2", BHI_ERRDBG2 }, + { "ERROR_DBG3", BHI_ERRDBG3 }, + { NULL }, + }; + + MHI_LOG("Starting BHI programming\n"); + + /* program start sbl download via bhi protocol */ + read_lock_bh(pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + read_unlock_bh(pm_lock); + goto invalid_pm_state; + } + + mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0); + mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, + upper_32_bits(dma_addr)); + mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW, + lower_32_bits(dma_addr)); + mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size); + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, NUM_MHI_EVT_RINGS); + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, NUM_MHI_HW_EVT_RINGS); + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, mhi_cntrl->msi_irq_base); + mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, ImgTxDb); + read_unlock_bh(pm_lock); + + MHI_LOG("Waiting for image transfer completion\n"); + + /* waiting for image download completion */ + ret = wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, BHI_STATUS, + BHI_STATUS_MASK, BHI_STATUS_SHIFT, + &tx_status) || tx_status, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + goto invalid_pm_state; + + if (tx_status == BHI_STATUS_ERROR) { + MHI_ERR("Image transfer failed\n"); + read_lock_bh(pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + for (i = 0; error_reg[i].name; i++) { + ret = mhi_read_reg(mhi_cntrl, base, + error_reg[i].offset, &val); + if (ret) + break; + MHI_ERR("reg:%s value:0x%x\n", + error_reg[i].name, val); + } + } + read_unlock_bh(pm_lock); + goto invalid_pm_state; + } + + return (tx_status == BHI_STATUS_SUCCESS) ? 
0 : -ETIMEDOUT; + +invalid_pm_state: + + return -EIO; +} + +void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info *image_info) +{ + int i; + struct mhi_buf *mhi_buf = image_info->mhi_buf; + + for (i = 0; i < image_info->entries; i++, mhi_buf++) + mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf, + mhi_buf->dma_addr); + + kfree(image_info->mhi_buf); + kfree(image_info); +} + +int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info **image_info, + size_t alloc_size) +{ + size_t seg_size = mhi_cntrl->seg_len; + /* requier additional entry for vec table */ + int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1; + int i; + struct image_info *img_info; + struct mhi_buf *mhi_buf; + + MHI_LOG("Allocating bytes:%zu seg_size:%zu total_seg:%u\n", + alloc_size, seg_size, segments); + + img_info = kzalloc(sizeof(*img_info), GFP_KERNEL); + if (!img_info) + return -ENOMEM; + + /* allocate memory for entries */ + img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf), + GFP_KERNEL); + if (!img_info->mhi_buf) + goto error_alloc_mhi_buf; + + /* allocate and populate vector table */ + mhi_buf = img_info->mhi_buf; + for (i = 0; i < segments; i++, mhi_buf++) { + size_t vec_size = seg_size; + + /* last entry is for vector table */ + if (i == segments - 1) + vec_size = sizeof(struct bhi_vec_entry) * i; + + mhi_buf->len = vec_size; + mhi_buf->buf = mhi_alloc_coherent(mhi_cntrl, vec_size, + &mhi_buf->dma_addr, GFP_KERNEL); + if (!mhi_buf->buf) + goto error_alloc_segment; + + MHI_LOG("Entry:%d Address:0x%llx size:%zu\n", i, + (unsigned long long)mhi_buf->dma_addr, + mhi_buf->len); + } + + img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf; + img_info->entries = segments; + *image_info = img_info; + + MHI_LOG("Successfully allocated bhi vec table\n"); + + return 0; + +error_alloc_segment: + for (--i, --mhi_buf; i >= 0; i--, mhi_buf--) + mhi_free_coherent(mhi_cntrl, mhi_buf->len, mhi_buf->buf, + mhi_buf->dma_addr); + +error_alloc_mhi_buf: + kfree(img_info); + + return -ENOMEM; +} + +static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl, + const struct firmware *firmware, + struct image_info *img_info) +{ + size_t remainder = firmware->size; + size_t to_cpy; + const u8 *buf = firmware->data; + int i = 0; + struct mhi_buf *mhi_buf = img_info->mhi_buf; + struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; + + while (remainder) { + MHI_ASSERT(i >= img_info->entries, "malformed vector table"); + + to_cpy = min(remainder, mhi_buf->len); + memcpy(mhi_buf->buf, buf, to_cpy); + bhi_vec->dma_addr = mhi_buf->dma_addr; + bhi_vec->size = to_cpy; + + MHI_VERB("Setting Vector:0x%llx size: %llu\n", + bhi_vec->dma_addr, bhi_vec->size); + buf += to_cpy; + remainder -= to_cpy; + i++; + bhi_vec++; + mhi_buf++; + } +} + +void mhi_fw_load_worker(struct work_struct *work) +{ + int ret; + struct mhi_controller *mhi_cntrl; + const char *fw_name; + const struct firmware *firmware; + struct image_info *image_info; + void *buf; + dma_addr_t dma_addr; + size_t size; + + mhi_cntrl = container_of(work, struct mhi_controller, fw_worker); + + MHI_LOG("Waiting for device to enter PBL from EE:%s\n", + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + ret = wait_event_timeout(mhi_cntrl->state_event, + MHI_IN_PBL(mhi_cntrl->ee) || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("MHI is not in valid state\n"); + return; + } + + MHI_LOG("Device current EE:%s\n", 
TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + /* if device in pthru, we do not have to load firmware */ + if (mhi_cntrl->ee == MHI_EE_PTHRU) + return; + + fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ? + mhi_cntrl->edl_image : mhi_cntrl->fw_image; + + if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size || + !mhi_cntrl->seg_len))) { + MHI_ERR("No firmware image defined or !sbl_size || !seg_len\n"); + return; + } + + ret = request_firmware(&firmware, fw_name, mhi_cntrl->dev); + if (ret) { + MHI_ERR("Error loading firmware, ret:%d\n", ret); + return; + } + + size = (mhi_cntrl->fbc_download) ? mhi_cntrl->sbl_size : firmware->size; + + /* the sbl size provided is maximum size, not necessarily image size */ + if (size > firmware->size) + size = firmware->size; + + buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL); + if (!buf) { + MHI_ERR("Could not allocate memory for image\n"); + release_firmware(firmware); + return; + } + + /* load sbl image */ + memcpy(buf, firmware->data, size); + ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size); + mhi_free_coherent(mhi_cntrl, size, buf, dma_addr); + + /* error or in edl, we're done */ + if (ret || mhi_cntrl->ee == MHI_EE_EDL) { + release_firmware(firmware); + return; + } + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_RESET; + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* + * if we're doing fbc, populate vector tables while + * device transitioning into MHI READY state + */ + if (mhi_cntrl->fbc_download) { + ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, + firmware->size); + if (ret) { + MHI_ERR("Error alloc size of %zu\n", firmware->size); + goto error_alloc_fw_table; + } + + MHI_LOG("Copying firmware image into vector table\n"); + + /* load the firmware into BHIE vec table */ + mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image); + } + + /* transitioning into MHI RESET->READY state */ + ret = mhi_ready_state_transition(mhi_cntrl); + + MHI_LOG("To Reset->Ready PM_STATE:%s MHI_STATE:%s EE:%s, ret:%d\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee), ret); + + if (!mhi_cntrl->fbc_download) { + release_firmware(firmware); + return; + } + + if (ret) { + MHI_ERR("Did not transition to READY state\n"); + goto error_read; + } + + /* wait for SBL event */ + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->ee == MHI_EE_SBL || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("MHI did not enter BHIE\n"); + goto error_read; + } + + /* start full firmware image download */ + image_info = mhi_cntrl->fbc_image; + ret = mhi_fw_load_amss(mhi_cntrl, + /* last entry is vec table */ + &image_info->mhi_buf[image_info->entries - 1]); + + MHI_LOG("amss fw_load, ret:%d\n", ret); + + release_firmware(firmware); + + return; + +error_read: + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); + mhi_cntrl->fbc_image = NULL; + +error_alloc_fw_table: + release_firmware(firmware); +} + +int BhiWrite(struct mhi_controller *mhi_cntrl, void __user *ubuf, size_t size) +{ + int ret; + dma_addr_t dma_addr; + void *dma_buf; + + MHI_LOG("Device current EE:%s, M:%s, PM:%s\n", + TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)), + TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + +#if 0 + if (mhi_get_exec_env(mhi_cntrl) == MHI_EE_EDL && mhi_cntrl->ee != MHI_EE_EDL) { + mhi_cntrl->ee = 
MHI_EE_EDL;
+		wait_event_timeout(mhi_cntrl->state_event,
+			MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+			msecs_to_jiffies(mhi_cntrl->timeout_ms + 500));
+	}
+#endif
+
+#if 0
+	if (!MHI_IN_PBL(mhi_cntrl->ee) || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		MHI_ERR("MHI is not in valid BHI state\n");
+		return -EINVAL;
+	}
+#endif
+
+	if (mhi_cntrl->ee != MHI_EE_EDL) {
+		MHI_ERR("MHI is not in EDL state\n");
+		return -EINVAL;
+	}
+
+	dma_buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
+	if (!dma_buf) {
+		MHI_ERR("Could not allocate memory for image\n");
+		return -ENOMEM;
+	}
+
+	ret = copy_from_user(dma_buf, ubuf, size);
+	if (ret) {
+		MHI_ERR("IOCTL_BHI_WRITEIMAGE copy buf error, ret = %d\n", ret);
+		mhi_free_coherent(mhi_cntrl, size, dma_buf, dma_addr);
+		return -EFAULT;
+	}
+
+	ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size);
+	mhi_free_coherent(mhi_cntrl, size, dma_buf, dma_addr);
+
+	if (ret) {
+		MHI_ERR("ret = %d, ee=%d\n", ret, mhi_cntrl->ee);
+		goto error_state;
+	}
+
+	write_lock_irq(&mhi_cntrl->pm_lock);
+	mhi_cntrl->dev_state = MHI_STATE_RESET;
+	write_unlock_irq(&mhi_cntrl->pm_lock);
+
+	/* transitioning into MHI RESET->READY state */
+	ret = mhi_ready_state_transition(mhi_cntrl);
+	if (ret) {
+		MHI_ERR("Did not transition to READY state\n");
+		goto error_state;
+	}
+
+	MHI_LOG("To Reset->Ready PM_STATE:%s MHI_STATE:%s EE:%s, ret:%d\n",
+		to_mhi_pm_state_str(mhi_cntrl->pm_state),
+		TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+		TO_MHI_EXEC_STR(mhi_cntrl->ee), ret);
+
+	/* wait for Flash Programmer Environment event */
+	ret = wait_event_timeout(mhi_cntrl->state_event,
+			mhi_cntrl->ee == MHI_EE_FP ||
+			MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+			msecs_to_jiffies(mhi_cntrl->timeout_ms));
+	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
+		MHI_ERR("MHI did not enter Flash Programmer Environment\n");
+		goto error_state;
+	}
+
+	MHI_LOG("MHI enter Flash Programmer Environment\n");
+	return 0;
+
+error_state:
+	MHI_LOG("Device current EE:%s, M:%s\n",
+		TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl)),
+		TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)));
+
+	return ret;
+}
+
+long bhi_get_dev_info(struct mhi_controller *mhi_cntrl, void __user *ubuf)
+{
+	long ret = -EINVAL;
+	BHI_INFO_TYPE bhi_info;
+
+	ret = BhiRead(mhi_cntrl, &bhi_info);
+	if (ret) {
+		MHI_ERR("IOCTL_BHI_GETDEVINFO BhiRead error, ret = %ld\n", ret);
+		return ret;
+	}
+
+	ret = copy_to_user(ubuf, &bhi_info, sizeof(bhi_info));
+	if (ret) {
+		MHI_ERR("IOCTL_BHI_GETDEVINFO copy error, ret = %ld\n", ret);
+		ret = -EFAULT;
+	}
+
+	return ret;
+}
+
+long bhi_write_image(struct mhi_controller *mhi_cntrl, void __user *ubuf)
+{
+	long ret = -EINVAL;
+	size_t size;
+
+	ret = copy_from_user(&size, ubuf, sizeof(size));
+	if (ret) {
+		MHI_ERR("IOCTL_BHI_WRITEIMAGE copy size error, ret = %ld\n", ret);
+		return -EFAULT;
+	}
+	if (!size) {
+		MHI_ERR("IOCTL_BHI_WRITEIMAGE invalid size\n");
+		return -EINVAL;
+	}
+	ret = BhiWrite(mhi_cntrl, ubuf+sizeof(size), size);
+	if (ret) {
+		MHI_ERR("IOCTL_BHI_WRITEIMAGE BhiWrite error, ret = %ld\n", ret);
+	}
+
+	return ret;
+}
diff --git a/wwan/driver/quectel_MHI/src/core/mhi_dtr.c b/wwan/driver/quectel_MHI/src/core/mhi_dtr.c
new file mode 100644
index 0000000..7ce44b3
--- /dev/null
+++ b/wwan/driver/quectel_MHI/src/core/mhi_dtr.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2018-2019, The Linux Foundation.
All rights reserved.*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mhi.h" +#include "mhi_internal.h" + +struct __packed dtr_ctrl_msg { + u32 preamble; + u32 msg_id; + u32 dest_id; + u32 size; + u32 msg; +}; + +#define CTRL_MAGIC (0x4C525443) +#define CTRL_MSG_DTR BIT(0) +#define CTRL_MSG_RTS BIT(1) +#define CTRL_MSG_DCD BIT(0) +#define CTRL_MSG_DSR BIT(1) +#define CTRL_MSG_RI BIT(3) +#define CTRL_HOST_STATE (0x10) +#define CTRL_DEVICE_STATE (0x11) +#define CTRL_GET_CHID(dtr) (dtr->dest_id & 0xFF) + +static int mhi_dtr_tiocmset(struct mhi_controller *mhi_cntrl, + struct mhi_device *mhi_dev, + u32 tiocm) +{ + struct dtr_ctrl_msg *dtr_msg = NULL; + struct mhi_chan *dtr_chan = mhi_cntrl->dtr_dev->ul_chan; + spinlock_t *res_lock = &mhi_dev->dev.devres_lock; + u32 cur_tiocm; + int ret = 0; + + cur_tiocm = mhi_dev->tiocm & ~(TIOCM_CD | TIOCM_DSR | TIOCM_RI); + + tiocm &= (TIOCM_DTR | TIOCM_RTS); + + /* state did not changed */ + if (cur_tiocm == tiocm) + return 0; + + mutex_lock(&dtr_chan->mutex); + + dtr_msg = kzalloc(sizeof(*dtr_msg), GFP_KERNEL); + if (!dtr_msg) { + ret = -ENOMEM; + goto tiocm_exit; + } + + dtr_msg->preamble = CTRL_MAGIC; + dtr_msg->msg_id = CTRL_HOST_STATE; + dtr_msg->dest_id = mhi_dev->ul_chan_id; + dtr_msg->size = sizeof(u32); + if (tiocm & TIOCM_DTR) + dtr_msg->msg |= CTRL_MSG_DTR; + if (tiocm & TIOCM_RTS) + dtr_msg->msg |= CTRL_MSG_RTS; + +/* +* 'minicom -D /dev/mhi_DUN' will send RTS:1 when open, and RTS:0 when exit. +* RTS:0 will prevent modem output AT response. +* But 'busybox microcom' do not send any RTS to modem. +* [75094.969783] mhi_uci_q 0306_00.03.00_DUN: mhi_dtr_tiocmset DTR:0 RTS:1 +* [75100.210994] mhi_uci_q 0306_00.03.00_DUN: mhi_dtr_tiocmset DTR:0 RTS:0 +*/ + dev_dbg(&mhi_dev->dev, "%s DTR:%d RTS:%d\n", __func__, + !!(tiocm & TIOCM_DTR), !!(tiocm & TIOCM_RTS)); + + reinit_completion(&dtr_chan->completion); + ret = mhi_queue_transfer(mhi_cntrl->dtr_dev, DMA_TO_DEVICE, dtr_msg, + sizeof(*dtr_msg), MHI_EOT); + if (ret) + goto tiocm_exit; + + ret = wait_for_completion_timeout(&dtr_chan->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (!ret) { + MHI_ERR("Failed to receive transfer callback\n"); + ret = -EIO; + goto tiocm_exit; + } + + ret = 0; + spin_lock_irq(res_lock); + mhi_dev->tiocm &= ~(TIOCM_DTR | TIOCM_RTS); + mhi_dev->tiocm |= tiocm; + spin_unlock_irq(res_lock); + +tiocm_exit: + kfree(dtr_msg); + mutex_unlock(&dtr_chan->mutex); + + return ret; +} + +long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + int ret; + + /* ioctl not supported by this controller */ + if (!mhi_cntrl->dtr_dev) + return -EIO; + + switch (cmd) { + case TIOCMGET: + return mhi_dev->tiocm; + case TIOCMSET: + { + u32 tiocm; + + ret = get_user(tiocm, (u32 *)arg); + if (ret) + return ret; + + return mhi_dtr_tiocmset(mhi_cntrl, mhi_dev, tiocm); + } + default: + break; + } + + return -EINVAL; +} +EXPORT_SYMBOL(mhi_ioctl); + +static int mhi_dtr_queue_inbound(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev = mhi_cntrl->dtr_dev; + int nr_trbs = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE); + size_t mtu = mhi_dev->mtu; + void *buf; + int ret = -EIO, i; + + for (i = 0; i < nr_trbs; i++) { + buf = kmalloc(mtu, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf, mtu, + MHI_EOT); + if (ret) { + kfree(buf); + return ret; + } + } + + return 
ret; +} + +static void mhi_dtr_dl_xfer_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct dtr_ctrl_msg *dtr_msg = mhi_result->buf_addr; + u32 chan; + spinlock_t *res_lock; + + if (mhi_result->transaction_status == -ENOTCONN) { + kfree(mhi_result->buf_addr); + return; + } + + if (mhi_result->bytes_xferd != sizeof(*dtr_msg)) { + MHI_ERR("Unexpected length %zu received\n", + mhi_result->bytes_xferd); + return; + } + + MHI_LOG("preamble:0x%x msg_id:%u dest_id:%u msg:0x%x\n", + dtr_msg->preamble, dtr_msg->msg_id, dtr_msg->dest_id, + dtr_msg->msg); + + chan = CTRL_GET_CHID(dtr_msg); + if (chan >= mhi_cntrl->max_chan) + goto auto_queue; + + mhi_dev = mhi_cntrl->mhi_chan[chan].mhi_dev; + if (!mhi_dev) + goto auto_queue; + + res_lock = &mhi_dev->dev.devres_lock; + spin_lock_irq(res_lock); + mhi_dev->tiocm &= ~(TIOCM_CD | TIOCM_DSR | TIOCM_RI); + + if (dtr_msg->msg & CTRL_MSG_DCD) + mhi_dev->tiocm |= TIOCM_CD; + + if (dtr_msg->msg & CTRL_MSG_DSR) + mhi_dev->tiocm |= TIOCM_DSR; + + if (dtr_msg->msg & CTRL_MSG_RI) + mhi_dev->tiocm |= TIOCM_RI; + spin_unlock_irq(res_lock); + +auto_queue: + mhi_queue_transfer(mhi_cntrl->dtr_dev, DMA_FROM_DEVICE, mhi_result->buf_addr, + mhi_cntrl->dtr_dev->mtu, MHI_EOT); +} + +static void mhi_dtr_ul_xfer_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *dtr_chan = mhi_cntrl->dtr_dev->ul_chan; + + MHI_VERB("Received with status:%d\n", mhi_result->transaction_status); + if (!mhi_result->transaction_status) + complete(&dtr_chan->completion); +} + +static void mhi_dtr_remove(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + mhi_cntrl->dtr_dev = NULL; +} + +static int mhi_dtr_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + int ret; + + MHI_LOG("Enter for DTR control channel\n"); + + mhi_dev->mtu = min_t(size_t, id->driver_data, mhi_dev->mtu); + ret = mhi_prepare_for_transfer(mhi_dev); + if (!ret) + mhi_cntrl->dtr_dev = mhi_dev; + + if (!ret) + ret = mhi_dtr_queue_inbound(mhi_cntrl); + + MHI_LOG("Exit with ret:%d\n", ret); + + return ret; +} + +static const struct mhi_device_id mhi_dtr_table[] = { + { .chan = "IP_CTRL", .driver_data = sizeof(struct dtr_ctrl_msg) }, + {}, +}; + +static struct mhi_driver mhi_dtr_driver = { + .id_table = mhi_dtr_table, + .remove = mhi_dtr_remove, + .probe = mhi_dtr_probe, + .ul_xfer_cb = mhi_dtr_ul_xfer_cb, + .dl_xfer_cb = mhi_dtr_dl_xfer_cb, + .driver = { + .name = "MHI_DTR", + .owner = THIS_MODULE, + } +}; + +int __init mhi_dtr_init(void) +{ + return mhi_driver_register(&mhi_dtr_driver); +} +void mhi_dtr_exit(void) { + mhi_driver_unregister(&mhi_dtr_driver); +} diff --git a/wwan/driver/quectel_MHI/src/core/mhi_init.c b/wwan/driver/quectel_MHI/src/core/mhi_init.c new file mode 100644 index 0000000..e0c933d --- /dev/null +++ b/wwan/driver/quectel_MHI/src/core/mhi_init.c @@ -0,0 +1,2773 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,11,0 )) +#include +#else +#include +#endif +#include "mhi.h" +#include "mhi_internal.h" + +struct mhi_controller_map { + u32 dev_id; + u32 domain; + u32 bus; + u32 slot; + struct mhi_controller *mhi_cntrl; +}; + +#define MAX_MHI_CONTROLLER 16 +struct mhi_controller_map mhi_controller_minors[MAX_MHI_CONTROLLER]; + +#define MHI_CNTRL_DRIVER_NAME "mhi_cntrl_q" +struct mhi_cntrl_drv { + struct list_head head; + struct mutex lock; + struct class *class; + int major; +}; +static struct mhi_cntrl_drv mhi_cntrl_drv; + +const char * const mhi_ee_str[MHI_EE_MAX] = { + [MHI_EE_PBL] = "PBL", + [MHI_EE_SBL] = "SBL", + [MHI_EE_AMSS] = "AMSS", + [MHI_EE_RDDM] = "RDDM", + [MHI_EE_WFW] = "WFW", + [MHI_EE_PTHRU] = "PASS THRU", + [MHI_EE_EDL] = "EDL", + [MHI_EE_FP] = "FlashProg", + [MHI_EE_DISABLE_TRANSITION] = "DISABLE", +}; + +const char * const mhi_state_tran_str[MHI_ST_TRANSITION_MAX] = { + [MHI_ST_TRANSITION_PBL] = "PBL", + [MHI_ST_TRANSITION_READY] = "READY", + [MHI_ST_TRANSITION_SBL] = "SBL", + [MHI_ST_TRANSITION_MISSION_MODE] = "MISSION MODE", + [MHI_ST_TRANSITION_FP] = "FlashProg", +}; + +const char * const mhi_state_str[MHI_STATE_MAX] = { + [MHI_STATE_RESET] = "RESET", + [MHI_STATE_READY] = "READY", + [MHI_STATE_M0] = "M0", + [MHI_STATE_M1] = "M1", + [MHI_STATE_M2] = "M2", + [MHI_STATE_M3] = "M3", + [MHI_STATE_BHI] = "BHI", + [MHI_STATE_SYS_ERR] = "SYS_ERR", +}; + +static const char * const mhi_pm_state_str[] = { + [MHI_PM_BIT_DISABLE] = "DISABLE", + [MHI_PM_BIT_POR] = "POR", + [MHI_PM_BIT_M0] = "M0", + [MHI_PM_BIT_M2] = "M2", + [MHI_PM_BIT_M3_ENTER] = "M?->M3", + [MHI_PM_BIT_M3] = "M3", + [MHI_PM_BIT_M3_EXIT] = "M3->M0", + [MHI_PM_BIT_FW_DL_ERR] = "FW DL Error", + [MHI_PM_BIT_SYS_ERR_DETECT] = "SYS_ERR Detect", + [MHI_PM_BIT_SYS_ERR_PROCESS] = "SYS_ERR Process", + [MHI_PM_BIT_SHUTDOWN_PROCESS] = "SHUTDOWN Process", + [MHI_PM_BIT_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect", +}; + +struct mhi_bus mhi_bus; + +const char *to_mhi_pm_state_str(enum MHI_PM_STATE state) +{ + int index = find_last_bit((unsigned long *)&state, 32); + + if (index >= ARRAY_SIZE(mhi_pm_state_str)) + return "Invalid State"; + + return mhi_pm_state_str[index]; +} + +#if 0 +/* MHI protocol require transfer ring to be aligned to ring length */ +static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring, + u64 len) +{ + ring->alloc_size = len + (len - 1); + ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size, + &ring->dma_handle, GFP_KERNEL); + if (!ring->pre_aligned) + return -ENOMEM; + + ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1); + ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle); + return 0; +} +#endif + +static void mhi_ring_aligned_check(struct mhi_controller *mhi_cntrl, u64 rbase, u64 rlen) { + uint64_t ra; + + ra = rbase; + do_div(ra, roundup_pow_of_two(rlen)); + + if (rbase != ra * roundup_pow_of_two(rlen)) { + MHI_ERR("bad params ring base not aligned 0x%llx align 0x%lx\n", rbase, roundup_pow_of_two(rlen)); + } +} + +void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + + + if (mhi_cntrl->msi_allocated == 1) { + free_irq(mhi_cntrl->irq[mhi_cntrl->msi_irq_base], mhi_cntrl); + return; + } + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if 
(mhi_event->offload_ev) + continue; + + free_irq(mhi_cntrl->irq[mhi_event->msi], mhi_event); + } + + free_irq(mhi_cntrl->irq[mhi_cntrl->msi_irq_base], mhi_cntrl); +} + +int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl) +{ + int i; + int ret; + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + + if (mhi_cntrl->msi_allocated == 1) { + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + mhi_event->msi = 0; + } + + ret = request_threaded_irq(mhi_cntrl->irq[0], NULL, + mhi_one_msi_handlr, IRQF_ONESHOT, "mhi", mhi_cntrl); + if (ret) { + MHI_ERR("Error requesting irq:%d, ret=%d\n", mhi_cntrl->irq[0], ret); + } + return ret; + } + + /* for BHI INTVEC msi */ + ret = request_threaded_irq(mhi_cntrl->irq[mhi_cntrl->msi_irq_base], mhi_intvec_handlr, + mhi_intvec_threaded_handlr, IRQF_ONESHOT, + "mhi", mhi_cntrl); + if (ret) + return ret; + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + ret = request_irq(mhi_cntrl->irq[mhi_event->msi], + mhi_msi_handlr, IRQF_SHARED, "mhi", + mhi_event); + if (ret) { + MHI_ERR("Error requesting irq:%d for ev:%d\n", + mhi_cntrl->irq[mhi_event->msi], i); + goto error_request; + } + } + + return 0; + +error_request: + for (--i, --mhi_event; i >= 0; i--, mhi_event--) { + if (mhi_event->offload_ev) + continue; + + free_irq(mhi_cntrl->irq[mhi_event->msi], mhi_event); + } + free_irq(mhi_cntrl->irq[0], mhi_cntrl); + + return ret; +} + +void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt; + struct mhi_cmd *mhi_cmd; + struct mhi_event *mhi_event; + struct mhi_ring *ring; + + mhi_cmd = mhi_cntrl->mhi_cmd; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) { + ring = &mhi_cmd->ring; +#if 0 + mhi_free_coherent(mhi_cntrl, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); +#endif + ring->base = NULL; + ring->iommu_base = 0; + } + +#if 0 + mhi_free_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, + mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); +#endif + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + ring = &mhi_event->ring; +#if 0 + mhi_free_coherent(mhi_cntrl, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); +#endif + ring->base = NULL; + ring->iommu_base = 0; + } + +#if 0 + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) * + mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, + mhi_ctxt->er_ctxt_addr); + + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) * + mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, + mhi_ctxt->chan_ctxt_addr); +#endif + + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->ctrl_seg), mhi_ctxt->ctrl_seg, mhi_ctxt->ctrl_seg_addr); + kfree(mhi_ctxt); + mhi_cntrl->mhi_ctxt = NULL; +} + +static int mhi_init_debugfs_mhi_states_open(struct inode *inode, + struct file *fp) +{ + return single_open(fp, mhi_debugfs_mhi_states_show, inode->i_private); +} + +static int mhi_init_debugfs_mhi_event_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_mhi_event_show, inode->i_private); +} + +static int mhi_init_debugfs_mhi_chan_open(struct inode *inode, struct file *fp) +{ + return single_open(fp, mhi_debugfs_mhi_chan_show, inode->i_private); +} + +static const struct file_operations debugfs_state_ops = { + .open = mhi_init_debugfs_mhi_states_open, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_ev_ops = { + 
.open = mhi_init_debugfs_mhi_event_open, + .release = single_release, + .read = seq_read, +}; + +static const struct file_operations debugfs_chan_ops = { + .open = mhi_init_debugfs_mhi_chan_open, + .release = single_release, + .read = seq_read, +}; + +DEFINE_SIMPLE_ATTRIBUTE(debugfs_trigger_reset_fops, NULL, + mhi_debugfs_trigger_reset, "%llu\n"); + +#ifdef ENABLE_MHI_MON +struct mon_event_text { + struct list_head e_link; + int type; /* submit, complete, etc. */ + unsigned int tstamp; + u32 chan; + dma_addr_t wp; + struct mhi_tre mhi_tre; + u8 data[32]; + size_t len; +}; + +#define EVENT_MAX (16*PAGE_SIZE / sizeof(struct mon_event_text)) +#define PRINTF_DFL 250 +#define SLAB_NAME_SZ 30 + +struct mon_reader_text { + struct kmem_cache *e_slab; + int nevents; + struct list_head e_list; + struct mon_reader r; /* In C, parent class can be placed anywhere */ + + wait_queue_head_t wait; + int printf_size; + char *printf_buf; + int left_size; + int left_pos; + struct mutex printf_lock; + + char slab_name[SLAB_NAME_SZ]; +}; + +struct mon_text_ptr { + int cnt, limit; + char *pbuf; +}; + +static DEFINE_MUTEX(mon_lock); + +static inline unsigned int mon_get_timestamp(void) +{ + struct timespec64 now; + unsigned int stamp; + + ktime_get_ts64(&now); + stamp = now.tv_sec & 0xFFF; /* 2^32 = 4294967296. Limit to 4096s. */ + stamp = stamp * USEC_PER_SEC + now.tv_nsec / NSEC_PER_USEC; + return stamp; +} + +static void mon_text_event(struct mon_reader_text *rp, + u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len, + char ev_type) +{ + struct mon_event_text *ep; + + if (rp->nevents >= EVENT_MAX || + (ep = kmem_cache_alloc(rp->e_slab, GFP_ATOMIC)) == NULL) { + rp->r.m_bus->cnt_text_lost++; + return; + } + + ep->type = ev_type; + ep->tstamp = mon_get_timestamp(); + ep->chan = chan; + ep->wp = wp; + ep->mhi_tre = *mhi_tre; + if (len > sizeof(ep->data)) + len = sizeof(ep->data); + memcpy(ep->data, buf, len); + ep->len = len; + rp->nevents++; + list_add_tail(&ep->e_link, &rp->e_list); + wake_up(&rp->wait); +} + +static void mon_text_submit(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len) +{ + struct mon_reader_text *rp = data; + mon_text_event(rp, chan, wp, mhi_tre, buf, len, 'W'); +} + +static void mon_text_receive(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len) +{ + struct mon_reader_text *rp = data; + mon_text_event(rp, chan, wp, mhi_tre, buf, len, 'R'); +} + +static void mon_text_complete(void *data, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre) +{ + struct mon_reader_text *rp = data; + mon_text_event(rp, chan, wp, mhi_tre, NULL, 0, 'E'); +} + +static void mon_reader_add(struct mhi_controller *mbus, struct mon_reader *r) +{ + unsigned long flags; + + spin_lock_irqsave(&mbus->lock, flags); + mbus->nreaders++; + list_add_tail(&r->r_link, &mbus->r_list); + spin_unlock_irqrestore(&mbus->lock, flags); + + kref_get(&mbus->ref); +} + +static void mon_bus_drop(struct kref *r) +{ + struct mhi_controller *mbus = container_of(r, struct mhi_controller, ref); + kfree(mbus); +} + +static void mon_reader_del(struct mhi_controller *mbus, struct mon_reader *r) +{ + unsigned long flags; + + spin_lock_irqsave(&mbus->lock, flags); + list_del(&r->r_link); + --mbus->nreaders; + spin_unlock_irqrestore(&mbus->lock, flags); + + kref_put(&mbus->ref, mon_bus_drop); +} + +static void mon_text_ctor(void *mem) +{ + /* + * Nothing to initialize. No, really! + * So, we fill it with garbage to emulate a reused object. 
+ */ + memset(mem, 0xe5, sizeof(struct mon_event_text)); +} + +static int mon_text_open(struct inode *inode, struct file *file) +{ + struct mhi_controller *mbus; + struct mon_reader_text *rp; + int rc; + + mutex_lock(&mon_lock); + mbus = inode->i_private; + + rp = kzalloc(sizeof(struct mon_reader_text), GFP_KERNEL); + if (rp == NULL) { + rc = -ENOMEM; + goto err_alloc; + } + INIT_LIST_HEAD(&rp->e_list); + init_waitqueue_head(&rp->wait); + mutex_init(&rp->printf_lock); + + rp->printf_size = PRINTF_DFL; + rp->printf_buf = kmalloc(rp->printf_size, GFP_KERNEL); + if (rp->printf_buf == NULL) { + rc = -ENOMEM; + goto err_alloc_pr; + } + + rp->r.m_bus = mbus; + rp->r.r_data = rp; + rp->r.rnf_submit = mon_text_submit; + rp->r.rnf_receive = mon_text_receive; + rp->r.rnf_complete = mon_text_complete; + + snprintf(rp->slab_name, SLAB_NAME_SZ, "mon_text_%p", rp); + rp->e_slab = kmem_cache_create(rp->slab_name, + sizeof(struct mon_event_text), sizeof(long), 0, + mon_text_ctor); + if (rp->e_slab == NULL) { + rc = -ENOMEM; + goto err_slab; + } + + mon_reader_add(mbus, &rp->r); + + file->private_data = rp; + mutex_unlock(&mon_lock); + return 0; + +// err_busy: +// kmem_cache_destroy(rp->e_slab); +err_slab: + kfree(rp->printf_buf); +err_alloc_pr: + kfree(rp); +err_alloc: + mutex_unlock(&mon_lock); + return rc; +} + +static struct mon_event_text *mon_text_fetch(struct mon_reader_text *rp, + struct mhi_controller *mbus) +{ + struct list_head *p; + unsigned long flags; + + spin_lock_irqsave(&mbus->lock, flags); + if (list_empty(&rp->e_list)) { + spin_unlock_irqrestore(&mbus->lock, flags); + return NULL; + } + p = rp->e_list.next; + list_del(p); + --rp->nevents; + spin_unlock_irqrestore(&mbus->lock, flags); + return list_entry(p, struct mon_event_text, e_link); +} + +static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp, + struct file *file) +{ + struct mhi_controller *mbus = rp->r.m_bus; + DECLARE_WAITQUEUE(waita, current); + struct mon_event_text *ep; + + add_wait_queue(&rp->wait, &waita); + set_current_state(TASK_INTERRUPTIBLE); + while ((ep = mon_text_fetch(rp, mbus)) == NULL) { + if (file->f_flags & O_NONBLOCK) { + set_current_state(TASK_RUNNING); + remove_wait_queue(&rp->wait, &waita); + return ERR_PTR(-EWOULDBLOCK); + } + /* + * We do not count nwaiters, because ->release is supposed + * to be called when all openers are gone only. 
+ */ + schedule(); + if (signal_pending(current)) { + remove_wait_queue(&rp->wait, &waita); + return ERR_PTR(-EINTR); + } + set_current_state(TASK_INTERRUPTIBLE); + } + set_current_state(TASK_RUNNING); + remove_wait_queue(&rp->wait, &waita); + return ep; +} + +static ssize_t mon_text_read_u(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct mon_reader_text *rp = file->private_data; + struct mon_event_text *ep; + struct mon_text_ptr ptr; + + if (rp->left_size) { + int cnt = rp->left_size; + + if (cnt > nbytes) + cnt = nbytes; + if (copy_to_user(buf, rp->printf_buf + rp->left_pos, cnt)) + return -EFAULT; + rp->left_pos += cnt; + rp->left_size -= cnt; + return cnt; + } + + if (IS_ERR(ep = mon_text_read_wait(rp, file))) + return PTR_ERR(ep); + mutex_lock(&rp->printf_lock); + ptr.cnt = 0; + ptr.pbuf = rp->printf_buf; + ptr.limit = rp->printf_size; + + ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, + "%u %c %03d WP:%llx TRE: %llx %08x %08x", + ep->tstamp, ep->type, ep->chan, (long long unsigned int)ep->wp, + ep->mhi_tre.ptr, ep->mhi_tre.dword[0], ep->mhi_tre.dword[1]); + + if (ep->len) { + struct mon_text_ptr *p = &ptr; + size_t i = 0; + + for (i = 0; i < ep->len; i++) { + if (i % 4 == 0) { + p->cnt += snprintf(p->pbuf + p->cnt, + p->limit - p->cnt, + " "); + } + p->cnt += snprintf(p->pbuf + p->cnt, + p->limit - p->cnt, + "%02x", ep->data[i]); + } + + } + + ptr.cnt += snprintf(ptr.pbuf +ptr.cnt, ptr.limit - ptr.cnt, "\n"); + + if (ptr.cnt > nbytes) { + rp->left_pos = nbytes; + rp->left_size = ptr.cnt - nbytes; + ptr.cnt = nbytes; + } + + if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) + ptr.cnt = -EFAULT; + mutex_unlock(&rp->printf_lock); + kmem_cache_free(rp->e_slab, ep); + return ptr.cnt; +} + +static int mon_text_release(struct inode *inode, struct file *file) +{ + struct mon_reader_text *rp = file->private_data; + struct mhi_controller *mbus; + /* unsigned long flags; */ + struct list_head *p; + struct mon_event_text *ep; + + mutex_lock(&mon_lock); + mbus = inode->i_private; + + if (mbus->nreaders <= 0) { + mutex_unlock(&mon_lock); + return 0; + } + mon_reader_del(mbus, &rp->r); + + /* + * In theory, e_list is protected by mbus->lock. However, + * after mon_reader_del has finished, the following is the case: + * - we are not on reader list anymore, so new events won't be added; + * - whole mbus may be dropped if it was orphaned. + * So, we better not touch mbus. 
+ */ + /* spin_lock_irqsave(&mbus->lock, flags); */ + while (!list_empty(&rp->e_list)) { + p = rp->e_list.next; + ep = list_entry(p, struct mon_event_text, e_link); + list_del(p); + --rp->nevents; + kmem_cache_free(rp->e_slab, ep); + } + /* spin_unlock_irqrestore(&mbus->lock, flags); */ + + kmem_cache_destroy(rp->e_slab); + kfree(rp->printf_buf); + kfree(rp); + + mutex_unlock(&mon_lock); + return 0; +} + + +static const struct file_operations mon_fops_text_u = { + .owner = THIS_MODULE, + .open = mon_text_open, + .llseek = no_llseek, + .read = mon_text_read_u, + .release = mon_text_release, +}; +#endif + +void mhi_init_debugfs(struct mhi_controller *mhi_cntrl) +{ + struct dentry *dentry; + char node[64]; + +#ifdef ENABLE_MHI_MON + struct mhi_controller *mbus = mhi_cntrl; + + mbus->nreaders = 0; + kref_init(&mbus->ref); + spin_lock_init(&mbus->lock); + INIT_LIST_HEAD(&mbus->r_list); +#endif + + if (!mhi_cntrl->parent) + snprintf(node, sizeof(node), "mhi_%04x_%02x:%02x.%02x", + mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus, + mhi_cntrl->slot); + else + snprintf(node, sizeof(node), "%04x_%02x:%02x.%02x", + mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus, + mhi_cntrl->slot); + + dentry = debugfs_create_dir(node, mhi_cntrl->parent); + if (IS_ERR_OR_NULL(dentry)) + return; + + debugfs_create_file("states", 0444, dentry, mhi_cntrl, + &debugfs_state_ops); + debugfs_create_file("events", 0444, dentry, mhi_cntrl, + &debugfs_ev_ops); + debugfs_create_file("chan", 0444, dentry, mhi_cntrl, + &debugfs_chan_ops); + debugfs_create_file("reset", 0444, dentry, mhi_cntrl, + &debugfs_trigger_reset_fops); +#ifdef ENABLE_MHI_MON + debugfs_create_file("mhimon", 0444, dentry, mhi_cntrl, + &mon_fops_text_u); +#endif + mhi_cntrl->dentry = dentry; +} + +void mhi_deinit_debugfs(struct mhi_controller *mhi_cntrl) +{ + debugfs_remove_recursive(mhi_cntrl->dentry); + mhi_cntrl->dentry = NULL; +} + +int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl) +{ + struct mhi_ctxt *mhi_ctxt; + struct mhi_chan_ctxt *chan_ctxt; + struct mhi_event_ctxt *er_ctxt; + struct mhi_cmd_ctxt *cmd_ctxt; + struct mhi_chan *mhi_chan; + struct mhi_event *mhi_event; + struct mhi_cmd *mhi_cmd; + int ret = -ENOMEM, i; + + atomic_set(&mhi_cntrl->dev_wake, 0); + atomic_set(&mhi_cntrl->alloc_size, 0); + atomic_set(&mhi_cntrl->pending_pkts, 0); + + mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL); + if (!mhi_ctxt) + return -ENOMEM; + + mhi_ctxt->ctrl_seg = mhi_alloc_coherent(mhi_cntrl, sizeof(*mhi_ctxt->ctrl_seg), + &mhi_ctxt->ctrl_seg_addr, GFP_KERNEL); + MHI_LOG("mhi_ctxt->ctrl_seg = %p\n", mhi_ctxt->ctrl_seg); + if (!mhi_ctxt->ctrl_seg) + goto error_alloc_chan_ctxt; + + if ((unsigned long)mhi_ctxt->ctrl_seg & (4096-1)) { + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->ctrl_seg), mhi_ctxt->ctrl_seg, mhi_ctxt->ctrl_seg_addr); + goto error_alloc_chan_ctxt; + } + + /* setup channel ctxt */ +#if 1 + mhi_ctxt->chan_ctxt = mhi_ctxt->ctrl_seg->chan_ctxt; + mhi_ctxt->chan_ctxt_addr = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, chan_ctxt); +#else + mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->chan_ctxt) * mhi_cntrl->max_chan, + &mhi_ctxt->chan_ctxt_addr, GFP_KERNEL); + if (!mhi_ctxt->chan_ctxt) + goto error_alloc_chan_ctxt; +#endif + + mhi_chan = mhi_cntrl->mhi_chan; + chan_ctxt = mhi_ctxt->chan_ctxt; + for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) { + /* If it's offload channel skip this step */ + if (mhi_chan->offload_ch) + continue; + + chan_ctxt->chstate = MHI_CH_STATE_DISABLED; + 
chan_ctxt->brstmode = mhi_chan->db_cfg.brstmode; + chan_ctxt->pollcfg = mhi_chan->db_cfg.pollcfg; + chan_ctxt->chtype = mhi_chan->type; + chan_ctxt->erindex = mhi_chan->er_index; + + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + mhi_chan->tre_ring.db_addr = &chan_ctxt->wp; + } + + /* setup event context */ +#if 1 + mhi_ctxt->er_ctxt = mhi_ctxt->ctrl_seg->er_ctxt; + mhi_ctxt->er_ctxt_addr = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, er_ctxt); +#else + mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->er_ctxt) * mhi_cntrl->total_ev_rings, + &mhi_ctxt->er_ctxt_addr, GFP_KERNEL); + if (!mhi_ctxt->er_ctxt) + goto error_alloc_er_ctxt; +#endif + + er_ctxt = mhi_ctxt->er_ctxt; + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* it's a satellite ev, we do not touch it */ + if (mhi_event->offload_ev) + continue; + + er_ctxt->intmodc = 0; + er_ctxt->intmodt = mhi_event->intmod; + er_ctxt->ertype = MHI_ER_TYPE_VALID; + if (mhi_cntrl->msi_allocated == 1) { + mhi_event->msi = 0; + } + er_ctxt->msivec = mhi_event->msi; + mhi_event->db_cfg.db_mode = true; + + ring->el_size = sizeof(struct mhi_tre); + ring->len = ring->el_size * ring->elements; +#if 1 + ring->alloc_size = ring->len; + ring->pre_aligned = mhi_ctxt->ctrl_seg->event_ring[i]; + ring->dma_handle = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, event_ring[i]); + ring->iommu_base = ring->dma_handle; + ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle); +#else + ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); + if (ret) + goto error_alloc_er; +#endif + + ring->rp = ring->wp = ring->base; + er_ctxt->rbase = ring->iommu_base; + er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase; + er_ctxt->rlen = ring->len; + ring->ctxt_wp = &er_ctxt->wp; + + mhi_ring_aligned_check(mhi_cntrl, er_ctxt->rbase, er_ctxt->rlen); + memset(ring->base, 0xCC, ring->len); + } + + /* setup cmd context */ +#if 1 + mhi_ctxt->cmd_ctxt = mhi_ctxt->ctrl_seg->cmd_ctxt; + mhi_ctxt->cmd_ctxt_addr = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, cmd_ctxt); +#else + mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, + &mhi_ctxt->cmd_ctxt_addr, GFP_KERNEL); + if (!mhi_ctxt->cmd_ctxt) + goto error_alloc_er; +#endif + + mhi_cmd = mhi_cntrl->mhi_cmd; + cmd_ctxt = mhi_ctxt->cmd_ctxt; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { + struct mhi_ring *ring = &mhi_cmd->ring; + + ring->el_size = sizeof(struct mhi_tre); + ring->elements = CMD_EL_PER_RING; + ring->len = ring->el_size * ring->elements; +#if 1 + ring->alloc_size = ring->len; + ring->pre_aligned = mhi_ctxt->ctrl_seg->cmd_ring[i]; + ring->dma_handle = mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, cmd_ring[i]); + ring->iommu_base = ring->dma_handle; + ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle); +#else + ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); + if (ret) + goto error_alloc_cmd; +#endif + + ring->rp = ring->wp = ring->base; + cmd_ctxt->rbase = ring->iommu_base; + cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase; + cmd_ctxt->rlen = ring->len; + ring->ctxt_wp = &cmd_ctxt->wp; + + mhi_ring_aligned_check(mhi_cntrl, cmd_ctxt->rbase, cmd_ctxt->rlen); + } + + mhi_cntrl->mhi_ctxt = mhi_ctxt; + + return 0; + +#if 0 +error_alloc_cmd: + for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) { + struct mhi_ring *ring = &mhi_cmd->ring; + + 
mhi_free_coherent(mhi_cntrl, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + } + mhi_free_coherent(mhi_cntrl, + sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, + mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); + i = mhi_cntrl->total_ev_rings; + mhi_event = mhi_cntrl->mhi_event + i; + +error_alloc_er: + for (--i, --mhi_event; i >= 0; i--, mhi_event--) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev) + continue; + + mhi_free_coherent(mhi_cntrl, ring->alloc_size, + ring->pre_aligned, ring->dma_handle); + } + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) * + mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, + mhi_ctxt->er_ctxt_addr); + +error_alloc_er_ctxt: + mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) * + mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, + mhi_ctxt->chan_ctxt_addr); +#endif + +error_alloc_chan_ctxt: + kfree(mhi_ctxt); + + return ret; +} + +static int mhi_get_tsync_er_cfg(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + + /* find event ring with timesync support */ + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) + if (mhi_event->data_type == MHI_ER_TSYNC_ELEMENT_TYPE) + return mhi_event->er_index; + + return -ENOENT; +} + +int mhi_init_timesync(struct mhi_controller *mhi_cntrl) +{ + struct mhi_timesync *mhi_tsync; + u32 time_offset, db_offset; + int ret; + + read_lock_bh(&mhi_cntrl->pm_lock); + + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + ret = -EIO; + goto exit_timesync; + } + + ret = mhi_get_capability_offset(mhi_cntrl, TIMESYNC_CAP_ID, + &time_offset); + if (ret) { + MHI_LOG("No timesync capability found\n"); + goto exit_timesync; + } + + read_unlock_bh(&mhi_cntrl->pm_lock); + + if (!mhi_cntrl->time_get || !mhi_cntrl->lpm_disable || + !mhi_cntrl->lpm_enable) + return -EINVAL; + + /* register method supported */ + mhi_tsync = kzalloc(sizeof(*mhi_tsync), GFP_KERNEL); + if (!mhi_tsync) + return -ENOMEM; + + spin_lock_init(&mhi_tsync->lock); + INIT_LIST_HEAD(&mhi_tsync->head); + init_completion(&mhi_tsync->completion); + + /* save time_offset for obtaining time */ + MHI_LOG("TIME OFFS:0x%x\n", time_offset); + mhi_tsync->time_reg = mhi_cntrl->regs + time_offset + + TIMESYNC_TIME_LOW_OFFSET; + + mhi_cntrl->mhi_tsync = mhi_tsync; + + ret = mhi_create_timesync_sysfs(mhi_cntrl); + if (unlikely(ret)) { + /* kernel method still work */ + MHI_ERR("Failed to create timesync sysfs nodes\n"); + } + + read_lock_bh(&mhi_cntrl->pm_lock); + + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { + ret = -EIO; + goto exit_timesync; + } + + /* get DB offset if supported, else return */ + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, + time_offset + TIMESYNC_DB_OFFSET, &db_offset); + if (ret || !db_offset) { + ret = 0; + goto exit_timesync; + } + + MHI_LOG("TIMESYNC_DB OFFS:0x%x\n", db_offset); + mhi_tsync->db = mhi_cntrl->regs + db_offset; + + read_unlock_bh(&mhi_cntrl->pm_lock); + + /* get time-sync event ring configuration */ + ret = mhi_get_tsync_er_cfg(mhi_cntrl); + if (ret < 0) { + MHI_LOG("Could not find timesync event ring\n"); + return ret; + } + + mhi_tsync->er_index = ret; + + ret = mhi_send_cmd(mhi_cntrl, NULL, MHI_CMD_TIMSYNC_CFG); + if (ret) { + MHI_ERR("Failed to send time sync cfg cmd\n"); + return ret; + } + + ret = wait_for_completion_timeout(&mhi_tsync->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || mhi_tsync->ccs != MHI_EV_CC_SUCCESS) { + MHI_ERR("Failed to get time cfg cmd completion\n"); + return -EIO; + } + + return 0; + 
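+/* common exit for the paths above that fail with pm_lock held */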
+exit_timesync: + read_unlock_bh(&mhi_cntrl->pm_lock); + + return ret; +} + +int mhi_init_mmio(struct mhi_controller *mhi_cntrl) +{ + u32 val = 0; + int i, ret; + struct mhi_chan *mhi_chan; + struct mhi_event *mhi_event; + void __iomem *base = mhi_cntrl->regs; + struct { + u32 offset; + u32 mask; + u32 shift; + u32 val; + } reg_info[] = { + { + CCABAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), + }, + { + CCABAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), + }, + { + ECABAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), + }, + { + ECABAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), + }, + { + CRCBAP_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), + }, + { + CRCBAP_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), + }, +#if 0 //carl.yin 20190527 UDE-WIN-InitMmio + { + MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, + mhi_cntrl->total_ev_rings, + }, + { + MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, + mhi_cntrl->hw_ev_rings, + }, +#endif + { + MHICTRLBASE_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->ctrl_seg_addr), + }, + { + MHICTRLBASE_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->ctrl_seg_addr), + }, + { + MHIDATABASE_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_start), + }, + { + MHIDATABASE_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_start), + }, + { + MHICTRLLIMIT_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->mhi_ctxt->ctrl_seg_addr+sizeof(struct mhi_ctrl_seg)), + }, + { + MHICTRLLIMIT_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->mhi_ctxt->ctrl_seg_addr+sizeof(struct mhi_ctrl_seg)), + }, + { + MHIDATALIMIT_HIGHER, U32_MAX, 0, + upper_32_bits(mhi_cntrl->iova_stop), + }, + { + MHIDATALIMIT_LOWER, U32_MAX, 0, + lower_32_bits(mhi_cntrl->iova_stop), + }, + { 0, 0, 0, 0 } + }; + + MHI_LOG("Initializing MMIO\n"); + + /* set up DB register for all the chan rings */ + ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK, + CHDBOFF_CHDBOFF_SHIFT, &val); + if (ret) + return -EIO; + + MHI_LOG("CHDBOFF:0x%x\n", val); + + /* setup wake db */ + mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB); +#if 0 //'EM120RGLAPR02A07M4G_11' will treate as chan 127's interrput, and report complete event over cmd ring, but cmd ring is not set by now + mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0); + mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0); + mhi_cntrl->wake_set = false; +#endif + + /* setup channel db addresses */ + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++) + mhi_chan->tre_ring.db_addr = base + val; + + /* setup event ring db addresses */ + ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK, + ERDBOFF_ERDBOFF_SHIFT, &val); + if (ret) + return -EIO; + + MHI_LOG("ERDBOFF:0x%x\n", val); + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + mhi_event->ring.db_addr = base + val; + } + + /* set up DB register for primary CMD rings */ + mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER; + + MHI_LOG("Programming all MMIO values.\n"); + for (i = 0; reg_info[i].offset; i++) + mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset, + reg_info[i].mask, reg_info[i].shift, + reg_info[i].val); + + return 0; +} + +void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ 
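+	/*
+	 * Note: the TRE ring memory lives in the shared ctrl_seg allocation,
+	 * so only the host-side buffer ring is freed here; the per-ring
+	 * coherent free is compiled out below.
+	 */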
+ struct mhi_ring *buf_ring; + struct mhi_ring *tre_ring; + struct mhi_chan_ctxt *chan_ctxt; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; + +#if 0 + mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size, + tre_ring->pre_aligned, tre_ring->dma_handle); +#endif + kfree(buf_ring->base); + + buf_ring->base = tre_ring->base = NULL; + chan_ctxt->rbase = 0; +} + +int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring; + struct mhi_ring *tre_ring; + struct mhi_chan_ctxt *chan_ctxt; + int ret; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + tre_ring->el_size = sizeof(struct mhi_tre); + tre_ring->len = tre_ring->el_size * tre_ring->elements; + chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; +#if 1 + tre_ring->alloc_size = tre_ring->len; + if (MHI_CLIENT_IP_HW_0_IN == mhi_chan->chan) { + tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->hw_in_chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, hw_in_chan_ring[mhi_chan->ring]); + } + else if (MHI_CLIENT_IP_HW_0_OUT == mhi_chan->chan) { + tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->hw_out_chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, hw_out_chan_ring[mhi_chan->ring]); + } +#ifdef ENABLE_IP_SW0 + else if (MHI_CLIENT_IP_SW_0_IN == mhi_chan->chan) { + tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->sw_in_chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, sw_in_chan_ring[mhi_chan->ring]); + } + else if (MHI_CLIENT_IP_SW_0_OUT == mhi_chan->chan) { + tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->sw_out_chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, sw_out_chan_ring[mhi_chan->ring]); + } +#endif + +#ifdef ENABLE_ADPL + else if (MHI_CLIENT_ADPL == mhi_chan->chan) { + tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->adpl_in_chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, adpl_in_chan_ring[mhi_chan->ring]); + } +#endif + +#ifdef ENABLE_QDSS + else if (MHI_CLIENT_IP_HW_QDSS == mhi_chan->chan) { + tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->qdss_in_chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, qdss_in_chan_ring[mhi_chan->ring]); + } +#endif + + else if (MHI_CLIENT_DIAG_IN == mhi_chan->chan) { + tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->diag_in_chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, diag_in_chan_ring[mhi_chan->ring]); + } + else { + tre_ring->pre_aligned = &mhi_cntrl->mhi_ctxt->ctrl_seg->chan_ring[mhi_chan->ring]; + tre_ring->dma_handle = mhi_cntrl->mhi_ctxt->ctrl_seg_addr + offsetof(struct mhi_ctrl_seg, chan_ring[mhi_chan->ring]); + } + tre_ring->iommu_base = tre_ring->dma_handle; + tre_ring->base = tre_ring->pre_aligned + (tre_ring->iommu_base - tre_ring->dma_handle); + ret = 0; +#else + ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len); +#endif + if (ret) + return -ENOMEM; + + buf_ring->el_size = sizeof(struct mhi_buf_info); + buf_ring->len = buf_ring->el_size * buf_ring->elements; + buf_ring->base = 
kzalloc(buf_ring->len, GFP_KERNEL); + + if (!buf_ring->base) { +#if 0 + mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size, + tre_ring->pre_aligned, tre_ring->dma_handle); +#endif + return -ENOMEM; + } + + chan_ctxt->chstate = MHI_CH_STATE_ENABLED; + chan_ctxt->rbase = tre_ring->iommu_base; + chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase; + chan_ctxt->rlen = tre_ring->len; + tre_ring->ctxt_wp = &chan_ctxt->wp; + + tre_ring->rp = tre_ring->wp = tre_ring->base; + buf_ring->rp = buf_ring->wp = buf_ring->base; + mhi_chan->db_cfg.db_mode = true; + + mhi_ring_aligned_check(mhi_cntrl, chan_ctxt->rbase, chan_ctxt->rlen); + /* update to all cores */ + smp_wmb(); + + return 0; +} + +int mhi_device_configure(struct mhi_device *mhi_dev, + enum dma_data_direction dir, + struct mhi_buf *cfg_tbl, + int elements) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + struct mhi_event_ctxt *er_ctxt; + struct mhi_chan_ctxt *ch_ctxt; + int er_index, chan; + + switch (dir) { + case DMA_TO_DEVICE: + mhi_chan = mhi_dev->ul_chan; + break; + case DMA_BIDIRECTIONAL: + case DMA_FROM_DEVICE: + case DMA_NONE: + mhi_chan = mhi_dev->dl_chan; + break; + default: + return -EINVAL; + } + + er_index = mhi_chan->er_index; + chan = mhi_chan->chan; + + for (; elements > 0; elements--, cfg_tbl++) { + /* update event context array */ + if (!strcmp(cfg_tbl->name, "ECA")) { + er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[er_index]; + if (sizeof(*er_ctxt) != cfg_tbl->len) { + MHI_ERR( + "Invalid ECA size, expected:%zu actual%zu\n", + sizeof(*er_ctxt), cfg_tbl->len); + return -EINVAL; + } + memcpy((void *)er_ctxt, cfg_tbl->buf, sizeof(*er_ctxt)); + continue; + } + + /* update channel context array */ + if (!strcmp(cfg_tbl->name, "CCA")) { + ch_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[chan]; + if (cfg_tbl->len != sizeof(*ch_ctxt)) { + MHI_ERR( + "Invalid CCA size, expected:%zu actual:%zu\n", + sizeof(*ch_ctxt), cfg_tbl->len); + return -EINVAL; + } + memcpy((void *)ch_ctxt, cfg_tbl->buf, sizeof(*ch_ctxt)); + continue; + } + + return -EINVAL; + } + + return 0; +} + +#if 0 +static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl, + struct device_node *of_node) +{ + int i, ret, num = 0; + struct mhi_event *mhi_event; + struct device_node *child; + + of_node = of_find_node_by_name(of_node, "mhi_events"); + if (!of_node) + return -EINVAL; + + for_each_available_child_of_node(of_node, child) { + if (!strcmp(child->name, "mhi_event")) + num++; + } + + if (!num) + return -EINVAL; + + mhi_cntrl->total_ev_rings = num; + mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event), + GFP_KERNEL); + if (!mhi_cntrl->mhi_event) + return -ENOMEM; + + /* populate ev ring */ + mhi_event = mhi_cntrl->mhi_event; + i = 0; + for_each_available_child_of_node(of_node, child) { + if (strcmp(child->name, "mhi_event")) + continue; + + mhi_event->er_index = i++; + ret = of_property_read_u32(child, "mhi,num-elements", + (u32 *)&mhi_event->ring.elements); + if (ret) + goto error_ev_cfg; + + ret = of_property_read_u32(child, "mhi,intmod", + &mhi_event->intmod); + if (ret) + goto error_ev_cfg; + + ret = of_property_read_u32(child, "mhi,msi", + &mhi_event->msi); + if (ret) + goto error_ev_cfg; + + ret = of_property_read_u32(child, "mhi,chan", + &mhi_event->chan); + if (!ret) { + if (mhi_event->chan >= mhi_cntrl->max_chan) + goto error_ev_cfg; + /* this event ring has a dedicated channel */ + mhi_event->mhi_chan = + &mhi_cntrl->mhi_chan[mhi_event->chan]; + } + + ret = of_property_read_u32(child, "mhi,priority", + 
&mhi_event->priority); + if (ret) + goto error_ev_cfg; + + ret = of_property_read_u32(child, "mhi,brstmode", + &mhi_event->db_cfg.brstmode); + if (ret || MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode)) + goto error_ev_cfg; + + mhi_event->db_cfg.process_db = + (mhi_event->db_cfg.brstmode == MHI_BRSTMODE_ENABLE) ? + mhi_db_brstmode : mhi_db_brstmode_disable; + + ret = of_property_read_u32(child, "mhi,data-type", + &mhi_event->data_type); + if (ret) + mhi_event->data_type = MHI_ER_DATA_ELEMENT_TYPE; + + if (mhi_event->data_type > MHI_ER_DATA_TYPE_MAX) + goto error_ev_cfg; + + switch (mhi_event->data_type) { + case MHI_ER_DATA_ELEMENT_TYPE: + mhi_event->process_event = mhi_process_data_event_ring; + break; + case MHI_ER_CTRL_ELEMENT_TYPE: + mhi_event->process_event = mhi_process_ctrl_ev_ring; + break; + case MHI_ER_TSYNC_ELEMENT_TYPE: + mhi_event->process_event = mhi_process_tsync_event_ring; + break; + } + + mhi_event->hw_ring = of_property_read_bool(child, "mhi,hw-ev"); + if (mhi_event->hw_ring) + mhi_cntrl->hw_ev_rings++; + else + mhi_cntrl->sw_ev_rings++; + mhi_event->cl_manage = of_property_read_bool(child, + "mhi,client-manage"); + mhi_event->offload_ev = of_property_read_bool(child, + "mhi,offload"); + mhi_event++; + } + + /* we need msi for each event ring + additional one for BHI */ + mhi_cntrl->msi_required = mhi_cntrl->total_ev_rings + 1; + + return 0; + +error_ev_cfg: + + kfree(mhi_cntrl->mhi_event); + return -EINVAL; +} +static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl, + struct device_node *of_node) +{ + int ret; + struct device_node *child; + u32 chan; + + ret = of_property_read_u32(of_node, "mhi,max-channels", + &mhi_cntrl->max_chan); + if (ret) + return ret; + + of_node = of_find_node_by_name(of_node, "mhi_channels"); + if (!of_node) + return -EINVAL; + + mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, + sizeof(*mhi_cntrl->mhi_chan), GFP_KERNEL); + if (!mhi_cntrl->mhi_chan) + return -ENOMEM; + + INIT_LIST_HEAD(&mhi_cntrl->lpm_chans); + + /* populate channel configurations */ + for_each_available_child_of_node(of_node, child) { + struct mhi_chan *mhi_chan; + + if (strcmp(child->name, "mhi_chan")) + continue; + + ret = of_property_read_u32(child, "reg", &chan); + if (ret || chan >= mhi_cntrl->max_chan) + goto error_chan_cfg; + + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + + ret = of_property_read_string(child, "label", + &mhi_chan->name); + if (ret) + goto error_chan_cfg; + + mhi_chan->chan = chan; + + ret = of_property_read_u32(child, "mhi,num-elements", + (u32 *)&mhi_chan->tre_ring.elements); + if (!ret && !mhi_chan->tre_ring.elements) + goto error_chan_cfg; + + /* + * For some channels, local ring len should be bigger than + * transfer ring len due to internal logical channels in device. + * So host can queue much more buffers than transfer ring len. + * Example, RSC channels should have a larger local channel + * than transfer ring length. 
+	 */
+		ret = of_property_read_u32(child, "mhi,local-elements",
+					   (u32 *)&mhi_chan->buf_ring.elements);
+		if (ret)
+			mhi_chan->buf_ring.elements =
+				mhi_chan->tre_ring.elements;
+
+		ret = of_property_read_u32(child, "mhi,event-ring",
+					   &mhi_chan->er_index);
+		if (ret)
+			goto error_chan_cfg;
+
+		ret = of_property_read_u32(child, "mhi,chan-dir",
+					   &mhi_chan->dir);
+		if (ret)
+			goto error_chan_cfg;
+
+		/*
+		 * For most channels, chtype is identical to the channel
+		 * direction; if not defined, assign the channel direction
+		 * to chtype
+		 */
+		ret = of_property_read_u32(child, "mhi,chan-type",
+					   &mhi_chan->type);
+		if (ret)
+			mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;
+
+		ret = of_property_read_u32(child, "mhi,ee", &mhi_chan->ee_mask);
+		if (ret)
+			goto error_chan_cfg;
+
+		of_property_read_u32(child, "mhi,pollcfg",
+				     &mhi_chan->db_cfg.pollcfg);
+
+		ret = of_property_read_u32(child, "mhi,data-type",
+					   &mhi_chan->xfer_type);
+		if (ret)
+			goto error_chan_cfg;
+
+		switch (mhi_chan->xfer_type) {
+		case MHI_XFER_BUFFER:
+			mhi_chan->gen_tre = mhi_gen_tre;
+			mhi_chan->queue_xfer = mhi_queue_buf;
+			break;
+		case MHI_XFER_SKB:
+			mhi_chan->queue_xfer = mhi_queue_skb;
+			break;
+		case MHI_XFER_SCLIST:
+			mhi_chan->gen_tre = mhi_gen_tre;
+			mhi_chan->queue_xfer = mhi_queue_sclist;
+			break;
+		case MHI_XFER_NOP:
+			mhi_chan->queue_xfer = mhi_queue_nop;
+			break;
+		case MHI_XFER_DMA:
+		case MHI_XFER_RSC_DMA:
+			mhi_chan->queue_xfer = mhi_queue_dma;
+			break;
+		default:
+			goto error_chan_cfg;
+		}
+
+		mhi_chan->lpm_notify = of_property_read_bool(child,
+							     "mhi,lpm-notify");
+		mhi_chan->offload_ch = of_property_read_bool(child,
+							"mhi,offload-chan");
+		mhi_chan->db_cfg.reset_req = of_property_read_bool(child,
+							"mhi,db-mode-switch");
+		mhi_chan->pre_alloc = of_property_read_bool(child,
+							    "mhi,auto-queue");
+		mhi_chan->auto_start = of_property_read_bool(child,
+							     "mhi,auto-start");
+		mhi_chan->wake_capable = of_property_read_bool(child,
+							"mhi,wake-capable");
+
+		if (mhi_chan->pre_alloc &&
+		    (mhi_chan->dir != DMA_FROM_DEVICE ||
+		     mhi_chan->xfer_type != MHI_XFER_BUFFER))
+			goto error_chan_cfg;
+
+		/* bi-directional and directionless channels must be offload channels */
+		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
+		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch)
+			goto error_chan_cfg;
+
+		/* if the MHI host allocates the buffers, the client cannot queue */
+		if (mhi_chan->pre_alloc)
+			mhi_chan->queue_xfer = mhi_queue_nop;
+
+		if (!mhi_chan->offload_ch) {
+			ret = of_property_read_u32(child, "mhi,doorbell-mode",
+						   &mhi_chan->db_cfg.brstmode);
+			if (ret ||
+			    MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode))
+				goto error_chan_cfg;
+
+			mhi_chan->db_cfg.process_db =
+				(mhi_chan->db_cfg.brstmode ==
+				 MHI_BRSTMODE_ENABLE) ?
+				mhi_db_brstmode : mhi_db_brstmode_disable;
+		}
+
+		mhi_chan->configured = true;
+
+		if (mhi_chan->lpm_notify)
+			list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
+	}
+
+	return 0;
+
+error_chan_cfg:
+	kfree(mhi_cntrl->mhi_chan);
+
+	return -EINVAL;
+}
+#else
+static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
+			   struct device_node *of_node)
+{
+	int i, num = 0;
+	struct mhi_event *mhi_event;
+
+	num = NUM_MHI_EVT_RINGS;
+	mhi_cntrl->total_ev_rings = num;
+	mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
+				       GFP_KERNEL);
+	if (!mhi_cntrl->mhi_event)
+		return -ENOMEM;
+
+	mhi_cntrl->msi_irq_base = 0;
+	/* populate ev ring */
+	mhi_event = mhi_cntrl->mhi_event;
+	i = 0;
+
+	for (i = 0; i < mhi_cntrl->total_ev_rings; i++) {
+		mhi_event->er_index = i;
+
+		mhi_event->ring.elements = NUM_MHI_EVT_RING_ELEMENTS; //Event ring length in elements
+		if (i == PRIMARY_EVENT_RING)
+			mhi_event->ring.elements = 256; //256 is enough; 1024 sometimes makes the driver fail to open a channel (the x6x fails to malloc)
+
+		mhi_event->intmod = 1; //Interrupt moderation time in ms
+
+#ifdef ENABLE_ADPL
+		if (i == ADPL_EVT_RING)
+			mhi_event->ring.elements = 256;
+#endif
+
+#ifdef ENABLE_QDSS
+		if (i == QDSS_EVT_RING)
+			mhi_event->ring.elements = 512;
+#endif
+
+		/* see mhi_netdev_status_cb(): when an interrupt arrives, napi_poll may already be scheduled, so interrupts can be reduced
+		root@OpenWrt:/# cat /proc/interrupts | grep mhi
+		root@OpenWrt:/# cat /sys/kernel/debug/mhi_q/mhi_netdev/pcie_mhi_0306_00.01.00_0/rx_int
+		*/
+		if (i == IPA_IN_EVENT_RING)
+			mhi_event->intmod = 5;
+
+#ifdef ENABLE_IP_SW0
+		if (i == SW_0_IN_EVT_RING)
+			mhi_event->intmod = 5;
+#endif
+
+#ifdef ENABLE_ADPL
+		if (i == ADPL_EVT_RING)
+			mhi_event->intmod = 0;
+#endif
+
+#ifdef ENABLE_QDSS
+		if (i == QDSS_EVT_RING)
+			mhi_event->intmod = 0;
+#endif
+
+		mhi_event->msi = 1 + i + mhi_cntrl->msi_irq_base; //MSI associated with this event ring
+
+		if (i == IPA_OUT_EVENT_RING)
+			mhi_event->chan = MHI_CLIENT_IP_HW_0_OUT; //Dedicated channel number, if it's a dedicated event ring
+		else if (i == IPA_IN_EVENT_RING)
+			mhi_event->chan = MHI_CLIENT_IP_HW_0_IN; //Dedicated channel number, if it's a dedicated event ring
+#ifdef ENABLE_IP_SW0
+		else if (i == SW_0_OUT_EVT_RING)
+			mhi_event->chan = MHI_CLIENT_IP_SW_0_OUT;
+		else if (i == SW_0_IN_EVT_RING)
+			mhi_event->chan = MHI_CLIENT_IP_SW_0_IN;
+#endif
+
+#ifdef ENABLE_ADPL
+		else if (i == ADPL_EVT_RING)
+			mhi_event->chan = MHI_CLIENT_ADPL;
+#endif
+
+#ifdef ENABLE_QDSS
+		else if (i == QDSS_EVT_RING)
+			mhi_event->chan = MHI_CLIENT_IP_HW_QDSS;
+#endif
+		else
+			mhi_event->chan = 0;
+
+		/* this event ring has a dedicated channel */
+		mhi_event->mhi_chan =
+			&mhi_cntrl->mhi_chan[mhi_event->chan];
+
+		mhi_event->priority = 1; //Event ring priority, set to 1 for now
+
+		if (mhi_event->chan && mhi_event->mhi_chan->db_cfg.brstmode == MHI_BRSTMODE_ENABLE)
+			mhi_event->db_cfg.brstmode = MHI_BRSTMODE_ENABLE;
+		else
+			mhi_event->db_cfg.brstmode = MHI_BRSTMODE_DISABLE;
+
+		mhi_event->db_cfg.process_db =
+			(mhi_event->db_cfg.brstmode == MHI_BRSTMODE_ENABLE) ?
+ mhi_db_brstmode : mhi_db_brstmode_disable; + + if (i == IPA_OUT_EVENT_RING || i == IPA_IN_EVENT_RING) + mhi_event->data_type = MHI_ER_DATA_ELEMENT_TYPE; +#ifdef ENABLE_IP_SW0 + else if (i == SW_0_OUT_EVT_RING || i == SW_0_IN_EVT_RING) + mhi_event->data_type = MHI_ER_DATA_ELEMENT_TYPE; +#endif + +#ifdef ENABLE_ADPL + else if (i == ADPL_EVT_RING) + mhi_event->data_type = MHI_ER_DATA_ELEMENT_TYPE; +#endif + +#ifdef ENABLE_QDSS + else if (i == QDSS_EVT_RING) + mhi_event->data_type = MHI_ER_DATA_ELEMENT_TYPE; +#endif + else + mhi_event->data_type = MHI_ER_CTRL_ELEMENT_TYPE; + + switch (mhi_event->data_type) { + case MHI_ER_DATA_ELEMENT_TYPE: + mhi_event->process_event = mhi_process_data_event_ring; + break; + case MHI_ER_CTRL_ELEMENT_TYPE: + mhi_event->process_event = mhi_process_ctrl_ev_ring; + break; + case MHI_ER_TSYNC_ELEMENT_TYPE: + mhi_event->process_event = mhi_process_tsync_event_ring; + break; + } + + if (i == IPA_OUT_EVENT_RING || i == IPA_IN_EVENT_RING +#ifdef ENABLE_ADPL + || i == ADPL_EVT_RING +#endif +#ifdef ENABLE_QDSS + || i == QDSS_EVT_RING +#endif + ) + mhi_event->hw_ring = true; + else + mhi_event->hw_ring = false; + + if (mhi_event->hw_ring) + mhi_cntrl->hw_ev_rings++; + else + mhi_cntrl->sw_ev_rings++; + + mhi_event->cl_manage = false; + if (mhi_event->chan == MHI_CLIENT_IP_HW_0_IN || mhi_event->chan == MHI_CLIENT_IP_SW_0_IN) + mhi_event->cl_manage = true; + mhi_event->offload_ev = false; + mhi_event++; + } + + /* we need msi for each event ring + additional one for BHI */ + mhi_cntrl->msi_required = mhi_cntrl->total_ev_rings + 1 + mhi_cntrl->msi_irq_base; + + return 0; +} + +struct chan_cfg_t { + const char *chan_name; + u32 chan_id; + u32 elements; +}; + +static struct chan_cfg_t chan_cfg[] = { +//"Qualcomm PCIe Loopback" + {"LOOPBACK", MHI_CLIENT_LOOPBACK_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"LOOPBACK", MHI_CLIENT_LOOPBACK_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe Sahara" + {"SAHARA", MHI_CLIENT_SAHARA_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"SAHARA", MHI_CLIENT_SAHARA_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe Diagnostics" + {"DIAG", MHI_CLIENT_DIAG_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"DIAG", MHI_CLIENT_DIAG_IN, NUM_MHI_DIAG_IN_RING_ELEMENTS}, +//"Qualcomm PCIe QDSS Data" +//"Do not use this QDSS. 
xingduo.du 2023-02-16" +// {"QDSS", MHI_CLIENT_QDSS_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, +// {"QDSS", MHI_CLIENT_QDSS_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe EFS" + {"EFS", MHI_CLIENT_EFS_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"EFS", MHI_CLIENT_EFS_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe MBIM" + {"MBIM", MHI_CLIENT_MBIM_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"MBIM", MHI_CLIENT_MBIM_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe QMI" + {"QMI0", MHI_CLIENT_QMI_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"QMI0", MHI_CLIENT_QMI_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe QMI" + //{"QMI1", MHI_CLIENT_QMI_2_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + //{"QMI1", MHI_CLIENT_QMI_2_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe IP CTRL" + {"IP_CTRL", MHI_CLIENT_IP_CTRL_1_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"IP_CTRL", MHI_CLIENT_IP_CTRL_1_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +#if 0 //AG15 +//"Qualcomm PCIe IPCR" + {"IPCR", MHI_CLIENT_DIAG_CONS_IF_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"IPCR", MHI_CLIENT_DIAG_CONS_IF_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +#endif +//"Qualcomm PCIe Boot Logging" + //{"BL", MHI_CLIENT_BOOT_LOG_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + //{"BL", MHI_CLIENT_BOOT_LOG_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm PCIe Modem" + {"DUN", MHI_CLIENT_DUN_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"DUN", MHI_CLIENT_DUN_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +//"Qualcomm EDL " + {"EDL", MHI_CLIENT_EDL_OUT, NUM_MHI_CHAN_RING_ELEMENTS}, + {"EDL", MHI_CLIENT_EDL_IN, NUM_MHI_CHAN_RING_ELEMENTS}, +#ifdef ENABLE_IP_SW0 +//"Qualcomm PCIe LOCAL Adapter" + {"IP_SW0", MHI_CLIENT_IP_SW_0_OUT, NUM_MHI_SW_IP_RING_ELEMENTS}, + {"IP_SW0", MHI_CLIENT_IP_SW_0_IN, NUM_MHI_SW_IP_RING_ELEMENTS}, +#endif +//"Qualcomm PCIe WWAN Adapter" + {"IP_HW0", MHI_CLIENT_IP_HW_0_OUT, NUM_MHI_IPA_OUT_RING_ELEMENTS}, + {"IP_HW0", MHI_CLIENT_IP_HW_0_IN, NUM_MHI_IPA_IN_RING_ELEMENTS}, +#ifdef ENABLE_ADPL + {"ADPL", MHI_CLIENT_ADPL, NUM_MHI_ADPL_RING_ELEMENTS}, +#endif + +#ifdef ENABLE_QDSS + {"QDSS", MHI_CLIENT_IP_HW_QDSS, NUM_MHI_QDSS_RING_ELEMENTS}, +#endif +}; + +extern int mhi_netdev_mbin_enabled(void); +static int of_parse_ch_cfg(struct mhi_controller *mhi_cntrl, + struct device_node *of_node) +{ + u32 chan; + u32 i, num; + u32 ring = 0; + + mhi_cntrl->max_chan = MHI_MAX_CHANNELS; + num = sizeof(chan_cfg)/sizeof(chan_cfg[0]); + + mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, + sizeof(*mhi_cntrl->mhi_chan), GFP_KERNEL); + if (!mhi_cntrl->mhi_chan) + return -ENOMEM; + + INIT_LIST_HEAD(&mhi_cntrl->lpm_chans); + + /* populate channel configurations */ + for (i = 0; i < num; i++) { + struct mhi_chan *mhi_chan; + + if (!strncmp( chan_cfg[i].chan_name, "MBIM", 4)) { + if (!mhi_netdev_mbin_enabled()) + continue; + } + else if (!strncmp( chan_cfg[i].chan_name, "QMI", 3)) { + if (mhi_netdev_mbin_enabled()) + continue; + } + + chan = chan_cfg[i].chan_id; + + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + + mhi_chan->name = chan_cfg[i].chan_name; + + mhi_chan->chan = chan; + + mhi_chan->tre_ring.elements = chan_cfg[i].elements; + + /* + * For some channels, local ring len should be bigger than + * transfer ring len due to internal logical channels in device. + * So host can queue much more buffers than transfer ring len. + * Example, RSC channels should have a larger local channel + * than transfer ring length. 
+	 */
+		mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
+
+		if (chan == MHI_CLIENT_IP_HW_0_OUT || chan == MHI_CLIENT_IP_HW_0_IN || chan == MHI_CLIENT_DIAG_IN
+			|| chan == MHI_CLIENT_IP_SW_0_OUT || chan == MHI_CLIENT_IP_SW_0_IN
+#ifdef ENABLE_ADPL
+			|| chan == MHI_CLIENT_ADPL
+#endif
+#ifdef ENABLE_QDSS
+			|| chan == MHI_CLIENT_IP_HW_QDSS
+#endif
+		) {
+			mhi_chan->ring = 0;
+		}
+		else {
+			mhi_chan->ring = ring;
+			ring += mhi_chan->buf_ring.elements;
+		}
+
+		if (chan == MHI_CLIENT_IP_HW_0_OUT)
+			mhi_chan->er_index = IPA_OUT_EVENT_RING;
+		else if (chan == MHI_CLIENT_IP_HW_0_IN)
+			mhi_chan->er_index = IPA_IN_EVENT_RING;
+#ifdef ENABLE_IP_SW0
+		else if (chan == MHI_CLIENT_IP_SW_0_OUT)
+			mhi_chan->er_index = SW_0_OUT_EVT_RING;
+		else if (chan == MHI_CLIENT_IP_SW_0_IN)
+			mhi_chan->er_index = SW_0_IN_EVT_RING;
+#endif
+
+#ifdef ENABLE_ADPL
+		else if (chan == MHI_CLIENT_ADPL)
+			mhi_chan->er_index = ADPL_EVT_RING;
+#endif
+#ifdef ENABLE_QDSS
+		else if (chan == MHI_CLIENT_IP_HW_QDSS)
+			mhi_chan->er_index = QDSS_EVT_RING;
+#endif
+		else
+			mhi_chan->er_index = PRIMARY_EVENT_RING;
+
+		mhi_chan->dir = CHAN_INBOUND(chan) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+#ifdef ENABLE_ADPL
+		if (chan == MHI_CLIENT_ADPL)
+			mhi_chan->dir = DMA_FROM_DEVICE;
+#endif
+
+#ifdef ENABLE_QDSS
+		if (chan == MHI_CLIENT_IP_HW_QDSS)
+			mhi_chan->dir = DMA_FROM_DEVICE;
+#endif
+		/*
+		 * For most channels, chtype is identical to the channel
+		 * direction; if not defined, assign the channel direction
+		 * to chtype
+		 */
+		mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;
+
+		mhi_chan->ee_mask = BIT(MHI_EE_AMSS);
+		if (CHAN_SBL(chan))
+			mhi_chan->ee_mask = BIT(MHI_EE_SBL);
+		else if (CHAN_EDL(chan))
+			mhi_chan->ee_mask = BIT(MHI_EE_FP);
+
+		mhi_chan->db_cfg.pollcfg = 0;
+
+		if (chan == MHI_CLIENT_IP_HW_0_OUT || chan == MHI_CLIENT_IP_SW_0_OUT)
+			mhi_chan->xfer_type = MHI_XFER_SKB;
+		else if (chan == MHI_CLIENT_IP_HW_0_IN || chan == MHI_CLIENT_IP_SW_0_IN)
+			mhi_chan->xfer_type = MHI_XFER_SKB; //MHI_XFER_DMA;
+#ifdef ENABLE_ADPL
+		else if (chan == MHI_CLIENT_ADPL)
+			mhi_chan->xfer_type = MHI_XFER_BUFFER;
+#endif
+#ifdef ENABLE_QDSS
+		else if (chan == MHI_CLIENT_IP_HW_QDSS)
+			mhi_chan->xfer_type = MHI_XFER_BUFFER;
+#endif
+		else
+			mhi_chan->xfer_type = MHI_XFER_BUFFER;
+
+		if (chan_cfg[i].elements == 0) {
+			mhi_chan->dir = DMA_BIDIRECTIONAL;
+			mhi_chan->xfer_type = MHI_XFER_NOP;
+		}
+
+		switch (mhi_chan->xfer_type) {
+		case MHI_XFER_BUFFER:
+			mhi_chan->gen_tre = mhi_gen_tre;
+			mhi_chan->queue_xfer = mhi_queue_buf;
+			break;
+		case MHI_XFER_SKB:
+			mhi_chan->queue_xfer = mhi_queue_skb;
+			break;
+		case MHI_XFER_SCLIST:
+			mhi_chan->gen_tre = mhi_gen_tre;
+			mhi_chan->queue_xfer = mhi_queue_sclist;
+			break;
+		case MHI_XFER_NOP:
+			mhi_chan->queue_xfer = mhi_queue_nop;
+			break;
+		case MHI_XFER_DMA:
+		case MHI_XFER_RSC_DMA:
+			mhi_chan->queue_xfer = mhi_queue_dma;
+			break;
+		default:
+			goto error_chan_cfg;
+		}
+
+		mhi_chan->lpm_notify = false;
+		mhi_chan->offload_ch = (chan_cfg[i].elements == 0);
+		mhi_chan->db_cfg.reset_req = false;
+		mhi_chan->pre_alloc = false;
+		mhi_chan->auto_start = false;
+		mhi_chan->wake_capable = false;
+
+		if (mhi_chan->pre_alloc &&
+			(mhi_chan->dir != DMA_FROM_DEVICE ||
+			 mhi_chan->xfer_type != MHI_XFER_BUFFER))
+			goto error_chan_cfg;
+
+		/* bi-directional and directionless channels must be offload channels */
+		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
+			 mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch)
+			goto error_chan_cfg;
+
+		/* if the MHI host allocates the buffers, the client cannot queue */
+		if (mhi_chan->pre_alloc)
+			
mhi_chan->queue_xfer = mhi_queue_nop; + + if (!mhi_chan->offload_ch) { + mhi_chan->db_cfg.brstmode = MHI_BRSTMODE_DISABLE; + if (chan == MHI_CLIENT_IP_HW_0_OUT || chan == MHI_CLIENT_IP_HW_0_IN) + mhi_chan->db_cfg.brstmode = MHI_BRSTMODE_ENABLE; + +#ifdef ENABLE_ADPL + if (chan == MHI_CLIENT_ADPL) + mhi_chan->db_cfg.brstmode = MHI_BRSTMODE_DISABLE; +#endif +#ifdef ENABLE_QDSS + if (chan == MHI_CLIENT_IP_HW_QDSS) + mhi_chan->db_cfg.brstmode = MHI_BRSTMODE_DISABLE; +#endif + if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) + goto error_chan_cfg; + + mhi_chan->db_cfg.process_db = + (mhi_chan->db_cfg.brstmode == + MHI_BRSTMODE_ENABLE) ? + mhi_db_brstmode : mhi_db_brstmode_disable; + } + + mhi_chan->configured = true; + + if (mhi_chan->lpm_notify) + list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans); + } + + return 0; + +error_chan_cfg: + kfree(mhi_cntrl->mhi_chan); + + return -EINVAL; +} +#endif + +static int of_parse_dt(struct mhi_controller *mhi_cntrl, + struct device_node *of_node) +{ + int ret; + + /* parse MHI channel configuration */ + ret = of_parse_ch_cfg(mhi_cntrl, of_node); + if (ret) + return ret; + + /* parse MHI event configuration */ + ret = of_parse_ev_cfg(mhi_cntrl, of_node); + if (ret) + goto error_ev_cfg; +#if defined(QCOM_AP_QCA6490_DMA_IOMMU) + /* for QCS6490 iommu-dma is fastmap + for SG845 iommu-dma is set in driver + for ipq iommu-dma is disabled + */ + const char *str; + ret = of_property_read_string(of_node, "qcom,iommu-dma", &str); + if (ret) + MHI_ERR("mhi qcom,iommu-dma need set"); + +#endif +#if 0 + ret = of_property_read_u32(of_node, "mhi,timeout", + &mhi_cntrl->timeout_ms); + if (ret) + mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS; + + mhi_cntrl->bounce_buf = of_property_read_bool(of_node, "mhi,use-bb"); + ret = of_property_read_u32(of_node, "mhi,buffer-len", + (u32 *)&mhi_cntrl->buffer_len); + if (ret) + mhi_cntrl->buffer_len = MHI_MAX_MTU; +#else + mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS; + mhi_cntrl->bounce_buf = false; + mhi_cntrl->buffer_len = MHI_MAX_MTU; +#endif + + return 0; + +error_ev_cfg: + kfree(mhi_cntrl->mhi_chan); + + return ret; +} + +int of_register_mhi_controller(struct mhi_controller *mhi_cntrl) +{ + int ret; + int i; + struct mhi_event *mhi_event; + struct mhi_chan *mhi_chan; + struct mhi_cmd *mhi_cmd; + struct mhi_device *mhi_dev; + + //if (!mhi_cntrl->of_node) + // return -EINVAL; + + for (i = 0; i < MAX_MHI_CONTROLLER; i++) { + if (mhi_controller_minors[i].dev_id == mhi_cntrl->dev_id + && mhi_controller_minors[i].domain == mhi_cntrl->domain + && mhi_controller_minors[i].bus == mhi_cntrl->bus + && mhi_controller_minors[i].slot == mhi_cntrl->slot) { + mhi_cntrl->cntrl_idx = i; + break; + } + else if (mhi_controller_minors[i].dev_id == 0 + && mhi_controller_minors[i].domain == 0 + && mhi_controller_minors[i].bus == 0 + && mhi_controller_minors[i].slot == 0) { + mhi_controller_minors[i].dev_id = mhi_cntrl->dev_id; + mhi_controller_minors[i].domain = mhi_cntrl->domain; + mhi_controller_minors[i].bus = mhi_cntrl->bus; + mhi_controller_minors[i].slot = mhi_cntrl->slot; + mhi_controller_minors[i].mhi_cntrl = mhi_cntrl; + mhi_cntrl->cntrl_idx = i; + break; + } + } + + if (i == MAX_MHI_CONTROLLER) + return -EINVAL; + + if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put) + return -EINVAL; + + if (!mhi_cntrl->status_cb || !mhi_cntrl->link_status) + return -EINVAL; + + ret = of_parse_dt(mhi_cntrl, mhi_cntrl->of_node); + if (ret) + return -EINVAL; + + mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, + sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); + if 
(!mhi_cntrl->mhi_cmd) { + ret = -ENOMEM; + goto error_alloc_cmd; + } + + INIT_LIST_HEAD(&mhi_cntrl->transition_list); + mutex_init(&mhi_cntrl->pm_mutex); + rwlock_init(&mhi_cntrl->pm_lock); + spin_lock_init(&mhi_cntrl->transition_lock); + spin_lock_init(&mhi_cntrl->wlock); + INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker); + INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker); + INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker); + INIT_DELAYED_WORK(&mhi_cntrl->ready_worker, mhi_pm_ready_worker); + init_waitqueue_head(&mhi_cntrl->state_event); + + mhi_cmd = mhi_cntrl->mhi_cmd; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) + spin_lock_init(&mhi_cmd->lock); + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + mhi_event->mhi_cntrl = mhi_cntrl; + spin_lock_init(&mhi_event->lock); + if (mhi_event->data_type == MHI_ER_CTRL_ELEMENT_TYPE) + tasklet_init(&mhi_event->task, mhi_ctrl_ev_task, + (ulong)mhi_event); + else + tasklet_init(&mhi_event->task, mhi_ev_task, + (ulong)mhi_event); + } + + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + mutex_init(&mhi_chan->mutex); + init_completion(&mhi_chan->completion); + rwlock_init(&mhi_chan->lock); + } + + if (mhi_cntrl->bounce_buf) { + mhi_cntrl->map_single = mhi_map_single_use_bb; + mhi_cntrl->unmap_single = mhi_unmap_single_use_bb; + } else { + mhi_cntrl->map_single = mhi_map_single_no_bb; + mhi_cntrl->unmap_single = mhi_unmap_single_no_bb; + } + + /* register controller with mhi_bus */ + mhi_dev = mhi_alloc_device(mhi_cntrl); + if (!mhi_dev) { + ret = -ENOMEM; + goto error_alloc_dev; + } + + mhi_dev->dev_type = MHI_CONTROLLER_TYPE; + mhi_dev->mhi_cntrl = mhi_cntrl; + dev_set_name(&mhi_dev->dev, "%04x_%02u.%02u.%02u", mhi_dev->dev_id, + mhi_dev->domain, mhi_dev->bus, mhi_dev->slot); + + ret = device_add(&mhi_dev->dev); + if (ret) + goto error_add_dev; + + if (mhi_cntrl->cntrl_idx) + mhi_cntrl->cntrl_dev = device_create(mhi_cntrl_drv.class, mhi_cntrl->dev, + MKDEV(mhi_cntrl_drv.major, mhi_cntrl->cntrl_idx), NULL, + "mhi_BHI%d", mhi_cntrl->cntrl_idx); + else + mhi_cntrl->cntrl_dev = device_create(mhi_cntrl_drv.class, mhi_cntrl->dev, + MKDEV(mhi_cntrl_drv.major, mhi_cntrl->cntrl_idx), NULL, + "mhi_BHI"); + + mhi_cntrl->mhi_dev = mhi_dev; + + mhi_cntrl->parent = mhi_bus.dentry; + mhi_cntrl->klog_lvl = MHI_MSG_LVL_ERROR; + + /* adding it to this list only for debug purpose */ + mutex_lock(&mhi_bus.lock); + list_add_tail(&mhi_cntrl->node, &mhi_bus.controller_list); + mutex_unlock(&mhi_bus.lock); + + return 0; + +error_add_dev: + mhi_dealloc_device(mhi_cntrl, mhi_dev); + +error_alloc_dev: + kfree(mhi_cntrl->mhi_cmd); + +error_alloc_cmd: + kfree(mhi_cntrl->mhi_chan); + kfree(mhi_cntrl->mhi_event); + + return ret; +}; +EXPORT_SYMBOL(of_register_mhi_controller); + +void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; + + kfree(mhi_cntrl->mhi_cmd); + kfree(mhi_cntrl->mhi_event); + kfree(mhi_cntrl->mhi_chan); + kfree(mhi_cntrl->mhi_tsync); + + if (mhi_cntrl->cntrl_dev) + device_destroy(mhi_cntrl_drv.class, MKDEV(mhi_cntrl_drv.major, mhi_cntrl->cntrl_idx)); + device_del(&mhi_dev->dev); + put_device(&mhi_dev->dev); + + mutex_lock(&mhi_bus.lock); + list_del(&mhi_cntrl->node); + mutex_unlock(&mhi_bus.lock); +} + +/* set ptr to control private data */ +static inline void mhi_controller_set_devdata(struct mhi_controller *mhi_cntrl, + void *priv) +{ 
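+	/*
+	 * priv points at the bytes allocated past the controller struct by
+	 * mhi_alloc_controller() below. Usage sketch (struct my_priv is
+	 * hypothetical):
+	 *   mhi_cntrl = mhi_alloc_controller(sizeof(struct my_priv));
+	 *   struct my_priv *p = mhi_cntrl->priv_data; /* == (void *)(mhi_cntrl + 1) */
+	 */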
+	mhi_cntrl->priv_data = priv;
+}
+
+
+/* allocate mhi controller to register */
+struct mhi_controller *mhi_alloc_controller(size_t size)
+{
+	struct mhi_controller *mhi_cntrl;
+
+	mhi_cntrl = kzalloc(size + sizeof(*mhi_cntrl), GFP_KERNEL);
+
+	if (mhi_cntrl && size)
+		mhi_controller_set_devdata(mhi_cntrl, mhi_cntrl + 1);
+
+	return mhi_cntrl;
+}
+EXPORT_SYMBOL(mhi_alloc_controller);
+
+int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
+{
+	int ret;
+	u32 bhie_off;
+
+	mutex_lock(&mhi_cntrl->pm_mutex);
+
+	ret = mhi_init_dev_ctxt(mhi_cntrl);
+	if (ret) {
+		MHI_ERR("Error with init dev_ctxt\n");
+		goto error_dev_ctxt;
+	}
+
+	ret = mhi_init_irq_setup(mhi_cntrl);
+	if (ret) {
+		MHI_ERR("Error setting up irq\n");
+		goto error_setup_irq;
+	}
+
+	/*
+	 * allocate the RDDM table if specified; this table is for debug
+	 * purposes, so we'll ignore errors
+	 */
+	if (mhi_cntrl->rddm_size) {
+		mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
+				     mhi_cntrl->rddm_size);
+
+		/*
+		 * This controller supports RDDM, so we need to manually clear
+		 * the BHIE RX registers since POR values are undefined.
+		 */
+		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
+				   &bhie_off);
+		if (ret) {
+			MHI_ERR("Error getting bhie offset\n");
+			goto bhie_error;
+		}
+
+		memset_io(mhi_cntrl->regs + bhie_off + BHIE_RXVECADDR_LOW_OFFS,
+			  0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
+			  4);
+	}
+
+	mhi_cntrl->pre_init = true;
+
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+
+	return 0;
+
+bhie_error:
+	if (mhi_cntrl->rddm_image) {
+		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
+		mhi_cntrl->rddm_image = NULL;
+	}
+	mhi_deinit_free_irq(mhi_cntrl);
+
+error_setup_irq:
+	mhi_deinit_dev_ctxt(mhi_cntrl);
+
+error_dev_ctxt:
+	mutex_unlock(&mhi_cntrl->pm_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(mhi_prepare_for_power_up);
+
+void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
+{
+	if (mhi_cntrl->fbc_image) {
+		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+		mhi_cntrl->fbc_image = NULL;
+	}
+
+	if (mhi_cntrl->rddm_image) {
+		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
+		mhi_cntrl->rddm_image = NULL;
+	}
+
+	mhi_deinit_free_irq(mhi_cntrl);
+	mhi_deinit_dev_ctxt(mhi_cntrl);
+	mhi_cntrl->pre_init = false;
+}
+
+/* match dev to drv */
+static int mhi_match(struct device *dev, struct device_driver *drv)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
+	const struct mhi_device_id *id;
+
+	/* controller-type devices have no client driver associated with them */
+	if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE)
+		return 0;
+
+	for (id = mhi_drv->id_table; id->chan[0]; id++)
+		if (!strcmp(mhi_dev->chan_name, id->chan)) {
+			mhi_dev->id = id;
+			return 1;
+		}
+
+	return 0;
+};
+
+static void mhi_release_device(struct device *dev)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+
+	if (mhi_dev->ul_chan)
+		mhi_dev->ul_chan->mhi_dev = NULL;
+
+	if (mhi_dev->dl_chan)
+		mhi_dev->dl_chan->mhi_dev = NULL;
+
+	kfree(mhi_dev);
+}
+
+struct bus_type mhi_bus_type = {
+	.name = "mhi_q",
+	.dev_name = "mhi_q",
+	.match = mhi_match,
+};
+
+static int mhi_driver_probe(struct device *dev)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct device_driver *drv = dev->driver;
+	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
+	struct mhi_event *mhi_event;
+	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
+	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
+	bool auto_start = false;
+	int ret;
+
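+	/*
+	 * Probe sequence (summary): vote the device out of low power mode,
+	 * wire up the client driver's transfer/status callbacks for each
+	 * direction, call the client probe, then prepare any auto-start
+	 * channels for transfer on success.
+	 */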
+	/* bring device out of lpm */
+	ret = mhi_device_get_sync(mhi_dev);
+	if (ret)
+		return ret;
+
+	ret = -EINVAL;
+	if (ul_chan) {
+		/* LPM notification requires status_cb */
+		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
+			goto exit_probe;
+
+		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
+			goto exit_probe;
+
+		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
+		mhi_dev->status_cb = mhi_drv->status_cb;
+		auto_start = ul_chan->auto_start;
+	}
+
+	if (dl_chan) {
+		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
+			goto exit_probe;
+
+		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
+			goto exit_probe;
+
+		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];
+
+		/*
+		 * if this channel's event ring is managed by the client,
+		 * status_cb must be defined so we can send the async
+		 * callback whenever there is pending data
+		 */
+		if (mhi_event->cl_manage && !mhi_drv->status_cb)
+			goto exit_probe;
+
+		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
+
+		/* UL & DL use the same status callback */
+		mhi_dev->status_cb = mhi_drv->status_cb;
+		auto_start = (auto_start || dl_chan->auto_start);
+	}
+
+	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
+
+	if (!ret && auto_start)
+		mhi_prepare_for_transfer(mhi_dev);
+
+exit_probe:
+	mhi_device_put(mhi_dev);
+
+	return ret;
+}
+
+static int mhi_driver_remove(struct device *dev)
+{
+	struct mhi_device *mhi_dev = to_mhi_device(dev);
+	struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_chan *mhi_chan;
+	enum MHI_CH_STATE ch_state[] = {
+		MHI_CH_STATE_DISABLED,
+		MHI_CH_STATE_DISABLED
+	};
+	int dir;
+
+	/* control device has no work to do */
+	if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE)
+		return 0;
+
+	MHI_LOG("Removing device for chan:%s\n", mhi_dev->chan_name);
+
+	/* reset both channels */
+	for (dir = 0; dir < 2; dir++) {
+		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+		if (!mhi_chan)
+			continue;
+
+		/* wake all threads waiting for completion */
+		write_lock_irq(&mhi_chan->lock);
+		mhi_chan->ccs = MHI_EV_CC_INVALID;
+		complete_all(&mhi_chan->completion);
+		write_unlock_irq(&mhi_chan->lock);
+
+		/* move the channel state to disabled; no more processing */
+		mutex_lock(&mhi_chan->mutex);
+		write_lock_irq(&mhi_chan->lock);
+		ch_state[dir] = mhi_chan->ch_state;
+		mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
+		write_unlock_irq(&mhi_chan->lock);
+
+		/* reset the channel */
+		if (!mhi_chan->offload_ch)
+			mhi_reset_chan(mhi_cntrl, mhi_chan);
+
+		mutex_unlock(&mhi_chan->mutex);
+	}
+
+	/* destroy the device */
+	mhi_drv->remove(mhi_dev);
+
+	/* de-initialize the channel if it was enabled */
+	for (dir = 0; dir < 2; dir++) {
+		mhi_chan = dir ?
mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + mutex_lock(&mhi_chan->mutex); + + if (ch_state[dir] == MHI_CH_STATE_ENABLED && + !mhi_chan->offload_ch) + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + + mutex_unlock(&mhi_chan->mutex); + } + + + if (mhi_cntrl->tsync_dev == mhi_dev) + mhi_cntrl->tsync_dev = NULL; + + /* relinquish any pending votes */ + read_lock_bh(&mhi_cntrl->pm_lock); + while (atomic_read(&mhi_dev->dev_wake)) + mhi_device_put(mhi_dev); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; +} + +int mhi_driver_register(struct mhi_driver *mhi_drv) +{ + struct device_driver *driver = &mhi_drv->driver; + + if (!mhi_drv->probe || !mhi_drv->remove) + return -EINVAL; + + driver->bus = &mhi_bus_type; + driver->probe = mhi_driver_probe; + driver->remove = mhi_driver_remove; + return driver_register(driver); +} +EXPORT_SYMBOL(mhi_driver_register); + +void mhi_driver_unregister(struct mhi_driver *mhi_drv) +{ + driver_unregister(&mhi_drv->driver); +} +EXPORT_SYMBOL(mhi_driver_unregister); + +struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL); + struct device *dev; + + if (!mhi_dev) + return NULL; + + dev = &mhi_dev->dev; + device_initialize(dev); + dev->bus = &mhi_bus_type; + dev->release = mhi_release_device; + dev->parent = mhi_cntrl->dev; + mhi_dev->mhi_cntrl = mhi_cntrl; + mhi_dev->vendor = mhi_cntrl->vendor; + mhi_dev->dev_id = mhi_cntrl->dev_id; + mhi_dev->domain = mhi_cntrl->domain; + mhi_dev->bus = mhi_cntrl->bus; + mhi_dev->slot = mhi_cntrl->slot; + mhi_dev->mtu = MHI_MAX_MTU; + atomic_set(&mhi_dev->dev_wake, 0); + + return mhi_dev; +} + +static int mhi_cntrl_open(struct inode *inode, struct file *f) +{ + int ret = -ENODEV; + struct mhi_controller *mhi_cntrl; + + mutex_lock(&mhi_bus.lock); + list_for_each_entry(mhi_cntrl, &mhi_bus.controller_list, node) { + if (MINOR(inode->i_rdev) == mhi_cntrl->cntrl_idx) { + ret = 0; + f->private_data = mhi_cntrl; + break; + } + } + mutex_unlock(&mhi_bus.lock); + + return ret; +} + +static int mhi_cntrl_release(struct inode *inode, struct file *f) +{ + f->private_data = NULL; + return 0; +} + +#define IOCTL_BHI_GETDEVINFO 0x8BE0 + 1 +#define IOCTL_BHI_WRITEIMAGE 0x8BE0 + 2 +long bhi_get_dev_info(struct mhi_controller *mhi_cntrl, void __user *to); +long bhi_write_image(struct mhi_controller *mhi_cntrl, void __user *from); + +static long mhi_cntrl_ioctl(struct file *f, unsigned int cmd, unsigned long __arg) +{ + long ret = -ENODEV; + struct mhi_controller *mhi_cntrl; + + mutex_lock(&mhi_bus.lock); + list_for_each_entry(mhi_cntrl, &mhi_bus.controller_list, node) { + if (mhi_cntrl == (struct mhi_controller *)f->private_data) { + ret = 0; + break; + } + } + mutex_unlock(&mhi_bus.lock); + + if (ret) + return ret; + + switch (cmd) { + case IOCTL_BHI_GETDEVINFO: + ret = bhi_get_dev_info(mhi_cntrl, (void __user *)__arg); + break; + + case IOCTL_BHI_WRITEIMAGE: + ret = bhi_write_image(mhi_cntrl, (void __user *)__arg); + break; + + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static const struct file_operations mhi_cntrl_fops = { + .unlocked_ioctl = mhi_cntrl_ioctl, + .open = mhi_cntrl_open, + .release = mhi_cntrl_release, +}; + +static int __init mhi_cntrl_init(void) +{ + int ret; + + ret = register_chrdev(0, MHI_CNTRL_DRIVER_NAME, &mhi_cntrl_fops); + if (ret < 0) + return ret; + + mhi_cntrl_drv.major = ret; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0)) + 
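/* kernels >= 6.4 dropped the owner argument from class_create() */
+ 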
mhi_cntrl_drv.class = class_create(MHI_CNTRL_DRIVER_NAME); +#else + mhi_cntrl_drv.class = class_create(THIS_MODULE, MHI_CNTRL_DRIVER_NAME); +#endif + if (IS_ERR(mhi_cntrl_drv.class)) { + unregister_chrdev(mhi_cntrl_drv.major, MHI_CNTRL_DRIVER_NAME); + return -ENODEV; + } + + mutex_init(&mhi_cntrl_drv.lock); + INIT_LIST_HEAD(&mhi_cntrl_drv.head); + + return 0; +} + +void mhi_cntrl_exit(void) +{ + class_destroy(mhi_cntrl_drv.class); + unregister_chrdev(mhi_cntrl_drv.major, MHI_CNTRL_DRIVER_NAME); +} + +extern int mhi_dtr_init(void); +extern void mhi_dtr_exit(void); +extern int mhi_device_netdev_init(struct dentry *parent); +extern void mhi_device_netdev_exit(void); +extern int mhi_device_uci_init(void); +extern void mhi_device_uci_exit(void); +extern int mhi_controller_qcom_init(void); +extern void mhi_controller_qcom_exit(void); + +static char mhi_version[] = "Quectel_Linux_PCIE_MHI_Driver_"PCIE_MHI_DRIVER_VERSION; +module_param_string(mhi_version, mhi_version, sizeof(mhi_version), S_IRUGO); + +static int __init mhi_init(void) +{ + int ret; + + pr_info("%s %s\n", __func__, mhi_version); + + mutex_init(&mhi_bus.lock); + INIT_LIST_HEAD(&mhi_bus.controller_list); + + /* parent directory */ + mhi_bus.dentry = debugfs_create_dir(mhi_bus_type.name, NULL); + + ret = bus_register(&mhi_bus_type); + if (ret) { + pr_err("Error bus_register ret:%d\n", ret); + return ret; + } + + ret = mhi_dtr_init(); + if (ret) { + pr_err("Error mhi_dtr_init ret:%d\n", ret); + bus_unregister(&mhi_bus_type); + return ret; + } + + ret = mhi_device_netdev_init(mhi_bus.dentry); + if (ret) { + pr_err("Error mhi_device_netdev_init ret:%d\n", ret); + } + + ret = mhi_device_uci_init(); + if (ret) { + pr_err("Error mhi_device_uci_init ret:%d\n", ret); + } + + ret = mhi_cntrl_init(); + if (ret) { + pr_err("Error mhi_cntrl_init ret:%d\n", ret); + } + + ret = mhi_controller_qcom_init(); + if (ret) { + pr_err("Error mhi_controller_qcom_init ret:%d\n", ret); + } + + return ret; +} + +static void mhi_exit(void) +{ + mhi_controller_qcom_exit(); + mhi_cntrl_exit(); + mhi_device_uci_exit(); + mhi_device_netdev_exit(); + mhi_dtr_exit(); + bus_unregister(&mhi_bus_type); + debugfs_remove_recursive(mhi_bus.dentry); +} + +module_init(mhi_init); +module_exit(mhi_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("MHI_CORE"); +MODULE_DESCRIPTION("MHI Host Interface"); diff --git a/wwan/driver/quectel_MHI/src/core/mhi_internal.h b/wwan/driver/quectel_MHI/src/core/mhi_internal.h new file mode 100644 index 0000000..09f3aac --- /dev/null +++ b/wwan/driver/quectel_MHI/src/core/mhi_internal.h @@ -0,0 +1,1190 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
*/
+
+#ifndef _MHI_INT_H
+#define _MHI_INT_H
+
+#include
+#ifndef writel_relaxed
+#define writel_relaxed writel
+#endif
+
+#ifndef writel_relaxed_no_log
+#define writel_relaxed_no_log writel_relaxed
+#endif
+
+#ifndef readq
+static inline u64 readq(void __iomem *reg)
+{
+ return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
+}
+#endif
+
+#ifndef readq_relaxed
+#define readq_relaxed readq
+#endif
+
+#ifndef readq_relaxed_no_log
+#define readq_relaxed_no_log readq_relaxed
+#endif
+
+#ifndef U32_MAX
+#define U32_MAX ((u32)~0U)
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,10,53 ))
+static inline void reinit_completion(struct completion *x)
+{
+ x->done = 0;
+}
+#endif
+
+#ifndef __ATTR_RO
+#define __ATTR_RO(_name) { \
+ .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \
+ .show = _name##_show, \
+}
+#endif
+#ifndef __ATTR_WO
+#define __ATTR_WO(_name) { \
+ .attr = { .name = __stringify(_name), .mode = S_IWUSR }, \
+ .store = _name##_store, \
+}
+#endif
+#ifndef __ATTR_RW
+#define __ATTR_RW(_name) __ATTR(_name, (S_IWUSR | S_IRUGO), \
+ _name##_show, _name##_store)
+#endif
+#ifndef DEVICE_ATTR_RO
+#define DEVICE_ATTR_RO(_name) \
+ struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
+#endif
+#ifndef DEVICE_ATTR_WO
+#define DEVICE_ATTR_WO(_name) \
+ struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
+#endif
+#ifndef DRIVER_ATTR_RW
+#define DRIVER_ATTR_RW(_name) \
+ struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
+#endif
+
+#ifdef EXPORT_SYMBOL
+#undef EXPORT_SYMBOL
+#define EXPORT_SYMBOL(sym)
+#endif
+
+extern struct bus_type mhi_bus_type;
+
+/* MHI mmio register mapping */
+#define PCI_INVALID_READ(val) (val == U32_MAX)
+
+#define MHIREGLEN (0x0)
+#define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF)
+#define MHIREGLEN_MHIREGLEN_SHIFT (0)
+
+#define MHIVER (0x8)
+#define MHIVER_MHIVER_MASK (0xFFFFFFFF)
+#define MHIVER_MHIVER_SHIFT (0)
+
+#define MHICFG (0x10)
+#define MHICFG_NHWER_MASK (0xFF000000)
+#define MHICFG_NHWER_SHIFT (24)
+#define MHICFG_NER_MASK (0xFF0000)
+#define MHICFG_NER_SHIFT (16)
+#define MHICFG_NHWCH_MASK (0xFF00)
+#define MHICFG_NHWCH_SHIFT (8)
+#define MHICFG_NCH_MASK (0xFF)
+#define MHICFG_NCH_SHIFT (0)
+
+#define CHDBOFF (0x18)
+#define CHDBOFF_CHDBOFF_MASK (0xFFFFFFFF)
+#define CHDBOFF_CHDBOFF_SHIFT (0)
+
+#define ERDBOFF (0x20)
+#define ERDBOFF_ERDBOFF_MASK (0xFFFFFFFF)
+#define ERDBOFF_ERDBOFF_SHIFT (0)
+
+#define BHIOFF (0x28)
+#define BHIOFF_BHIOFF_MASK (0xFFFFFFFF)
+#define BHIOFF_BHIOFF_SHIFT (0)
+
+#define BHIEOFF (0x2C)
+#define BHIEOFF_BHIEOFF_MASK (0xFFFFFFFF)
+#define BHIEOFF_BHIEOFF_SHIFT (0)
+
+#define DEBUGOFF (0x30)
+#define DEBUGOFF_DEBUGOFF_MASK (0xFFFFFFFF)
+#define DEBUGOFF_DEBUGOFF_SHIFT (0)
+
+#define MHICTRL (0x38)
+#define MHICTRL_MHISTATE_MASK (0x0000FF00)
+#define MHICTRL_MHISTATE_SHIFT (8)
+#define MHICTRL_RESET_MASK (0x2)
+#define MHICTRL_RESET_SHIFT (1)
+
+#define MHISTATUS (0x48)
+#define MHISTATUS_MHISTATE_MASK (0x0000FF00)
+#define MHISTATUS_MHISTATE_SHIFT (8)
+#define MHISTATUS_SYSERR_MASK (0x4)
+#define MHISTATUS_SYSERR_SHIFT (2)
+#define MHISTATUS_READY_MASK (0x1)
+#define MHISTATUS_READY_SHIFT (0)
+
+#define CCABAP_LOWER (0x58)
+#define CCABAP_LOWER_CCABAP_LOWER_MASK (0xFFFFFFFF)
+#define CCABAP_LOWER_CCABAP_LOWER_SHIFT (0)
+
+#define CCABAP_HIGHER (0x5C)
+#define CCABAP_HIGHER_CCABAP_HIGHER_MASK (0xFFFFFFFF)
+#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT (0)
+
+#define ECABAP_LOWER (0x60)
+#define ECABAP_LOWER_ECABAP_LOWER_MASK (0xFFFFFFFF)
+#define ECABAP_LOWER_ECABAP_LOWER_SHIFT (0)
+ 
+
+#define ECABAP_HIGHER (0x64)
+#define ECABAP_HIGHER_ECABAP_HIGHER_MASK (0xFFFFFFFF)
+#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT (0)
+
+#define CRCBAP_LOWER (0x68)
+#define CRCBAP_LOWER_CRCBAP_LOWER_MASK (0xFFFFFFFF)
+#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT (0)
+
+#define CRCBAP_HIGHER (0x6C)
+#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK (0xFFFFFFFF)
+#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT (0)
+
+#define CRDB_LOWER (0x70)
+#define CRDB_LOWER_CRDB_LOWER_MASK (0xFFFFFFFF)
+#define CRDB_LOWER_CRDB_LOWER_SHIFT (0)
+
+#define CRDB_HIGHER (0x74)
+#define CRDB_HIGHER_CRDB_HIGHER_MASK (0xFFFFFFFF)
+#define CRDB_HIGHER_CRDB_HIGHER_SHIFT (0)
+
+#define MHICTRLBASE_LOWER (0x80)
+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK (0xFFFFFFFF)
+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT (0)
+
+#define MHICTRLBASE_HIGHER (0x84)
+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK (0xFFFFFFFF)
+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT (0)
+
+#define MHICTRLLIMIT_LOWER (0x88)
+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK (0xFFFFFFFF)
+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT (0)
+
+#define MHICTRLLIMIT_HIGHER (0x8C)
+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK (0xFFFFFFFF)
+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT (0)
+
+#define MHIDATABASE_LOWER (0x98)
+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK (0xFFFFFFFF)
+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT (0)
+
+#define MHIDATABASE_HIGHER (0x9C)
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK (0xFFFFFFFF)
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT (0)
+
+#define MHIDATALIMIT_LOWER (0xA0)
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK (0xFFFFFFFF)
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT (0)
+
+#define MHIDATALIMIT_HIGHER (0xA4)
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK (0xFFFFFFFF)
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT (0)
+
+#define MHI_READ_REG_FIELD(_VAR,_REG,_FIELD) \
+ ((_VAR & _REG ## _ ## _FIELD ## _MASK) >> _REG ## _ ## _FIELD ## _SHIFT)
+
+#define MHI_WRITE_REG_FIELD(_VAR,_REG,_FIELD,_VAL) \
+ do { \
+ _VAR &= ~_REG ## _ ## _FIELD ## _MASK; \
+ _VAR |= (_VAL << _REG ## _ ## _FIELD ## _SHIFT); \
+ } while(0)
+
+/* Host request register */
+#define MHI_SOC_RESET_REQ_OFFSET (0xB0)
+#define MHI_SOC_RESET_REQ BIT(0)
+
+/* MHI misc capability registers */
+#define MISC_OFFSET (0x24)
+#define MISC_CAP_MASK (0xFFFFFFFF)
+#define MISC_CAP_SHIFT (0)
+
+#define CAP_CAPID_MASK (0xFF000000)
+#define CAP_CAPID_SHIFT (24)
+#define CAP_NEXT_CAP_MASK (0x00FFF000)
+#define CAP_NEXT_CAP_SHIFT (12)
+
+/* MHI Timesync offsets */
+#define TIMESYNC_CFG_OFFSET (0x00)
+#define TIMESYNC_CFG_CAPID_MASK (CAP_CAPID_MASK)
+#define TIMESYNC_CFG_CAPID_SHIFT (CAP_CAPID_SHIFT)
+#define TIMESYNC_CFG_NEXT_OFF_MASK (CAP_NEXT_CAP_MASK)
+#define TIMESYNC_CFG_NEXT_OFF_SHIFT (CAP_NEXT_CAP_SHIFT)
+#define TIMESYNC_CFG_NUMCMD_MASK (0xFF)
+#define TIMESYNC_CFG_NUMCMD_SHIFT (0)
+#define TIMESYNC_DB_OFFSET (0x4)
+#define TIMESYNC_TIME_LOW_OFFSET (0x8)
+#define TIMESYNC_TIME_HIGH_OFFSET (0xC)
+
+#define TIMESYNC_CAP_ID (2)
+
+/* MHI BHI offsets */
+#define BHI_BHIVERSION_MINOR (0x00)
+#define BHI_BHIVERSION_MAJOR (0x04)
+#define BHI_IMGADDR_LOW (0x08)
+#define BHI_IMGADDR_HIGH (0x0C)
+#define BHI_IMGSIZE (0x10)
+#define BHI_RSVD1 (0x14)
+#define BHI_IMGTXDB (0x18)
+#define BHI_TXDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHI_TXDB_SEQNUM_SHFT (0)
+#define BHI_RSVD2 (0x1C)
+#define BHI_INTVEC (0x20)
+#define BHI_RSVD3 (0x24)
+#define BHI_EXECENV (0x28)
+#define BHI_STATUS (0x2C)
+#define BHI_ERRCODE (0x30) +#define BHI_ERRDBG1 (0x34) +#define BHI_ERRDBG2 (0x38) +#define BHI_ERRDBG3 (0x3C) +#define BHI_SERIALNU (0x40) +#define BHI_SBLANTIROLLVER (0x44) +#define BHI_NUMSEG (0x48) +#define BHI_MSMHWID(n) (0x4C + (0x4 * n)) +#define BHI_OEMPKHASH(n) (0x64 + (0x4 * n)) +#define BHI_RSVD5 (0xC4) +#define BHI_STATUS_MASK (0xC0000000) +#define BHI_STATUS_SHIFT (30) +#define BHI_STATUS_ERROR (3) +#define BHI_STATUS_SUCCESS (2) +#define BHI_STATUS_RESET (0) + +/* MHI BHIE offsets */ +#define BHIE_MSMSOCID_OFFS (0x0000) +#define BHIE_TXVECADDR_LOW_OFFS (0x002C) +#define BHIE_TXVECADDR_HIGH_OFFS (0x0030) +#define BHIE_TXVECSIZE_OFFS (0x0034) +#define BHIE_TXVECDB_OFFS (0x003C) +#define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_TXVECDB_SEQNUM_SHFT (0) +#define BHIE_TXVECSTATUS_OFFS (0x0044) +#define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_TXVECSTATUS_SEQNUM_SHFT (0) +#define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000) +#define BHIE_TXVECSTATUS_STATUS_SHFT (30) +#define BHIE_TXVECSTATUS_STATUS_RESET (0x00) +#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02) +#define BHIE_TXVECSTATUS_STATUS_ERROR (0x03) +#define BHIE_RXVECADDR_LOW_OFFS (0x0060) +#define BHIE_RXVECADDR_HIGH_OFFS (0x0064) +#define BHIE_RXVECSIZE_OFFS (0x0068) +#define BHIE_RXVECDB_OFFS (0x0070) +#define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_RXVECDB_SEQNUM_SHFT (0) +#define BHIE_RXVECSTATUS_OFFS (0x0078) +#define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF) +#define BHIE_RXVECSTATUS_SEQNUM_SHFT (0) +#define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000) +#define BHIE_RXVECSTATUS_STATUS_SHFT (30) +#define BHIE_RXVECSTATUS_STATUS_RESET (0x00) +#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02) +#define BHIE_RXVECSTATUS_STATUS_ERROR (0x03) + +/* convert ticks to micro seconds by dividing by 19.2 */ +#define TIME_TICKS_TO_US(x) (div_u64((x) * 10, 192)) + +struct mhi_event_ctxt { + u32 reserved : 8; + u32 intmodc : 8; + u32 intmodt : 16; + u32 ertype; + u32 msivec; + + u64 rbase __packed __aligned(4); + u64 rlen __packed __aligned(4); + u64 rp __packed __aligned(4); + u64 wp __packed __aligned(4); +}; + +struct mhi_chan_ctxt { + u32 chstate : 8; + u32 brstmode : 2; + u32 pollcfg : 6; + u32 reserved : 16; + u32 chtype; + u32 erindex; + + u64 rbase __packed __aligned(4); + u64 rlen __packed __aligned(4); + u64 rp __packed __aligned(4); + u64 wp __packed __aligned(4); +}; + +struct mhi_cmd_ctxt { + u32 reserved0; + u32 reserved1; + u32 reserved2; + + u64 rbase __packed __aligned(4); + u64 rlen __packed __aligned(4); + u64 rp __packed __aligned(4); + u64 wp __packed __aligned(4); +}; + +struct mhi_tre { + u64 ptr; + u32 dword[2]; +}; + +/* Channel context state */ +enum mhi_dev_ch_ctx_state { + MHI_DEV_CH_STATE_DISABLED, + MHI_DEV_CH_STATE_ENABLED, + MHI_DEV_CH_STATE_RUNNING, + MHI_DEV_CH_STATE_SUSPENDED, + MHI_DEV_CH_STATE_STOP, + MHI_DEV_CH_STATE_ERROR, + MHI_DEV_CH_STATE_RESERVED, + MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF +}; + +/* Channel type */ +enum mhi_dev_ch_ctx_type { + MHI_DEV_CH_TYPE_NONE, + MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL, + MHI_DEV_CH_TYPE_INBOUND_CHANNEL, + MHI_DEV_CH_RESERVED +}; + +/* Channel context type */ +struct mhi_dev_ch_ctx { + enum mhi_dev_ch_ctx_state ch_state; + enum mhi_dev_ch_ctx_type ch_type; + uint32_t err_indx; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +enum mhi_dev_ring_element_type_id { + MHI_DEV_RING_EL_INVALID = 0, + MHI_DEV_RING_EL_NOOP = 1, + MHI_DEV_RING_EL_TRANSFER = 2, + MHI_DEV_RING_EL_RESET = 16, + MHI_DEV_RING_EL_STOP 
= 17, + MHI_DEV_RING_EL_START = 18, + MHI_DEV_RING_EL_MHI_STATE_CHG = 32, + MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33, + MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34, + MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64, + MHI_DEV_RING_EL_UNDEF +}; + +enum mhi_dev_ring_state { + RING_STATE_UINT = 0, + RING_STATE_IDLE, + RING_STATE_PENDING, +}; + +enum mhi_dev_ring_type { + RING_TYPE_CMD = 0, + RING_TYPE_ER, + RING_TYPE_CH, + RING_TYPE_INVAL +}; + +/* Event context interrupt moderation */ +enum mhi_dev_evt_ctx_int_mod_timer { + MHI_DEV_EVT_INT_MODERATION_DISABLED +}; + +/* Event ring type */ +enum mhi_dev_evt_ctx_event_ring_type { + MHI_DEV_EVT_TYPE_DEFAULT, + MHI_DEV_EVT_TYPE_VALID, + MHI_DEV_EVT_RESERVED +}; + +/* Event ring context type */ +struct mhi_dev_ev_ctx { + uint32_t res1:16; + enum mhi_dev_evt_ctx_int_mod_timer intmodt:16; + enum mhi_dev_evt_ctx_event_ring_type ertype; + uint32_t msivec; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* Command context */ +struct mhi_dev_cmd_ctx { + uint32_t res1; + uint32_t res2; + uint32_t res3; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* generic context */ +struct mhi_dev_gen_ctx { + uint32_t res1; + uint32_t res2; + uint32_t res3; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* Transfer ring element */ +struct mhi_dev_transfer_ring_element { + uint64_t data_buf_ptr; + uint32_t len:16; + uint32_t res1:16; + uint32_t chain:1; + uint32_t res2:7; + uint32_t ieob:1; + uint32_t ieot:1; + uint32_t bei:1; + uint32_t res3:5; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res4:8; +} __packed; + +/* Command ring element */ +/* Command ring No op command */ +struct mhi_dev_cmd_ring_op { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring reset channel command */ +struct mhi_dev_cmd_ring_reset_channel_cmd { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring stop channel command */ +struct mhi_dev_cmd_ring_stop_channel_cmd { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring start channel command */ +struct mhi_dev_cmd_ring_start_channel_cmd { + uint64_t res1; + uint32_t seqnum; + uint32_t reliable:1; + uint32_t res2:15; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +enum mhi_dev_cmd_completion_code { + MHI_CMD_COMPL_CODE_INVALID = 0, + MHI_CMD_COMPL_CODE_SUCCESS = 1, + MHI_CMD_COMPL_CODE_EOT = 2, + MHI_CMD_COMPL_CODE_OVERFLOW = 3, + MHI_CMD_COMPL_CODE_EOB = 4, + MHI_CMD_COMPL_CODE_UNDEFINED = 16, + MHI_CMD_COMPL_CODE_RING_EL = 17, + MHI_CMD_COMPL_CODE_RES +}; + +/* Event ring elements */ +/* Transfer completion event */ +struct mhi_dev_event_ring_transfer_completion { + uint64_t ptr; + uint32_t len:16; + uint32_t res1:8; + enum mhi_dev_cmd_completion_code code:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command completion event */ +struct mhi_dev_event_ring_cmd_completion { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_cmd_completion_code code:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +/* MHI state change event */ +struct mhi_dev_event_ring_state_change { + uint64_t ptr; + uint32_t res1:24; + uint32_t 
/*enum mhi_dev_state*/ mhistate:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +enum mhi_dev_execenv { + MHI_DEV_SBL_EE = 1, + MHI_DEV_AMSS_EE = 2, + MHI_DEV_UNRESERVED +}; + +/* EE state change event */ +struct mhi_dev_event_ring_ee_state_change { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_execenv execenv:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +/* Generic cmd to parse common details like type and channel id */ +struct mhi_dev_ring_generic { + uint64_t ptr; + uint32_t res1:24; + uint32_t /*enum mhi_dev_state*/ mhistate:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +struct mhi_config { + uint32_t mhi_reg_len; + uint32_t version; + uint32_t event_rings; + uint32_t channels; + uint32_t chdb_offset; + uint32_t erdb_offset; +}; + +/* Possible ring element types */ +union mhi_dev_ring_element_type { + struct mhi_dev_cmd_ring_op cmd_no_op; + struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset; + struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop; + struct mhi_dev_cmd_ring_start_channel_cmd cmd_start; + struct mhi_dev_transfer_ring_element cmd_transfer; + struct mhi_dev_event_ring_transfer_completion evt_tr_comp; + struct mhi_dev_event_ring_cmd_completion evt_cmd_comp; + struct mhi_dev_event_ring_state_change evt_state_change; + struct mhi_dev_event_ring_ee_state_change evt_ee_state; + struct mhi_dev_ring_generic generic; +}; + +struct bhi_vec_entry { + u64 dma_addr; + u64 size; +}; + +enum mhi_cmd_type { + MHI_CMD_TYPE_NOP = 1, + MHI_CMD_TYPE_RESET = 16, + MHI_CMD_TYPE_STOP = 17, + MHI_CMD_TYPE_START = 18, + MHI_CMD_TYPE_TSYNC = 24, +}; + +/* no operation command */ +#define MHI_TRE_CMD_NOOP_PTR (0) +#define MHI_TRE_CMD_NOOP_DWORD0 (0) +#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_TYPE_NOP << 16) + +/* channel reset command */ +#define MHI_TRE_CMD_RESET_PTR (0) +#define MHI_TRE_CMD_RESET_DWORD0 (0) +#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \ + (MHI_CMD_TYPE_RESET << 16)) + +/* channel stop command */ +#define MHI_TRE_CMD_STOP_PTR (0) +#define MHI_TRE_CMD_STOP_DWORD0 (0) +#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | (MHI_CMD_TYPE_STOP << 16)) + +/* channel start command */ +#define MHI_TRE_CMD_START_PTR (0) +#define MHI_TRE_CMD_START_DWORD0 (0) +#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \ + (MHI_CMD_TYPE_START << 16)) + +/* time sync cfg command */ +#define MHI_TRE_CMD_TSYNC_CFG_PTR (0) +#define MHI_TRE_CMD_TSYNC_CFG_DWORD0 (0) +#define MHI_TRE_CMD_TSYNC_CFG_DWORD1(er) ((MHI_CMD_TYPE_TSYNC << 16) | \ + (er << 24)) + +#define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) +#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) + +/* event descriptor macros */ +#define MHI_TRE_EV_PTR(ptr) (ptr) +#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len) +#define MHI_TRE_EV_DWORD1(chid, type) ((chid << 24) | (type << 16)) +#define MHI_TRE_GET_EV_PTR(tre) ((tre)->ptr) +#define MHI_TRE_GET_EV_CODE(tre) (((tre)->dword[0] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_LEN(tre) ((tre)->dword[0] & 0xFFFF) +#define MHI_TRE_GET_EV_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) +#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF) +#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0]) +#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr) +#define 
MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr) +#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF) + +/* transfer descriptor macros */ +#define MHI_TRE_DATA_PTR(ptr) (ptr) +#define MHI_TRE_DATA_DWORD0(len) (len & MHI_MAX_MTU) +#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \ + | (ieot << 9) | (ieob << 8) | chain) + +/* rsc transfer descriptor macros */ +#define MHI_RSCTRE_DATA_PTR(ptr, len) (((u64)len << 48) | ptr) +#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie) +#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16) + +enum MHI_CMD { + MHI_CMD_RESET_CHAN, + MHI_CMD_START_CHAN, + MHI_CMD_TIMSYNC_CFG, +}; + +enum MHI_PKT_TYPE { + MHI_PKT_TYPE_INVALID = 0x0, + MHI_PKT_TYPE_NOOP_CMD = 0x1, + MHI_PKT_TYPE_TRANSFER = 0x2, + MHI_PKT_TYPE_COALESCING = 0x8, + MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10, + MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11, + MHI_PKT_TYPE_START_CHAN_CMD = 0x12, + MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20, + MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21, + MHI_PKT_TYPE_TX_EVENT = 0x22, + MHI_PKT_TYPE_RSC_TX_EVENT = 0x28, + MHI_PKT_TYPE_EE_EVENT = 0x40, + MHI_PKT_TYPE_TSYNC_EVENT = 0x48, + MHI_PKT_TYPE_STALE_EVENT, /* internal event */ +}; + +/* MHI transfer completion events */ +enum MHI_EV_CCS { + MHI_EV_CC_INVALID = 0x0, + MHI_EV_CC_SUCCESS = 0x1, + MHI_EV_CC_EOT = 0x2, + MHI_EV_CC_OVERFLOW = 0x3, + MHI_EV_CC_EOB = 0x4, + MHI_EV_CC_OOB = 0x5, + MHI_EV_CC_DB_MODE = 0x6, + MHI_EV_CC_UNDEFINED_ERR = 0x10, + MHI_EV_CC_BAD_TRE = 0x11, +}; + +enum MHI_CH_STATE { + MHI_CH_STATE_DISABLED = 0x0, + MHI_CH_STATE_ENABLED = 0x1, + MHI_CH_STATE_RUNNING = 0x2, + MHI_CH_STATE_SUSPENDED = 0x3, + MHI_CH_STATE_STOP = 0x4, + MHI_CH_STATE_ERROR = 0x5, +}; + +enum MHI_BRSTMODE { + MHI_BRSTMODE_DISABLE = 0x2, + MHI_BRSTMODE_ENABLE = 0x3, +}; + +#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_BRSTMODE_DISABLE && \ + mode != MHI_BRSTMODE_ENABLE) + +#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \ + ee == MHI_EE_EDL) + +#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW) + +enum MHI_ST_TRANSITION { + MHI_ST_TRANSITION_PBL, + MHI_ST_TRANSITION_READY, + MHI_ST_TRANSITION_SBL, + MHI_ST_TRANSITION_MISSION_MODE, + MHI_ST_TRANSITION_FP, + MHI_ST_TRANSITION_MAX, +}; + +extern const char * const mhi_state_tran_str[MHI_ST_TRANSITION_MAX]; +#define TO_MHI_STATE_TRANS_STR(state) (((state) >= MHI_ST_TRANSITION_MAX) ? \ + "INVALID_STATE" : mhi_state_tran_str[state]) + +extern const char * const mhi_state_str[MHI_STATE_MAX]; +#define TO_MHI_STATE_STR(state) ((state >= MHI_STATE_MAX || \ + !mhi_state_str[state]) ? 
\ + "INVALID_STATE" : mhi_state_str[state]) + +enum { + MHI_PM_BIT_DISABLE, + MHI_PM_BIT_POR, + MHI_PM_BIT_M0, + MHI_PM_BIT_M2, + MHI_PM_BIT_M3_ENTER, + MHI_PM_BIT_M3, + MHI_PM_BIT_M3_EXIT, + MHI_PM_BIT_FW_DL_ERR, + MHI_PM_BIT_SYS_ERR_DETECT, + MHI_PM_BIT_SYS_ERR_PROCESS, + MHI_PM_BIT_SHUTDOWN_PROCESS, + MHI_PM_BIT_LD_ERR_FATAL_DETECT, + MHI_PM_BIT_MAX +}; + +/* internal power states */ +enum MHI_PM_STATE { + MHI_PM_DISABLE = BIT(MHI_PM_BIT_DISABLE), /* MHI is not enabled */ + MHI_PM_POR = BIT(MHI_PM_BIT_POR), /* reset state */ + MHI_PM_M0 = BIT(MHI_PM_BIT_M0), + MHI_PM_M2 = BIT(MHI_PM_BIT_M2), + MHI_PM_M3_ENTER = BIT(MHI_PM_BIT_M3_ENTER), + MHI_PM_M3 = BIT(MHI_PM_BIT_M3), + MHI_PM_M3_EXIT = BIT(MHI_PM_BIT_M3_EXIT), + /* firmware download failure state */ + MHI_PM_FW_DL_ERR = BIT(MHI_PM_BIT_FW_DL_ERR), + MHI_PM_SYS_ERR_DETECT = BIT(MHI_PM_BIT_SYS_ERR_DETECT), + MHI_PM_SYS_ERR_PROCESS = BIT(MHI_PM_BIT_SYS_ERR_PROCESS), + MHI_PM_SHUTDOWN_PROCESS = BIT(MHI_PM_BIT_SHUTDOWN_PROCESS), + /* link not accessible */ + MHI_PM_LD_ERR_FATAL_DETECT = BIT(MHI_PM_BIT_LD_ERR_FATAL_DETECT), +}; + +#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \ + MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \ + MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \ + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR))) +#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR) +#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT) +#define MHI_DB_ACCESS_VALID(pm_state) (pm_state & MHI_PM_M0) +#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \ + MHI_PM_M2 | MHI_PM_M3_EXIT)) +#define MHI_WAKE_DB_SET_VALID(pm_state) (pm_state & MHI_PM_M2) +#define MHI_WAKE_DB_FORCE_SET_VALID(pm_state) MHI_WAKE_DB_CLEAR_VALID(pm_state) +#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \ + MHI_PM_IN_ERROR_STATE(pm_state)) +#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \ + (MHI_PM_M3_ENTER | MHI_PM_M3)) + +/* accepted buffer type for the channel */ +enum MHI_XFER_TYPE { + MHI_XFER_BUFFER, + MHI_XFER_SKB, + MHI_XFER_SCLIST, + MHI_XFER_NOP, /* CPU offload channel, host does not accept transfer */ + MHI_XFER_DMA, /* receive dma address, already mapped by client */ + MHI_XFER_RSC_DMA, /* RSC type, accept premapped buffer */ +}; + +#define NR_OF_CMD_RINGS (1) +#define CMD_EL_PER_RING (128) +#define PRIMARY_CMD_RING (0) +#define MHI_DEV_WAKE_DB (127) +#define MHI_MAX_MTU (0xffff) + +enum MHI_ER_TYPE { + MHI_ER_TYPE_INVALID = 0x0, + MHI_ER_TYPE_VALID = 0x1, +}; + +enum mhi_er_data_type { + MHI_ER_DATA_ELEMENT_TYPE, + MHI_ER_CTRL_ELEMENT_TYPE, + MHI_ER_TSYNC_ELEMENT_TYPE, + MHI_ER_DATA_TYPE_MAX = MHI_ER_TSYNC_ELEMENT_TYPE, +}; + +enum mhi_ch_ee_mask { + MHI_CH_EE_PBL = BIT(MHI_EE_PBL), + MHI_CH_EE_SBL = BIT(MHI_EE_SBL), + MHI_CH_EE_AMSS = BIT(MHI_EE_AMSS), + MHI_CH_EE_RDDM = BIT(MHI_EE_RDDM), + MHI_CH_EE_PTHRU = BIT(MHI_EE_PTHRU), + MHI_CH_EE_WFW = BIT(MHI_EE_WFW), + MHI_CH_EE_EDL = BIT(MHI_EE_EDL), +}; + +enum mhi_ch_type { + MHI_CH_TYPE_INVALID = 0, + MHI_CH_TYPE_OUTBOUND = DMA_TO_DEVICE, + MHI_CH_TYPE_INBOUND = DMA_FROM_DEVICE, + MHI_CH_TYPE_INBOUND_COALESCED = 3, +}; + +struct db_cfg { + bool reset_req; + bool db_mode; + u32 pollcfg; + enum MHI_BRSTMODE brstmode; + dma_addr_t db_val; + void (*process_db)(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, void __iomem *io_addr, + dma_addr_t db_val); +}; + +struct mhi_pm_transitions { + enum MHI_PM_STATE from_state; + u32 to_states; +}; + +struct state_transition { + struct 
list_head node;
+ enum MHI_ST_TRANSITION state;
+};
+
+/* Control Segment */
+struct mhi_ctrl_seg
+{
+ struct mhi_tre hw_in_chan_ring[NUM_MHI_IPA_IN_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_IN_RING_ELEMENTS*16);
+ struct mhi_tre hw_out_chan_ring[NUM_MHI_IPA_OUT_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16);
+#ifdef ENABLE_IP_SW0
+ struct mhi_tre sw_in_chan_ring[NUM_MHI_SW_IP_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_IN_RING_ELEMENTS*16);
+ struct mhi_tre sw_out_chan_ring[NUM_MHI_SW_IP_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16);
+#endif
+
+#ifdef ENABLE_ADPL
+ struct mhi_tre adpl_in_chan_ring[NUM_MHI_ADPL_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_IN_RING_ELEMENTS*16);
+#endif
+
+#ifdef ENABLE_QDSS
+ struct mhi_tre qdss_in_chan_ring[NUM_MHI_QDSS_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_IN_RING_ELEMENTS*16);
+#endif
+
+ struct mhi_tre diag_in_chan_ring[NUM_MHI_DIAG_IN_RING_ELEMENTS] __packed __aligned(NUM_MHI_IPA_OUT_RING_ELEMENTS*16);
+ struct mhi_tre chan_ring[NUM_MHI_CHAN_RING_ELEMENTS*2*12] __packed __aligned(NUM_MHI_CHAN_RING_ELEMENTS*16);
+ struct mhi_tre event_ring[NUM_MHI_EVT_RINGS][NUM_MHI_EVT_RING_ELEMENTS] __packed __aligned(NUM_MHI_EVT_RING_ELEMENTS*16);
+ struct mhi_tre cmd_ring[NR_OF_CMD_RINGS][CMD_EL_PER_RING] __packed __aligned(CMD_EL_PER_RING*16);
+
+ struct mhi_chan_ctxt chan_ctxt[NUM_MHI_XFER_RINGS] __aligned(128);
+ struct mhi_event_ctxt er_ctxt[NUM_MHI_EVT_RINGS] __aligned(128);
+ struct mhi_cmd_ctxt cmd_ctxt[NR_OF_CMD_RINGS] __aligned(128);
+} __aligned(4096);
+
+struct mhi_ctxt {
+ struct mhi_event_ctxt *er_ctxt;
+ struct mhi_chan_ctxt *chan_ctxt;
+ struct mhi_cmd_ctxt *cmd_ctxt;
+ dma_addr_t er_ctxt_addr;
+ dma_addr_t chan_ctxt_addr;
+ dma_addr_t cmd_ctxt_addr;
+ struct mhi_ctrl_seg *ctrl_seg;
+ dma_addr_t ctrl_seg_addr;
+};
+
+struct mhi_ring {
+ dma_addr_t dma_handle;
+ dma_addr_t iommu_base;
+ u64 *ctxt_wp; /* point to ctxt wp */
+ void *pre_aligned;
+ void *base;
+ void *rp;
+ void *wp;
+ size_t el_size;
+ size_t len;
+ size_t elements;
+ size_t alloc_size;
+ void __iomem *db_addr;
+};
+
+struct mhi_cmd {
+ struct mhi_ring ring;
+ spinlock_t lock;
+};
+
+struct mhi_buf_info {
+ dma_addr_t p_addr;
+ void *v_addr;
+ void *bb_addr;
+ void *wp;
+ size_t len;
+ void *cb_buf;
+ bool used; /* indicates element is in use */
+ bool pre_mapped; /* already pre-mapped by client */
+ enum dma_data_direction dir;
+};
+
+struct mhi_event {
+ u32 er_index;
+ u32 intmod;
+ u32 msi;
+ int chan; /* this event ring is dedicated to a channel */
+ u32 priority;
+ enum mhi_er_data_type data_type;
+ struct mhi_ring ring;
+ struct db_cfg db_cfg;
+ u32 used_elements;
+ bool hw_ring;
+ bool cl_manage;
+ bool offload_ev; /* managed by a device driver */
+ spinlock_t lock;
+ struct mhi_chan *mhi_chan; /* dedicated to channel */
+ struct tasklet_struct task;
+ int (*process_event)(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ u32 event_quota);
+ struct mhi_controller *mhi_cntrl;
+};
+
+struct mhi_chan {
+ u32 chan;
+ const char *name;
+ /*
+ * important: when consuming, increment tre_ring first; when releasing,
+ * decrement buf_ring first. If tre_ring has space, buf_ring is
+ * guaranteed to have space, so we do not need to check both rings. 
+ */ + struct mhi_ring buf_ring; + struct mhi_ring tre_ring; + + u32 used_elements; + u32 used_events[MHI_EV_CC_DB_MODE+1]; + + u32 er_index; + u32 intmod; + enum mhi_ch_type type; + enum dma_data_direction dir; + struct db_cfg db_cfg; + u32 ee_mask; + enum MHI_XFER_TYPE xfer_type; + enum MHI_CH_STATE ch_state; + enum MHI_EV_CCS ccs; + bool lpm_notify; + bool configured; + bool offload_ch; + bool pre_alloc; + bool auto_start; + bool wake_capable; /* channel should wake up system */ + /* functions that generate the transfer ring elements */ + int (*gen_tre)(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, void *buf, void *cb, + size_t len, enum MHI_FLAGS flags); + int (*queue_xfer)(struct mhi_device *mhi_dev, + struct mhi_chan *mhi_chan, void *buf, + size_t len, enum MHI_FLAGS flags); + /* xfer call back */ + struct mhi_device *mhi_dev; + void (*xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *result); + struct mutex mutex; + struct completion completion; + rwlock_t lock; + u32 ring; + u32 tiocm; + struct list_head node; +}; + +struct tsync_node { + struct list_head node; + u32 sequence; + u64 local_time; + u64 remote_time; + struct mhi_device *mhi_dev; + void (*cb_func)(struct mhi_device *mhi_dev, u32 sequence, + u64 local_time, u64 remote_time); +}; + +struct mhi_timesync { + u32 er_index; + void __iomem *db; + void __iomem *time_reg; + enum MHI_EV_CCS ccs; + struct completion completion; + spinlock_t lock; /* list protection */ + struct mutex lpm_mutex; /* lpm protection */ + struct list_head head; +}; + +struct mhi_bus { + struct list_head controller_list; + struct mutex lock; + struct dentry *dentry; +}; + +/* default MHI timeout */ +#define MHI_TIMEOUT_MS (3000) +extern struct mhi_bus mhi_bus; + +/* debug fs related functions */ +int mhi_debugfs_mhi_chan_show(struct seq_file *m, void *d); +int mhi_debugfs_mhi_event_show(struct seq_file *m, void *d); +int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d); +int mhi_debugfs_trigger_reset(void *data, u64 val); + +void mhi_deinit_debugfs(struct mhi_controller *mhi_cntrl); +void mhi_init_debugfs(struct mhi_controller *mhi_cntrl); + +/* power management apis */ +enum MHI_PM_STATE __must_check mhi_tryset_pm_state( + struct mhi_controller *mhi_cntrl, + enum MHI_PM_STATE state); +const char *to_mhi_pm_state_str(enum MHI_PM_STATE state); +void mhi_reset_chan(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +enum mhi_ee mhi_get_exec_env(struct mhi_controller *mhi_cntrl); +int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, + enum MHI_ST_TRANSITION state); +void mhi_pm_st_worker(struct work_struct *work); +void mhi_fw_load_worker(struct work_struct *work); +void mhi_pm_sys_err_worker(struct work_struct *work); +void mhi_pm_ready_worker(struct work_struct *work); +int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl); +void mhi_ctrl_ev_task(unsigned long data); +int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl); +void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl); +int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl); +void mhi_notify(struct mhi_device *mhi_dev, enum MHI_CB cb_reason); +int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); +int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); +int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, u32 event_quota); +int mhi_send_cmd(struct 
mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + enum MHI_CMD cmd); +int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl); + +/* queue transfer buffer */ +int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, + void *buf, void *cb, size_t buf_len, enum MHI_FLAGS flags); +int mhi_queue_buf(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum MHI_FLAGS mflags); +int mhi_queue_skb(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum MHI_FLAGS mflags); +int mhi_queue_sclist(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum MHI_FLAGS mflags); +int mhi_queue_nop(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum MHI_FLAGS mflags); +int mhi_queue_dma(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan, + void *buf, size_t len, enum MHI_FLAGS mflags); + +/* register access methods */ +void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, struct db_cfg *db_cfg, + void __iomem *db_addr, dma_addr_t wp); +void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_mode, void __iomem *db_addr, + dma_addr_t wp); +int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 *out); +int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 mask, + u32 shift, u32 *out); +void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 val); +void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, + u32 offset, u32 mask, u32 shift, u32 val); +void mhi_ring_er_db(struct mhi_event *mhi_event); +void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, + dma_addr_t wp); +void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd); +void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl, u32 capability, + u32 *offset); +int mhi_init_timesync(struct mhi_controller *mhi_cntrl); +int mhi_create_timesync_sysfs(struct mhi_controller *mhi_cntrl); +void mhi_destroy_timesync(struct mhi_controller *mhi_cntrl); + +/* memory allocation methods */ +static inline void *mhi_alloc_coherent(struct mhi_controller *mhi_cntrl, + size_t size, + dma_addr_t *dma_handle, + gfp_t gfp) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 5,0,0 )) + void *buf = dma_zalloc_coherent(mhi_cntrl->dev, size, dma_handle, gfp); +#else + void *buf = dma_alloc_coherent(mhi_cntrl->dev, size, dma_handle, gfp | __GFP_ZERO); +#endif + + MHI_LOG("size = %zd, dma_handle = %llx\n", size, (u64)*dma_handle); + if (buf) + atomic_add(size, &mhi_cntrl->alloc_size); + + return buf; +} +static inline void mhi_free_coherent(struct mhi_controller *mhi_cntrl, + size_t size, + void *vaddr, + dma_addr_t dma_handle) +{ + atomic_sub(size, &mhi_cntrl->alloc_size); + dma_free_coherent(mhi_cntrl->dev, size, vaddr, dma_handle); +} +struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl); +static inline void mhi_dealloc_device(struct mhi_controller *mhi_cntrl, + struct mhi_device *mhi_dev) +{ + kfree(mhi_dev); +} +int mhi_destroy_device(struct device *dev, void *data); +void mhi_create_devices(struct mhi_controller *mhi_cntrl); +int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl, + struct image_info **image_info, size_t alloc_size); +void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl, + 
struct image_info *image_info); + +int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); +void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, + struct mhi_buf_info *buf_info); + +/* initialization methods */ +int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); +int mhi_init_mmio(struct mhi_controller *mhi_cntrl); +int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl); +void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl); +int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl); +void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl); +int mhi_dtr_init(void); + +/* isr handlers */ +irqreturn_t mhi_one_msi_handlr(int irq_number, void *dev); +irqreturn_t mhi_msi_handlr(int irq_number, void *dev); +irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev); +irqreturn_t mhi_intvec_handlr(int irq_number, void *dev); +void mhi_ev_task(unsigned long data); + +#ifdef CONFIG_MHI_DEBUG + +#define MHI_ASSERT(cond, msg) do { \ + if (cond) \ + panic(msg); \ +} while (0) + +#else + +#define MHI_ASSERT(cond, msg) do { \ + if (cond) { \ + MHI_ERR(msg); \ + WARN_ON(cond); \ + } \ +} while (0) + +#endif + +#endif /* _MHI_INT_H */ diff --git a/wwan/driver/quectel_MHI/src/core/mhi_main.c b/wwan/driver/quectel_MHI/src/core/mhi_main.c new file mode 100644 index 0000000..12d44e6 --- /dev/null +++ b/wwan/driver/quectel_MHI/src/core/mhi_main.c @@ -0,0 +1,2722 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mhi.h" +#include "mhi_internal.h" + +static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan); + +int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, + void __iomem *base, + u32 offset, + u32 *out) +{ + u32 tmp = readl_relaxed(base + offset); + + /* unexpected value, query the link status */ + if (PCI_INVALID_READ(tmp) && + mhi_cntrl->link_status(mhi_cntrl, mhi_cntrl->priv_data)) + return -EIO; + + *out = tmp; + + return 0; +} + +int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, + u32 offset, + u32 mask, + u32 shift, + u32 *out) +{ + u32 tmp; + int ret; + + ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); + if (ret) + return ret; + + *out = (tmp & mask) >> shift; + + return 0; +} + +int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl, + u32 capability, + u32 *offset) +{ + u32 cur_cap, next_offset; + int ret; + + /* get the 1st supported capability offset */ + ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MISC_OFFSET, + MISC_CAP_MASK, MISC_CAP_SHIFT, offset); + if (ret) + return ret; + if (*offset >= 0x1000) + return -ENXIO; + do { + ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, *offset, + CAP_CAPID_MASK, CAP_CAPID_SHIFT, + &cur_cap); + if (ret) + return ret; + + if (cur_cap == capability) + return 0; + + ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, *offset, + CAP_NEXT_CAP_MASK, CAP_NEXT_CAP_SHIFT, + &next_offset); + if (ret) + return ret; + + *offset += next_offset; + } while (next_offset); + + return -ENXIO; +} + +void mhi_write_reg(struct mhi_controller *mhi_cntrl, + void __iomem *base, + u32 offset, + u32 val) +{ + writel_relaxed(val, base + offset); +} + +void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, + u32 offset, + u32 mask, + u32 shift, + u32 val) +{ + int ret; + u32 tmp; + + ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); + if (ret) + return; + + tmp &= ~mask; + tmp |= (val << shift); + mhi_write_reg(mhi_cntrl, base, offset, tmp); +} + +void mhi_write_db(struct mhi_controller *mhi_cntrl, + void __iomem *db_addr, + dma_addr_t wp) +{ + mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(wp)); + mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(wp)); +#if 0 //carl.yin 20190527 for debug + if ((lower_32_bits(db_addr)&0xFFF) != 0x620) + { + u32 out = 0; + int ret = mhi_read_reg(mhi_cntrl, db_addr, 0, &out); + if (out != lower_32_bits(wp)) + MHI_ERR("%s db=%x, wp=w:%x - r:%x, ret=%d\n", __func__, lower_32_bits(db_addr), lower_32_bits(wp), out, ret); + } +#endif +} + +void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, + void __iomem *db_addr, + dma_addr_t wp) +{ + if (db_cfg->db_mode) { + db_cfg->db_val = wp; + mhi_write_db(mhi_cntrl, db_addr, wp); + db_cfg->db_mode = false; + } +} + +void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, + struct db_cfg *db_cfg, + void __iomem *db_addr, + dma_addr_t wp) +{ + db_cfg->db_val = wp; + mhi_write_db(mhi_cntrl, db_addr, wp); +} + +void mhi_ring_er_db(struct mhi_event *mhi_event) +{ + struct mhi_ring *ring = &mhi_event->ring; + + mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg, + ring->db_addr, *ring->ctxt_wp); +} + +void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd) +{ + dma_addr_t db; + struct mhi_ring *ring = &mhi_cmd->ring; + + db = 
ring->iommu_base + (ring->wp - ring->base); + *ring->ctxt_wp = db; + mhi_write_db(mhi_cntrl, ring->db_addr, db); +} + +//#define DEBUG_CHAN100_DB +#ifdef DEBUG_CHAN100_DB +static atomic_t chan100_seq = ATOMIC_INIT(0); +#define CHAN100_SIZE 0x1000 +static unsigned int chan100_t[CHAN100_SIZE]; +#endif + +void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *ring = &mhi_chan->tre_ring; + dma_addr_t db; + + db = ring->iommu_base + (ring->wp - ring->base); + /* + * Writes to the new ring element must be visible to the hardware + * before letting h/w know there is new element to fetch. + */ + dma_wmb(); + *ring->ctxt_wp = db; + mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, ring->db_addr, + db); +} + +enum mhi_ee mhi_get_exec_env(struct mhi_controller *mhi_cntrl) +{ + u32 exec; + int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec); + + return (ret) ? MHI_EE_MAX : exec; +} + +enum mhi_dev_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl) +{ + u32 state; + int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, + MHISTATUS_MHISTATE_MASK, + MHISTATUS_MHISTATE_SHIFT, &state); + return ret ? MHI_STATE_MAX : state; +} + +int mhi_queue_sclist(struct mhi_device *mhi_dev, + struct mhi_chan *mhi_chan, + void *buf, + size_t len, + enum MHI_FLAGS mflags) +{ + return -EINVAL; +} + +int mhi_queue_nop(struct mhi_device *mhi_dev, + struct mhi_chan *mhi_chan, + void *buf, + size_t len, + enum MHI_FLAGS mflags) +{ + return -EINVAL; +} + +static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + void *wp = ring->wp; + wp += ring->el_size; + if (wp >= (ring->base + ring->len)) + wp = ring->base; + ring->wp = wp; + /* smp update */ + smp_wmb(); +} + +static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + void *rp = ring->rp; + rp += ring->el_size; + if (rp >= (ring->base + ring->len)) + rp = ring->base; + ring->rp = rp; + /* smp update */ + smp_wmb(); +} + +static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + int nr_el; + + if (ring->wp < ring->rp) + nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1; + else { + nr_el = (ring->rp - ring->base) / ring->el_size; + nr_el += ((ring->base + ring->len - ring->wp) / + ring->el_size) - 1; + } + return nr_el; +} + +static u32 get_used_ring_elements(void *rp, void *wp, u32 el_num) +{ + u32 nr_el; + + if (wp >= rp) + nr_el = (wp - rp)/sizeof(struct mhi_tre); + else { + nr_el = (rp - wp)/sizeof(struct mhi_tre); + nr_el = el_num - nr_el; + } + return nr_el; +} + +static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr) +{ + return (addr - ring->iommu_base) + ring->base; +} + +dma_addr_t mhi_to_physical(struct mhi_ring *ring, void *addr) +{ + return (addr - ring->base) + ring->iommu_base; +} + +static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + void *rp, *wp; + + /* update the WP */ + wp = ring->wp; + wp += ring->el_size; + if (wp >= (ring->base + ring->len)) { + wp = ring->base; + } + ring->wp = wp; + + *ring->ctxt_wp = ring->iommu_base + (ring->wp - ring->base); + + /* update the RP */ + rp = ring->rp; + rp += ring->el_size; + if (rp >= (ring->base + ring->len)) + rp = ring->base; + ring->rp = rp; + + /* visible to other cores */ + smp_wmb(); +} + +static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl, + struct mhi_ring *ring) +{ + void *tmp = ring->wp + ring->el_size; + + if 
(tmp >= (ring->base + ring->len))
+ tmp = ring->base;
+
+ return (tmp == ring->rp);
+}
+
+int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ buf_info->p_addr = dma_map_single(mhi_cntrl->dev, buf_info->v_addr,
+ buf_info->len, buf_info->dir);
+ if (dma_mapping_error(mhi_cntrl->dev, buf_info->p_addr))
+ return -ENOMEM;
+
+ return 0;
+}
+
+int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len,
+ &buf_info->p_addr, GFP_ATOMIC);
+
+ if (!buf)
+ return -ENOMEM;
+
+ if (buf_info->dir == DMA_TO_DEVICE)
+ memcpy(buf, buf_info->v_addr, buf_info->len);
+
+ buf_info->bb_addr = buf;
+
+ return 0;
+}
+
+void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ dma_unmap_single(mhi_cntrl->dev, buf_info->p_addr, buf_info->len,
+ buf_info->dir);
+}
+
+void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
+ struct mhi_buf_info *buf_info)
+{
+ if (buf_info->dir == DMA_FROM_DEVICE)
+ memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
+
+ mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr,
+ buf_info->p_addr);
+}
+
+#ifdef ENABLE_MHI_MON
+static void mon_bus_submit(struct mhi_controller *mbus, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len)
+{
+ unsigned long flags;
+ struct list_head *pos;
+ struct mon_reader *r;
+
+ spin_lock_irqsave(&mbus->lock, flags);
+ mbus->cnt_events++;
+ list_for_each (pos, &mbus->r_list) {
+ r = list_entry(pos, struct mon_reader, r_link);
+ r->rnf_submit(r->r_data, chan, wp, mhi_tre, buf, len);
+ }
+ spin_unlock_irqrestore(&mbus->lock, flags);
+}
+
+static void mon_bus_receive(struct mhi_controller *mbus, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre, void *buf, size_t len)
+{
+ unsigned long flags;
+ struct list_head *pos;
+ struct mon_reader *r;
+
+ spin_lock_irqsave(&mbus->lock, flags);
+ mbus->cnt_events++;
+ list_for_each (pos, &mbus->r_list) {
+ r = list_entry(pos, struct mon_reader, r_link);
+ r->rnf_receive(r->r_data, chan, wp, mhi_tre, buf, len);
+ }
+ spin_unlock_irqrestore(&mbus->lock, flags);
+}
+
+static void mon_bus_complete(struct mhi_controller *mbus, u32 chan, dma_addr_t wp, struct mhi_tre *mhi_tre)
+{
+ unsigned long flags;
+ struct list_head *pos;
+ struct mon_reader *r;
+
+ spin_lock_irqsave(&mbus->lock, flags);
+ mbus->cnt_events++;
+ list_for_each (pos, &mbus->r_list) {
+ r = list_entry(pos, struct mon_reader, r_link);
+ r->rnf_complete(r->r_data, chan, wp, mhi_tre);
+ }
+ spin_unlock_irqrestore(&mbus->lock, flags);
+}
+#endif
+
+int mhi_queue_skb(struct mhi_device *mhi_dev,
+ struct mhi_chan *mhi_chan,
+ void *buf,
+ size_t len,
+ enum MHI_FLAGS mflags)
+{
+ struct sk_buff *skb = buf;
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+ struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
+ struct mhi_buf_info *buf_info;
+ struct mhi_tre *mhi_tre;
+ bool assert_wake = false;
+ int ret;
+
+ if (mhi_is_ring_full(mhi_cntrl, tre_ring))
+ return -ENOMEM;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+ MHI_VERB("MHI is not in active state, pm_state:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return -EIO;
+ }
+
+ /* we're in M3 or transitioning to M3 */
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+ mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+ 
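/* the back-to-back get/put pair nudges runtime PM to resume the link */
+ 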
mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+ }
+
+ /*
+ * For UL channels always assert WAKE until work is done,
+ * For DL channels only assert if MHI is in an LPM
+ */
+ if (mhi_chan->dir == DMA_TO_DEVICE ||
+ (mhi_chan->dir == DMA_FROM_DEVICE &&
+ mhi_cntrl->pm_state != MHI_PM_M0)) {
+ assert_wake = true;
+ mhi_cntrl->wake_get(mhi_cntrl, false);
+ }
+
+ /* generate the tre */
+ buf_info = buf_ring->wp;
+ buf_info->v_addr = skb->data;
+ buf_info->cb_buf = skb;
+ buf_info->wp = tre_ring->wp;
+ buf_info->dir = mhi_chan->dir;
+ buf_info->len = len;
+ ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
+ if (ret)
+ goto map_error;
+
+ mhi_tre = tre_ring->wp;
+
+ mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
+ mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
+ mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
+
+#ifdef ENABLE_MHI_MON
+ if (mhi_cntrl->nreaders) {
+ mon_bus_submit(mhi_cntrl, mhi_chan->chan,
+ mhi_to_physical(tre_ring, mhi_tre), mhi_tre, buf_info->v_addr, mhi_chan->chan&0x1 ? 0 : buf_info->len);
+ }
+#endif
+
+ MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan,
+ (u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr,
+ mhi_tre->dword[0], mhi_tre->dword[1]);
+
+ if (mhi_chan->dir == DMA_TO_DEVICE) {
+ if (atomic_inc_return(&mhi_cntrl->pending_pkts) == 1)
+ mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+ }
+
+ read_lock_bh(&mhi_chan->lock);
+ /* increment WP */
+ mhi_add_ring_element(mhi_cntrl, tre_ring);
+ mhi_add_ring_element(mhi_cntrl, buf_ring);
+
+#ifdef DEBUG_CHAN100_DB
+ if (mhi_chan->chan == 100) {
+ chan100_t[atomic_inc_return(&chan100_seq)&(CHAN100_SIZE-1)] = (((unsigned long)tre_ring->wp)&0xffff) | (mhi_chan->db_cfg.db_mode<<31) | (0<<30);
+ }
+#endif
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) {
+ mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+ }
+ read_unlock_bh(&mhi_chan->lock);
+
+ if (mhi_chan->dir == DMA_FROM_DEVICE && assert_wake)
+ mhi_cntrl->wake_put(mhi_cntrl, true);
+
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return 0;
+
+map_error:
+ if (assert_wake)
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return ret;
+}
+
+int mhi_queue_dma(struct mhi_device *mhi_dev,
+ struct mhi_chan *mhi_chan,
+ void *buf,
+ size_t len,
+ enum MHI_FLAGS mflags)
+{
+ struct mhi_buf *mhi_buf = buf;
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+ struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
+ struct mhi_buf_info *buf_info;
+ struct mhi_tre *mhi_tre;
+ bool assert_wake = false;
+
+ if (mhi_is_ring_full(mhi_cntrl, tre_ring))
+ return -ENOMEM;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+ MHI_VERB("MHI is not in active state, pm_state:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+ return -EIO;
+ }
+
+ /* we're in M3 or transitioning to M3 */
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+ mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data);
+ mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data);
+ }
+
+ /*
+ * For UL channels always assert WAKE until work is done,
+ * For DL channels only assert if MHI is in an LPM
+ */
+ if (mhi_chan->dir == DMA_TO_DEVICE ||
+ (mhi_chan->dir == DMA_FROM_DEVICE &&
+ mhi_cntrl->pm_state != MHI_PM_M0)) {
+ assert_wake = true;
+ mhi_cntrl->wake_get(mhi_cntrl, false);
+ }
+
+ /* generate the tre */
+ buf_info = buf_ring->wp;
+ MHI_ASSERT(buf_info->used, "TRE Not 
Freed\n"); + buf_info->p_addr = mhi_buf->dma_addr; + buf_info->pre_mapped = true; + buf_info->cb_buf = mhi_buf; + buf_info->wp = tre_ring->wp; + buf_info->dir = mhi_chan->dir; + buf_info->len = len; + + mhi_tre = tre_ring->wp; + + if (mhi_chan->xfer_type == MHI_XFER_RSC_DMA) { + buf_info->used = true; + mhi_tre->ptr = + MHI_RSCTRE_DATA_PTR(buf_info->p_addr, buf_info->len); + mhi_tre->dword[0] = + MHI_RSCTRE_DATA_DWORD0(buf_ring->wp - buf_ring->base); + mhi_tre->dword[1] = MHI_RSCTRE_DATA_DWORD1; + } else { + mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); + mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len); + mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0); + } + +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + mon_bus_submit(mhi_cntrl, mhi_chan->chan, + mhi_to_physical(tre_ring, mhi_tre), mhi_tre, buf_info->v_addr, mhi_chan->chan&0x1 ? 0: buf_info->len); + } +#endif + + MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan, + (u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr, + mhi_tre->dword[0], mhi_tre->dword[1]); + + if (mhi_chan->dir == DMA_TO_DEVICE) { + if (atomic_inc_return(&mhi_cntrl->pending_pkts) == 1) + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + } + + read_lock_bh(&mhi_chan->lock); + /* increment WP */ + mhi_add_ring_element(mhi_cntrl, tre_ring); + mhi_add_ring_element(mhi_cntrl, buf_ring); + + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) { + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + } + read_unlock_bh(&mhi_chan->lock); + + if (mhi_chan->dir == DMA_FROM_DEVICE && assert_wake) + mhi_cntrl->wake_put(mhi_cntrl, true); + + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; +} + +int mhi_gen_tre(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, + void *buf, + void *cb, + size_t buf_len, + enum MHI_FLAGS flags) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_tre *mhi_tre; + struct mhi_buf_info *buf_info; + int eot, eob, chain, bei; + int ret; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + + buf_info = buf_ring->wp; + buf_info->v_addr = buf; + buf_info->cb_buf = cb; + buf_info->wp = tre_ring->wp; + buf_info->dir = mhi_chan->dir; + buf_info->len = buf_len; + + ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); + if (ret) + return ret; + + eob = !!(flags & MHI_EOB); + eot = !!(flags & MHI_EOT); + chain = !!(flags & MHI_CHAIN); + bei = !!(mhi_chan->intmod); + + mhi_tre = tre_ring->wp; + mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); + mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len); + mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain); + +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + mon_bus_submit(mhi_cntrl, mhi_chan->chan, + mhi_to_physical(tre_ring, mhi_tre), mhi_tre, buf_info->v_addr, mhi_chan->chan&0x1 ? 
0 : buf_info->len); + } +#endif + MHI_VERB("chan:%d WP:0x%llx TRE:0x%llx 0x%08x 0x%08x\n", mhi_chan->chan, + (u64)mhi_to_physical(tre_ring, mhi_tre), mhi_tre->ptr, + mhi_tre->dword[0], mhi_tre->dword[1]); + + /* increment WP */ + mhi_add_ring_element(mhi_cntrl, tre_ring); + mhi_add_ring_element(mhi_cntrl, buf_ring); + + return 0; +} + +int mhi_queue_buf(struct mhi_device *mhi_dev, + struct mhi_chan *mhi_chan, + void *buf, + size_t len, + enum MHI_FLAGS mflags) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ring *tre_ring; + unsigned long flags; + bool assert_wake = false; + int ret; + + /* + * this check here only as a guard, it's always + * possible mhi can enter error while executing rest of function, + * which is not fatal so we do not need to hold pm_lock + */ + if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) { + MHI_VERB("MHI is not in active state, pm_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + + return -EIO; + } + + tre_ring = &mhi_chan->tre_ring; + if (mhi_is_ring_full(mhi_cntrl, tre_ring)) + return -ENOMEM; + + ret = mhi_chan->gen_tre(mhi_cntrl, mhi_chan, buf, buf, len, mflags); + if (unlikely(ret)) + return ret; + + read_lock_irqsave(&mhi_cntrl->pm_lock, flags); + + /* we're in M3 or transitioning to M3 */ + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) { + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + } + + /* + * For UL channels always assert WAKE until work is done, + * For DL channels only assert if MHI is in a LPM + */ + if (mhi_chan->dir == DMA_TO_DEVICE || + (mhi_chan->dir == DMA_FROM_DEVICE && + mhi_cntrl->pm_state != MHI_PM_M0)) { + assert_wake = true; + mhi_cntrl->wake_get(mhi_cntrl, false); + } + + if (mhi_chan->dir == DMA_TO_DEVICE) { + if (atomic_inc_return(&mhi_cntrl->pending_pkts) == 1) + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + } + + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) { + unsigned long flags; + + read_lock_irqsave(&mhi_chan->lock, flags); + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + read_unlock_irqrestore(&mhi_chan->lock, flags); + } + + if (mhi_chan->dir == DMA_FROM_DEVICE && assert_wake) + mhi_cntrl->wake_put(mhi_cntrl, true); + + read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); + + return 0; +} + +static ssize_t ul_chan_id_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct mhi_device *mhi_dev = to_mhi_device(dev); + + return snprintf(buf, PAGE_SIZE, "%d\n", mhi_dev->ul_chan_id); +} + +static DEVICE_ATTR_RO(ul_chan_id); + +static struct attribute *mhi_dev_attrs[] = { + &dev_attr_ul_chan_id.attr, + NULL, +}; + +static struct attribute_group mhi_dev_attr_group = { + .attrs = mhi_dev_attrs, +}; + +/* destroy specific device */ +int mhi_destroy_device(struct device *dev, void *data) +{ + struct mhi_device *mhi_dev; + struct mhi_controller *mhi_cntrl; + + if (dev->bus != &mhi_bus_type) + return 0; + + mhi_dev = to_mhi_device(dev); + mhi_cntrl = mhi_dev->mhi_cntrl; + + /* only destroying virtual devices thats attached to bus */ + if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE) + return 0; + + MHI_LOG("destroy device for chan:%s\n", mhi_dev->chan_name); + + sysfs_remove_group(&mhi_dev->dev.kobj, &mhi_dev_attr_group); + /* notify the client and remove the device from mhi bus */ + device_del(dev); + put_device(dev); + + return 0; +} + +void mhi_notify(struct mhi_device *mhi_dev, enum MHI_CB cb_reason) +{ + struct mhi_driver *mhi_drv; + + if (!mhi_dev->dev.driver) + return; + + 
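+	/* a client driver is bound to this device; look up its mhi_driver and
+	 * forward the callback reason (e.g. MHI_CB_PENDING_DATA) to its
+	 * status_cb, if one is registered
+	 */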
mhi_drv = to_mhi_driver(mhi_dev->dev.driver); + + if (mhi_drv->status_cb) + mhi_drv->status_cb(mhi_dev, cb_reason); +} + +static void mhi_assign_of_node(struct mhi_controller *mhi_cntrl, + struct mhi_device *mhi_dev) +{ + struct device_node *controller, *node; + const char *dt_name; + int ret; + + controller = of_find_node_by_name(mhi_cntrl->of_node, "mhi_devices"); + if (!controller) + return; + + for_each_available_child_of_node(controller, node) { + ret = of_property_read_string(node, "mhi,chan", &dt_name); + if (ret) + continue; + if (!strcmp(mhi_dev->chan_name, dt_name)) { + mhi_dev->dev.of_node = node; + break; + } + } +} + +static ssize_t time_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + u64 t_host, t_device; + int ret; + + ret = mhi_get_remote_time_sync(mhi_dev, &t_host, &t_device); + if (ret) { + MHI_ERR("Failed to obtain time, ret:%d\n", ret); + return ret; + } + + return scnprintf(buf, PAGE_SIZE, "local: %llu remote: %llu (ticks)\n", + t_host, t_device); +} +static DEVICE_ATTR_RO(time); + +static ssize_t time_us_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + u64 t_host, t_device; + int ret; + + ret = mhi_get_remote_time_sync(mhi_dev, &t_host, &t_device); + if (ret) { + MHI_ERR("Failed to obtain time, ret:%d\n", ret); + return ret; + } + + return scnprintf(buf, PAGE_SIZE, "local: %llu remote: %llu (us)\n", + TIME_TICKS_TO_US(t_host), TIME_TICKS_TO_US(t_device)); +} +static DEVICE_ATTR_RO(time_us); + +static struct attribute *mhi_tsync_attrs[] = { + &dev_attr_time.attr, + &dev_attr_time_us.attr, + NULL, +}; + +static const struct attribute_group mhi_tsync_group = { + .attrs = mhi_tsync_attrs, +}; + +void mhi_destroy_timesync(struct mhi_controller *mhi_cntrl) +{ + if (mhi_cntrl->mhi_tsync) { + sysfs_remove_group(&mhi_cntrl->mhi_dev->dev.kobj, + &mhi_tsync_group); + kfree(mhi_cntrl->mhi_tsync); + mhi_cntrl->mhi_tsync = NULL; + } +} + +int mhi_create_timesync_sysfs(struct mhi_controller *mhi_cntrl) +{ + return sysfs_create_group(&mhi_cntrl->mhi_dev->dev.kobj, + &mhi_tsync_group); +} + +static void mhi_create_time_sync_dev(struct mhi_controller *mhi_cntrl) +{ + struct mhi_device *mhi_dev; + int ret; + + if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) + return; + + mhi_dev = mhi_alloc_device(mhi_cntrl); + if (!mhi_dev) + return; + + mhi_dev->dev_type = MHI_TIMESYNC_TYPE; + mhi_dev->chan_name = "TIME_SYNC"; + dev_set_name(&mhi_dev->dev, "%04x_%02x.%02x.%02x_%s", mhi_dev->dev_id, + mhi_dev->domain, mhi_dev->bus, mhi_dev->slot, + mhi_dev->chan_name); + + /* add if there is a matching DT node */ + mhi_assign_of_node(mhi_cntrl, mhi_dev); + + ret = device_add(&mhi_dev->dev); + if (ret) { + MHI_ERR("Failed to register dev for chan:%s\n", + mhi_dev->chan_name); + mhi_dealloc_device(mhi_cntrl, mhi_dev); + return; + } + + mhi_cntrl->tsync_dev = mhi_dev; +} + +/* bind mhi channels into mhi devices */ +void mhi_create_devices(struct mhi_controller *mhi_cntrl) +{ + int i; + struct mhi_chan *mhi_chan; + struct mhi_device *mhi_dev; + int ret; + + /* + * we need to create time sync device before creating other + * devices, because client may try to capture time during + * clint probe. 
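+	 * (that is, a client may call into the tsync API from its probe(),
+	 * so the TIME_SYNC device has to be registered first)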
+ */ + mhi_create_time_sync_dev(mhi_cntrl); + + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + if (!mhi_chan->configured || mhi_chan->mhi_dev || + !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee))) + continue; + mhi_dev = mhi_alloc_device(mhi_cntrl); + if (!mhi_dev) + return; + + mhi_dev->dev_type = MHI_XFER_TYPE; + switch (mhi_chan->dir) { + case DMA_TO_DEVICE: + mhi_dev->ul_chan = mhi_chan; + mhi_dev->ul_chan_id = mhi_chan->chan; + mhi_dev->ul_xfer = mhi_chan->queue_xfer; + mhi_dev->ul_event_id = mhi_chan->er_index; + break; + case DMA_NONE: + case DMA_BIDIRECTIONAL: + mhi_dev->ul_chan_id = mhi_chan->chan; + mhi_dev->ul_event_id = mhi_chan->er_index; + mhi_dev->dl_chan = mhi_chan; + mhi_dev->dl_chan_id = mhi_chan->chan; + mhi_dev->dl_xfer = mhi_chan->queue_xfer; + mhi_dev->dl_event_id = mhi_chan->er_index; + break; + case DMA_FROM_DEVICE: + /* we use dl_chan for offload channels */ + mhi_dev->dl_chan = mhi_chan; + mhi_dev->dl_chan_id = mhi_chan->chan; + mhi_dev->dl_xfer = mhi_chan->queue_xfer; + mhi_dev->dl_event_id = mhi_chan->er_index; + break; + } + + mhi_chan->mhi_dev = mhi_dev; + + /* check next channel if it matches */ + if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) { + if (!strcmp(mhi_chan[1].name, mhi_chan->name)) { + i++; + mhi_chan++; + if (mhi_chan->dir == DMA_TO_DEVICE) { + mhi_dev->ul_chan = mhi_chan; + mhi_dev->ul_chan_id = mhi_chan->chan; + mhi_dev->ul_xfer = mhi_chan->queue_xfer; + mhi_dev->ul_event_id = + mhi_chan->er_index; + } else { + mhi_dev->dl_chan = mhi_chan; + mhi_dev->dl_chan_id = mhi_chan->chan; + mhi_dev->dl_xfer = mhi_chan->queue_xfer; + mhi_dev->dl_event_id = + mhi_chan->er_index; + } + mhi_chan->mhi_dev = mhi_dev; + } + } + + mhi_dev->chan_name = mhi_chan->name; + dev_set_name(&mhi_dev->dev, "%04x_%02x.%02x.%02x_%s", + mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus, + mhi_dev->slot, mhi_dev->chan_name); + + /* add if there is a matching DT node */ + mhi_assign_of_node(mhi_cntrl, mhi_dev); + + /* init wake source */ + if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable) + device_init_wakeup(&mhi_dev->dev, true); + + ret = device_add(&mhi_dev->dev); + if (ret) { + MHI_ERR("Failed to register dev for chan:%s\n", + mhi_dev->chan_name); + mhi_dealloc_device(mhi_cntrl, mhi_dev); + } + ret = sysfs_create_group(&mhi_dev->dev.kobj, &mhi_dev_attr_group); + } +} + +static void mhi_dump_tre(struct mhi_controller *mhi_cntrl, struct mhi_tre *_ev) { + union mhi_dev_ring_element_type *ev = (union mhi_dev_ring_element_type *)_ev; + + switch (ev->generic.type) { + case MHI_DEV_RING_EL_INVALID: { + MHI_ERR("carl_ev cmd_invalid, ptr=%llx, %x, %x\n", _ev->ptr, _ev->dword[0], _ev->dword[1]); + } + break; + case MHI_DEV_RING_EL_NOOP: { + MHI_LOG("carl_ev cmd_no_op chan=%u\n", ev->cmd_no_op.chid); + } + break; + case MHI_DEV_RING_EL_TRANSFER: { + MHI_LOG("carl_ev cmd_transfer data=%llx, len=%u, chan=%u\n", + ev->cmd_transfer.data_buf_ptr, ev->cmd_transfer.len, ev->cmd_transfer.chain); + } + break; + case MHI_DEV_RING_EL_RESET: { + MHI_LOG("carl_ev cmd_reset chan=%u\n", ev->cmd_reset.chid); + } + break; + case MHI_DEV_RING_EL_STOP: { + MHI_LOG("carl_ev cmd_stop chan=%u\n", ev->cmd_stop.chid); + } + break; + case MHI_DEV_RING_EL_START: { + MHI_LOG("carl_ev cmd_start chan=%u\n", ev->cmd_start.chid); + } + break; + case MHI_DEV_RING_EL_MHI_STATE_CHG: { + MHI_LOG("carl_ev evt_state_change mhistate=%u\n", ev->evt_state_change.mhistate); + } + break; + case MHI_DEV_RING_EL_CMD_COMPLETION_EVT:{ + MHI_LOG("carl_ev evt_cmd_comp code=%u, 
type=%u\n", ev->evt_cmd_comp.code, ev->evt_cmd_comp.type); + } + break; + case MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT:{ + MHI_VERB("carl_ev evt_tr_comp ptr=%llx, len=%u, code=%u, chan=%u\n", + ev->evt_tr_comp.ptr, ev->evt_tr_comp.len, ev->evt_tr_comp.code, ev->evt_tr_comp.chid); + } + break; + case MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY:{ + MHI_LOG("carl_ev evt_ee_state execenv=%u\n", ev->evt_ee_state.execenv); + } + break; + case MHI_DEV_RING_EL_UNDEF: + default: { + MHI_ERR("carl_ev el_undef type=%d\n", ev->generic.type); + }; + break; + } +} + +static int parse_xfer_event(struct mhi_controller *mhi_cntrl, + struct mhi_tre *event, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + u32 ev_code; + struct mhi_result result; + unsigned long flags = 0; + + ev_code = MHI_TRE_GET_EV_CODE(event); + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + + result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? + -EOVERFLOW : 0; + + /* + * if it's a DB Event then we need to grab the lock + * with preemption disable and as a write because we + * have to update db register and another thread could + * be doing same. + */ + if (ev_code >= MHI_EV_CC_OOB) + write_lock_irqsave(&mhi_chan->lock, flags); + else + read_lock_bh(&mhi_chan->lock); + + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) + goto end_process_tx_event; + + switch (ev_code) { + case MHI_EV_CC_OVERFLOW: + case MHI_EV_CC_EOB: + case MHI_EV_CC_EOT: + { + dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event); + struct mhi_tre *local_rp, *ev_tre; + void *dev_rp; + struct mhi_buf_info *buf_info; + u16 xfer_len; + + /* Get the TRB this event points to */ + ev_tre = mhi_to_virtual(tre_ring, ptr); + + /* device rp after servicing the TREs */ + dev_rp = ev_tre + 1; + if (dev_rp >= (tre_ring->base + tre_ring->len)) + dev_rp = tre_ring->base; + + mhi_chan->used_events[ev_code]++; + + result.dir = mhi_chan->dir; + + /* local rp */ + local_rp = tre_ring->rp; + while (local_rp != dev_rp) { + buf_info = buf_ring->rp; + /* Always get the get len from the event */ + xfer_len = MHI_TRE_GET_EV_LEN(event); + + /* unmap if it's not premapped by client */ + if (likely(!buf_info->pre_mapped)) + mhi_cntrl->unmap_single(mhi_cntrl, buf_info); + + result.buf_addr = buf_info->cb_buf; + result.bytes_xferd = xfer_len; +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + void *buf = NULL; + size_t len = 0; + + if (mhi_chan->queue_xfer == mhi_queue_skb) { + struct sk_buff *skb = result.buf_addr; + buf = skb->data; + len = result.bytes_xferd; + } + else if (CHAN_INBOUND(mhi_chan->chan)) { + buf = result.buf_addr; + len = result.bytes_xferd; + } + mon_bus_receive(mhi_cntrl, mhi_chan->chan, + mhi_to_physical(tre_ring, local_rp), local_rp, buf, len); + } +#endif + mhi_del_ring_element(mhi_cntrl, buf_ring); + mhi_del_ring_element(mhi_cntrl, tre_ring); + local_rp = tre_ring->rp; + + /* notify client */ + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + if (mhi_chan->dir == DMA_TO_DEVICE) { + if (atomic_dec_return(&mhi_cntrl->pending_pkts) == 0) + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + } + + if (mhi_chan->dir == DMA_TO_DEVICE) { + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + } + + /* + * recycle the buffer if buffer is pre-allocated, + * if there is error, not much we can do apart from + * dropping the packet + */ + if (mhi_chan->pre_alloc) { + if (mhi_queue_buf(mhi_chan->mhi_dev, mhi_chan, + buf_info->cb_buf, + buf_info->len, MHI_EOT)) { + MHI_ERR( 
+ "Error recycling buffer for chan:%d\n", + mhi_chan->chan); + kfree(buf_info->cb_buf); + } + } + } + break; + } /* CC_EOT */ + case MHI_EV_CC_OOB: + case MHI_EV_CC_DB_MODE: + { + unsigned long flags; + + mhi_chan->used_events[ev_code]++; + MHI_VERB("DB_MODE/OOB Detected chan %d.\n", mhi_chan->chan); + mhi_chan->db_cfg.db_mode = true; + read_lock_irqsave(&mhi_cntrl->pm_lock, flags); +#ifdef DEBUG_CHAN100_DB + if (mhi_chan->chan == 100) { + chan100_t[atomic_inc_return(&chan100_seq)&(CHAN100_SIZE-1)] = (((unsigned long)tre_ring->rp)&0xffff) | (0xf0000); + chan100_t[atomic_inc_return(&chan100_seq)&(CHAN100_SIZE-1)] = (((unsigned long)tre_ring->wp)&0xffff) | (mhi_chan->db_cfg.db_mode<<31) | (1<<30); + } +#endif + if (tre_ring->wp != tre_ring->rp && + MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)) { + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + } + read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); + break; + } + case MHI_EV_CC_BAD_TRE: + MHI_ASSERT(1, "Received BAD TRE event for ring"); + break; + default: + MHI_CRITICAL("Unknown TX completion.\n"); + + break; + } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */ + +end_process_tx_event: + if (ev_code >= MHI_EV_CC_OOB) + write_unlock_irqrestore(&mhi_chan->lock, flags); + else + read_unlock_bh(&mhi_chan->lock); + + return 0; +} + +static int parse_rsc_event(struct mhi_controller *mhi_cntrl, + struct mhi_tre *event, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_buf_info *buf_info; + struct mhi_result result; + int ev_code; + u32 cookie; /* offset to local descriptor */ + u16 xfer_len; + + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + + ev_code = MHI_TRE_GET_EV_CODE(event); + cookie = MHI_TRE_GET_EV_COOKIE(event); + xfer_len = MHI_TRE_GET_EV_LEN(event); + + /* received out of bound cookie */ + MHI_ASSERT(cookie >= buf_ring->len, "Invalid Cookie\n"); + + buf_info = buf_ring->base + cookie; + + result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? + -EOVERFLOW : 0; + result.bytes_xferd = xfer_len; + result.buf_addr = buf_info->cb_buf; + result.dir = mhi_chan->dir; + + read_lock_bh(&mhi_chan->lock); + + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) + goto end_process_rsc_event; + + MHI_ASSERT(!buf_info->used, "TRE already Freed\n"); + + /* notify the client */ + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + /* + * Note: We're arbitrarily incrementing RP even though, completion + * packet we processed might not be the same one, reason we can do this + * is because device guaranteed to cache descriptors in order it + * receive, so even though completion event is different we can re-use + * all descriptors in between. + * Example: + * Transfer Ring has descriptors: A, B, C, D + * Last descriptor host queue is D (WP) and first descriptor + * host queue is A (RP). + * The completion event we just serviced is descriptor C. + * Then we can safely queue descriptors to replace A, B, and C + * even though host did not receive any completions. 
+ */ + mhi_del_ring_element(mhi_cntrl, tre_ring); + buf_info->used = false; + +end_process_rsc_event: + read_unlock_bh(&mhi_chan->lock); + + return 0; +} + +static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl, + struct mhi_tre *tre) +{ + dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre); + struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + struct mhi_ring *mhi_ring = &cmd_ring->ring; + struct mhi_tre *cmd_pkt; + struct mhi_chan *mhi_chan; + struct mhi_timesync *mhi_tsync; + enum mhi_cmd_type type; + u32 chan; + + cmd_pkt = mhi_to_virtual(mhi_ring, ptr); + + /* out of order completion received */ + MHI_ASSERT(cmd_pkt != mhi_ring->rp, "Out of order cmd completion"); + + type = MHI_TRE_GET_CMD_TYPE(cmd_pkt); + + if (type == MHI_CMD_TYPE_TSYNC) { + mhi_tsync = mhi_cntrl->mhi_tsync; + mhi_tsync->ccs = MHI_TRE_GET_EV_CODE(tre); + complete(&mhi_tsync->completion); + } else { + chan = MHI_TRE_GET_CMD_CHID(cmd_pkt); + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + write_lock_bh(&mhi_chan->lock); + mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre); + complete(&mhi_chan->completion); + write_unlock_bh(&mhi_chan->lock); + } + + mhi_del_ring_element(mhi_cntrl, mhi_ring); +} + +int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + int count = 0; + + /* + * this is a quick check to avoid unnecessary event processing + * in case we already in error state, but it's still possible + * to transition to error state while processing events + */ + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) { + MHI_ERR("No EV access, PM_STATE:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + local_rp = ev_ring->rp; + + while (dev_rp != local_rp) { + enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp); + +//#define QL_READ_EVENT_WA //from Quectel Windows driver +#ifdef QL_READ_EVENT_WA + if (mhi_event->er_index == 0) { + if (local_rp->ptr == 0 && local_rp->dword[0] == 0 && local_rp->dword[1] == 0) { + // event content no sync to memory, just break and wait next event. 
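+				/* presumably the device's DMA write is not yet
+				 * visible to the CPU; bail out and let the
+				 * next interrupt retry this event ring
+				 */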
+ MHI_ERR("Zero Event!\n"); + break; + } + } +#endif + + mhi_dump_tre(mhi_cntrl, local_rp); + MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n", + local_rp->ptr, local_rp->dword[0], local_rp->dword[1]); +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + mon_bus_complete(mhi_cntrl, mhi_event->er_index, mhi_to_physical(ev_ring, local_rp), local_rp); + } +#endif + + switch (type) { + case MHI_PKT_TYPE_STATE_CHANGE_EVENT: + { + enum mhi_dev_state new_state; + + new_state = MHI_TRE_GET_EV_STATE(local_rp); + + MHI_LOG("MHI state change event to state:%s\n", + TO_MHI_STATE_STR(new_state)); + + switch (new_state) { + case MHI_STATE_M0: + mhi_pm_m0_transition(mhi_cntrl); + break; + case MHI_STATE_M1: + mhi_pm_m1_transition(mhi_cntrl); + break; + case MHI_STATE_M3: + mhi_pm_m3_transition(mhi_cntrl); + break; + case MHI_STATE_SYS_ERR: + { + enum MHI_PM_STATE new_state; + + MHI_ERR("MHI system error detected\n"); + write_lock_irq(&mhi_cntrl->pm_lock); + new_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_DETECT); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (new_state == MHI_PM_SYS_ERR_DETECT) + schedule_work( + &mhi_cntrl->syserr_worker); + break; + } + default: + MHI_ERR("Unsupported STE:%s\n", + TO_MHI_STATE_STR(new_state)); + } + + break; + } + case MHI_PKT_TYPE_CMD_COMPLETION_EVENT: + mhi_process_cmd_completion(mhi_cntrl, local_rp); + break; + case MHI_PKT_TYPE_EE_EVENT: + { + enum MHI_ST_TRANSITION st = MHI_ST_TRANSITION_MAX; + enum mhi_ee event = MHI_TRE_GET_EV_EXECENV(local_rp); + + MHI_LOG("MHI EE received event:%s\n", + TO_MHI_EXEC_STR(event)); + switch (event) { + case MHI_EE_SBL: + st = MHI_ST_TRANSITION_SBL; + break; + case MHI_EE_FP: + st = MHI_ST_TRANSITION_FP; + break; + case MHI_EE_WFW: + case MHI_EE_AMSS: + st = MHI_ST_TRANSITION_MISSION_MODE; + break; + case MHI_EE_RDDM: + mhi_cntrl->status_cb(mhi_cntrl, + mhi_cntrl->priv_data, + MHI_CB_EE_RDDM); + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->ee = event; + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up_all(&mhi_cntrl->state_event); + break; + default: + MHI_ERR("Unhandled EE event:%s\n", + TO_MHI_EXEC_STR(event)); + } + if (st != MHI_ST_TRANSITION_MAX) + mhi_queue_state_transition(mhi_cntrl, st); + break; + } +#if 1 //Add by Quectel + case MHI_PKT_TYPE_TX_EVENT: + case MHI_PKT_TYPE_RSC_TX_EVENT: + { + u32 chan = MHI_TRE_GET_EV_CHID(local_rp); + struct mhi_chan *mhi_chan = &mhi_cntrl->mhi_chan[chan]; + + if (likely(type == MHI_PKT_TYPE_TX_EVENT)) { + parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); + } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) { + parse_rsc_event(mhi_cntrl, local_rp, mhi_chan); + } + break; + } +#endif + default: + MHI_ASSERT(1, "Unsupported ev type"); + break; + } + +#ifdef QL_READ_EVENT_WA + if (mhi_event->er_index == 0) { + local_rp->ptr = 0; + local_rp->dword[0] = local_rp->dword[1] = 0; + } +#endif + + mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); + local_rp = ev_ring->rp; + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + count++; + } + + if (count > mhi_event->used_elements) { + mhi_event->used_elements = count; + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) + mhi_ring_er_db(mhi_event); + read_unlock_bh(&mhi_cntrl->pm_lock); + + MHI_VERB("exit er_index:%u\n", mhi_event->er_index); + + return count; +} + +int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt 
*er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + int count = 0; + u32 chan; + struct mhi_chan *mhi_chan = NULL; + u32 chan_count = 0; + void *chan_local_rp = NULL; + + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) { + MHI_ERR("No EV access, PM_STATE:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + local_rp = ev_ring->rp; + + while (dev_rp != local_rp && event_quota > 0) { + enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp); + + mhi_dump_tre(mhi_cntrl, local_rp); + MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n", + local_rp->ptr, local_rp->dword[0], local_rp->dword[1]); + + chan = MHI_TRE_GET_EV_CHID(local_rp); + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + chan_local_rp = mhi_chan->tre_ring.rp; + +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + mon_bus_complete(mhi_cntrl, mhi_event->er_index, mhi_to_physical(ev_ring, local_rp), local_rp); + } +#endif + if (likely(type == MHI_PKT_TYPE_TX_EVENT)) { + parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); + event_quota--; + } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) { + parse_rsc_event(mhi_cntrl, local_rp, mhi_chan); + event_quota--; + } + + chan_count += get_used_ring_elements(chan_local_rp, mhi_chan->tre_ring.rp, mhi_chan->tre_ring.elements); + mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); + local_rp = ev_ring->rp; + if (local_rp == dev_rp || event_quota == 0) { + if (chan_count > mhi_chan->used_elements) + mhi_chan->used_elements = chan_count; + chan_count = 0; + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + } + count++; + } + + if (count > mhi_event->used_elements) { + mhi_event->used_elements = count; + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) + mhi_ring_er_db(mhi_event); + read_unlock_bh(&mhi_cntrl->pm_lock); + + MHI_VERB("exit er_index:%u\n", mhi_event->er_index); + + return count; +} + +int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + u32 event_quota) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring = &mhi_event->ring; + struct mhi_event_ctxt *er_ctxt = + &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; + struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync; + int count = 0; + u32 sequence; + u64 remote_time; + + if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) { + MHI_ERR("No EV access, PM_STATE:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + local_rp = ev_ring->rp; + + while (dev_rp != local_rp) { + enum MHI_PKT_TYPE type = MHI_TRE_GET_EV_TYPE(local_rp); + struct tsync_node *tsync_node; + + MHI_VERB("Processing Event:0x%llx 0x%08x 0x%08x\n", + local_rp->ptr, local_rp->dword[0], local_rp->dword[1]); + + MHI_ASSERT(type != MHI_PKT_TYPE_TSYNC_EVENT, "!TSYNC event"); + + sequence = MHI_TRE_GET_EV_SEQ(local_rp); + remote_time = MHI_TRE_GET_EV_TIME(local_rp); + + do { + spin_lock_irq(&mhi_tsync->lock); + tsync_node = list_first_entry_or_null(&mhi_tsync->head, + struct tsync_node, node); + MHI_ASSERT(!tsync_node, "Unexpected Event"); + + if (unlikely(!tsync_node)) + break; + + list_del(&tsync_node->node); + spin_unlock_irq(&mhi_tsync->lock); + + /* + * device may not able to process all time sync commands + * host issue and only process last command it receive + */ + if (tsync_node->sequence == sequence) { + tsync_node->cb_func(tsync_node->mhi_dev, + sequence, + 
tsync_node->local_time,
+						remote_time);
+				kfree(tsync_node);
+			} else {
+				kfree(tsync_node);
+			}
+		} while (true);
+
+		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+		local_rp = ev_ring->rp;
+		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+		count++;
+	}
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)))
+		mhi_ring_er_db(mhi_event);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+
+	MHI_VERB("exit er_index:%u\n", mhi_event->er_index);
+
+	return count;
+}
+
+void mhi_ev_task(unsigned long data)
+{
+	struct mhi_event *mhi_event = (struct mhi_event *)data;
+	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+
+	MHI_VERB("Enter for ev_index:%d\n", mhi_event->er_index);
+
+	mhi_cntrl->runtime_mark_last_busy(mhi_cntrl, mhi_cntrl->priv_data);
+	/* process all pending events */
+	spin_lock_bh(&mhi_event->lock);
+	mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+	spin_unlock_bh(&mhi_event->lock);
+}
+
+void mhi_ctrl_ev_task(unsigned long data)
+{
+	struct mhi_event *mhi_event = (struct mhi_event *)data;
+	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+	enum mhi_dev_state state;
+	enum MHI_PM_STATE pm_state = 0;
+	int ret;
+
+	MHI_VERB("Enter for ev_index:%d\n", mhi_event->er_index);
+
+	/*
+	 * we can check pm_state w/o a lock here because there is no way
+	 * pm_state can change from reg access valid to no access while this
+	 * thread is executing.
+	 */
+	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
+		return;
+
+	mhi_cntrl->runtime_mark_last_busy(mhi_cntrl, mhi_cntrl->priv_data);
+	/* process ctrl events */
+	ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+
+	/*
+	 * we received an MSI but there were no events to process; maybe the
+	 * device went to SYS_ERR state, check the state
+	 */
+	if (!ret) {
+		write_lock_irq(&mhi_cntrl->pm_lock);
+		state = mhi_get_mhi_state(mhi_cntrl);
+		if (state == MHI_STATE_SYS_ERR) {
+			MHI_ERR("MHI system error detected\n");
+			pm_state = mhi_tryset_pm_state(mhi_cntrl,
+						       MHI_PM_SYS_ERR_DETECT);
+		}
+		write_unlock_irq(&mhi_cntrl->pm_lock);
+		if (pm_state == MHI_PM_SYS_ERR_DETECT)
+			schedule_work(&mhi_cntrl->syserr_worker);
+	}
+}
+
+irqreturn_t mhi_msi_handlr(int irq_number, void *dev)
+{
+	struct mhi_event *mhi_event = dev;
+	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
+	struct mhi_event_ctxt *er_ctxt =
+		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+	struct mhi_ring *ev_ring = &mhi_event->ring;
+	void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+	/* confirm ER has pending events to process before scheduling work */
+	if (ev_ring->rp == dev_rp)
+		return IRQ_HANDLED;
+
+	/* client managed event ring, notify pending data */
+	if (mhi_event->cl_manage) {
+		struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
+		struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
+
+		if (mhi_dev)
+			mhi_dev->status_cb(mhi_dev, MHI_CB_PENDING_DATA);
+	} else
+		tasklet_schedule(&mhi_event->task);
+
+	return IRQ_HANDLED;
+}
+
+/* this is the threaded fn */
+irqreturn_t mhi_intvec_threaded_handlr(int irq_number, void *dev)
+{
+	struct mhi_controller *mhi_cntrl = dev;
+	enum mhi_dev_state state = MHI_STATE_MAX;
+	enum MHI_PM_STATE pm_state = 0;
+	enum mhi_ee ee = MHI_EE_MAX;
+	unsigned long flags;
+
+	MHI_VERB("Enter\n");
+
+	write_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
+		state = mhi_get_mhi_state(mhi_cntrl);
+		ee = mhi_get_exec_env(mhi_cntrl);
+		if (mhi_cntrl->msi_allocated >= 5 ||
+		    (mhi_cntrl->msi_allocated == 1 &&
+		     (mhi_cntrl->dev_state != state ||
mhi_cntrl->ee != ee))) + MHI_LOG("device ee:%s dev_state:%s, pm_state:%s\n", TO_MHI_EXEC_STR(ee), + TO_MHI_STATE_STR(state), to_mhi_pm_state_str(mhi_cntrl->pm_state)); + } + + if (state == MHI_STATE_SYS_ERR) { + MHI_ERR("MHI system error detected\n"); + if (mhi_cntrl->pm_state != MHI_PM_SYS_ERR_DETECT) + pm_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_SYS_ERR_DETECT); + } + write_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); + if (pm_state == MHI_PM_SYS_ERR_DETECT) { + wake_up_all(&mhi_cntrl->state_event); + + if (mhi_cntrl->ee != ee) { + MHI_LOG("device ee:%s -> %s\n", TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee)); + schedule_work(&mhi_cntrl->syserr_worker); + } + /* for fatal errors, we let controller decide next step */ + else if (MHI_IN_PBL(ee)) + mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data, + MHI_CB_FATAL_ERROR); + else + schedule_work(&mhi_cntrl->syserr_worker); + } + if (mhi_cntrl->msi_allocated >= 5||(mhi_cntrl->msi_allocated == 1 && (mhi_cntrl->dev_state != state || mhi_cntrl->ee != ee))) + MHI_LOG("device ee:%s dev_state:%s, %s\n", TO_MHI_EXEC_STR(ee), + TO_MHI_STATE_STR(state), TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + if (pm_state == MHI_PM_POR) { + wake_up_all(&mhi_cntrl->state_event); + } + + MHI_VERB("Exit\n"); + + return IRQ_HANDLED; +} + +irqreturn_t mhi_intvec_handlr(int irq_number, void *dev) +{ + + struct mhi_controller *mhi_cntrl = dev; + + /* wake up any events waiting for state change */ + MHI_VERB("Enter\n"); + wake_up_all(&mhi_cntrl->state_event); + MHI_VERB("Exit\n"); + + return IRQ_WAKE_THREAD; +} + +irqreturn_t mhi_one_msi_handlr(int irq_number, void *dev) +{ + struct mhi_controller *mhi_cntrl = dev; + struct mhi_event_ctxt *er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + u32 i; + u32 handle = 0; + + for (i = 0; i < NUM_MHI_EVT_RINGS; i++, er_ctxt++, mhi_event++) { + struct mhi_ring *ev_ring = &mhi_event->ring; + void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + if (ev_ring->rp != dev_rp) { + handle++; + mhi_msi_handlr(irq_number, mhi_event); + } + } + + if (handle ==0) { + mhi_intvec_threaded_handlr(irq_number, dev); + } + + return IRQ_HANDLED; +} + +int mhi_send_cmd(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan, + enum MHI_CMD cmd) +{ + struct mhi_tre *cmd_tre = NULL; + struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + struct mhi_ring *ring = &mhi_cmd->ring; + int chan = 0; + + MHI_VERB("Entered, MHI pm_state:%s dev_state:%s ee:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + if (mhi_chan) + chan = mhi_chan->chan; + + spin_lock_bh(&mhi_cmd->lock); + if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) { + spin_unlock_bh(&mhi_cmd->lock); + return -ENOMEM; + } + + /* prepare the cmd tre */ + cmd_tre = ring->wp; + switch (cmd) { + case MHI_CMD_RESET_CHAN: + cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan); + break; + case MHI_CMD_START_CHAN: + cmd_tre->ptr = MHI_TRE_CMD_START_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan); + break; + case MHI_CMD_TIMSYNC_CFG: + cmd_tre->ptr = MHI_TRE_CMD_TSYNC_CFG_PTR; + cmd_tre->dword[0] = MHI_TRE_CMD_TSYNC_CFG_DWORD0; + cmd_tre->dword[1] = MHI_TRE_CMD_TSYNC_CFG_DWORD1 + (mhi_cntrl->mhi_tsync->er_index); + break; + } + +#ifdef ENABLE_MHI_MON + if (mhi_cntrl->nreaders) { + mon_bus_submit(mhi_cntrl, 128, 
mhi_to_physical(ring, cmd_tre), cmd_tre, NULL, 0); + } +#endif + MHI_VERB("WP:0x%llx TRE: 0x%llx 0x%08x 0x%08x\n", + (u64)mhi_to_physical(ring, cmd_tre), cmd_tre->ptr, + cmd_tre->dword[0], cmd_tre->dword[1]); + + /* queue to hardware */ + mhi_add_ring_element(mhi_cntrl, ring); + read_lock_bh(&mhi_cntrl->pm_lock); + if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state))) + mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); + read_unlock_bh(&mhi_cntrl->pm_lock); + spin_unlock_bh(&mhi_cmd->lock); + + return 0; +} + +static int __mhi_prepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + int ret = 0; + + MHI_LOG("Entered: preparing channel:%d\n", mhi_chan->chan); + + if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { + MHI_ERR("Current EE:%s Required EE Mask:0x%x for chan:%s\n", + TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask, + mhi_chan->name); + return -ENOTCONN; + } + + mutex_lock(&mhi_chan->mutex); + + /* if channel is not disable state do not allow to start */ + if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) { + ret = -EIO; + MHI_LOG("channel:%d is not in disabled state, ch_state%d\n", + mhi_chan->chan, mhi_chan->ch_state); + goto error_init_chan; + } + + /* client manages channel context for offload channels */ + if (!mhi_chan->offload_ch) { + ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan); + if (ret) { + MHI_ERR("Error with init chan\n"); + goto error_init_chan; + } + } + + reinit_completion(&mhi_chan->completion); + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("MHI host is not in active state\n"); + read_unlock_bh(&mhi_cntrl->pm_lock); + ret = -EIO; + goto error_pm_state; + } + + mhi_cntrl->wake_get(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + + ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN); + if (ret) { + MHI_ERR("Failed to send start chan cmd\n"); + goto error_send_cmd; + } + + ret = wait_for_completion_timeout(&mhi_chan->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) { + MHI_ERR("Failed to receive cmd completion for chan:%d\n", + mhi_chan->chan); + ret = -EIO; + goto error_send_cmd; + } + + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + + write_lock_irq(&mhi_chan->lock); + mhi_chan->ch_state = MHI_CH_STATE_ENABLED; + write_unlock_irq(&mhi_chan->lock); + + /* pre allocate buffer for xfer ring */ + if (mhi_chan->pre_alloc) { + int nr_el = get_nr_avail_ring_elements(mhi_cntrl, + &mhi_chan->tre_ring); + size_t len = mhi_cntrl->buffer_len; + + while (nr_el--) { + void *buf; + + buf = kmalloc(len, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto error_pre_alloc; + } + + /* prepare transfer descriptors */ + ret = mhi_chan->gen_tre(mhi_cntrl, mhi_chan, buf, buf, + len, MHI_EOT); + if (ret) { + MHI_ERR("Chan:%d error prepare buffer\n", + mhi_chan->chan); + kfree(buf); + goto error_pre_alloc; + } + } + + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)) { + read_lock_irq(&mhi_chan->lock); + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + read_unlock_irq(&mhi_chan->lock); + } + read_unlock_bh(&mhi_cntrl->pm_lock); + } + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + mutex_unlock(&mhi_chan->mutex); + + MHI_LOG("Chan:%d successfully moved to start state\n", mhi_chan->chan); + + return 0; + +error_send_cmd: + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + 
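+	/* the runtime PM vote was dropped above; now drop the device wake
+	 * vote under pm_lock, mirroring the acquire path
+	 */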
read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + +error_pm_state: + if (!mhi_chan->offload_ch) + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + +error_init_chan: + mutex_unlock(&mhi_chan->mutex); + + return ret; + +error_pre_alloc: + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + mutex_unlock(&mhi_chan->mutex); + __mhi_unprepare_channel(mhi_cntrl, mhi_chan); + + return ret; +} + +static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl, + struct mhi_event *mhi_event, + struct mhi_event_ctxt *er_ctxt, + int chan) +{ + struct mhi_tre *dev_rp, *local_rp; + struct mhi_ring *ev_ring; + unsigned long flags; + + MHI_LOG("Marking all events for chan:%d as stale\n", chan); + + ev_ring = &mhi_event->ring; + + /* mark all stale events related to channel as STALE event */ + spin_lock_irqsave(&mhi_event->lock, flags); + dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp); + + local_rp = ev_ring->rp; + while (dev_rp != local_rp) { + if (MHI_TRE_GET_EV_TYPE(local_rp) == + MHI_PKT_TYPE_TX_EVENT && + chan == MHI_TRE_GET_EV_CHID(local_rp)) + local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan, + MHI_PKT_TYPE_STALE_EVENT); + local_rp++; + if (local_rp == (ev_ring->base + ev_ring->len)) + local_rp = ev_ring->base; + } + + + MHI_LOG("Finished marking events as stale events\n"); + spin_unlock_irqrestore(&mhi_event->lock, flags); +} + +static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_result result; + + /* reset any pending buffers */ + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + while (tre_ring->rp != tre_ring->wp) { + struct mhi_buf_info *buf_info = buf_ring->rp; + + if (mhi_chan->dir == DMA_TO_DEVICE) { + if (atomic_dec_return(&mhi_cntrl->pending_pkts) == 0) + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + } + + if (mhi_chan->dir == DMA_TO_DEVICE) + mhi_cntrl->wake_put(mhi_cntrl, false); + if (!buf_info->pre_mapped) + mhi_cntrl->unmap_single(mhi_cntrl, buf_info); + mhi_del_ring_element(mhi_cntrl, buf_ring); + mhi_del_ring_element(mhi_cntrl, tre_ring); + + if (mhi_chan->pre_alloc) { + kfree(buf_info->cb_buf); + } else { + result.buf_addr = buf_info->cb_buf; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + } +} + +static void mhi_reset_rsc_chan(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + struct mhi_ring *buf_ring, *tre_ring; + struct mhi_result result; + struct mhi_buf_info *buf_info; + + /* reset any pending buffers */ + buf_ring = &mhi_chan->buf_ring; + tre_ring = &mhi_chan->tre_ring; + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + + buf_info = buf_ring->base; + for (; (void *)buf_info < buf_ring->base + buf_ring->len; buf_info++) { + if (!buf_info->used) + continue; + + result.buf_addr = buf_info->cb_buf; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + buf_info->used = false; + } +} + +void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan) +{ + + struct mhi_event *mhi_event; + struct mhi_event_ctxt *er_ctxt; + int chan = mhi_chan->chan; + + /* nothing to reset, client don't queue buffers */ + if (mhi_chan->offload_ch) + return; + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; + er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index]; + + 
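+	/* first invalidate any completion events still queued on this
+	 * channel's event ring, then release every buffer the client queued
+	 */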
mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan); + + if (mhi_chan->xfer_type == MHI_XFER_RSC_DMA) + mhi_reset_rsc_chan(mhi_cntrl, mhi_chan); + else + mhi_reset_data_chan(mhi_cntrl, mhi_chan); + + read_unlock_bh(&mhi_cntrl->pm_lock); + MHI_LOG("Reset complete.\n"); +} + +static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl, + struct mhi_chan *mhi_chan) +{ + int ret; + + MHI_LOG("Entered: unprepare channel:%d\n", mhi_chan->chan); + + /* no more processing events for this channel */ + mutex_lock(&mhi_chan->mutex); + write_lock_irq(&mhi_chan->lock); + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) { + MHI_LOG("chan:%d is already disabled\n", mhi_chan->chan); + write_unlock_irq(&mhi_chan->lock); + mutex_unlock(&mhi_chan->mutex); + return; + } + + mhi_chan->ch_state = MHI_CH_STATE_DISABLED; + write_unlock_irq(&mhi_chan->lock); + + reinit_completion(&mhi_chan->completion); + read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + read_unlock_bh(&mhi_cntrl->pm_lock); + goto error_invalid_state; + } + + mhi_cntrl->wake_get(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN); + if (ret) { + MHI_ERR("Failed to send reset chan cmd\n"); + goto error_completion; + } + + /* even if it fails we will still reset */ + ret = wait_for_completion_timeout(&mhi_chan->completion, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) + MHI_ERR("Failed to receive cmd completion, still resetting\n"); + +error_completion: + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + +error_invalid_state: + if (!mhi_chan->offload_ch) { + mhi_reset_chan(mhi_cntrl, mhi_chan); + mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); + } + MHI_LOG("chan:%d successfully resetted\n", mhi_chan->chan); + mutex_unlock(&mhi_chan->mutex); +} + +int mhi_debugfs_mhi_states_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + int reg = 0; + int ret; + u32 val[4]; + + seq_printf(m, + "pm_state:%s dev_state:%s EE:%s M0:%u M2:%u M3:%u wake:%d dev_wake:%u alloc_size:%u\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee), + mhi_cntrl->M0, mhi_cntrl->M2, mhi_cntrl->M3, + mhi_cntrl->wake_set, + atomic_read(&mhi_cntrl->dev_wake), + atomic_read(&mhi_cntrl->alloc_size)); + + seq_printf(m, + "mhi_state:%s exec_env:%s\n", + TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)), TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl))); + + seq_printf(m, "dump mhi reg addr:%p\n", mhi_cntrl->regs); + for (reg = 0; reg < 0x100; reg+=16) { + val[0] = val[1] = val[2] = val[3] = 0xFFFFFFFF; + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+0, &val[0]); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+4, &val[1]); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+8, &val[2]); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, reg+12, &val[3]); + seq_printf(m, "%02x: %08x %08x %08x %08x\n", reg, val[0], val[1], val[2], val[3]); + } + + seq_printf(m, "dump bhi reg addr:%p\n", mhi_cntrl->bhi); + for (reg = 0; reg < 0x100; reg+=16) { + val[0] = val[1] = val[2] = val[3] = 0xFFFFFFFF; + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+0, &val[0]); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+4, &val[1]); + ret = 
mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+8, &val[2]); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, reg+12, &val[3]); + seq_printf(m, "%02x: %08x %08x %08x %08x\n", reg, val[0], val[1], val[2], val[3]); + } + + return 0; +} + +int mhi_debugfs_mhi_event_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + struct mhi_event *mhi_event; + struct mhi_event_ctxt *er_ctxt; + + int i; + + er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev) { + seq_printf(m, "Index:%d offload event ring\n", i); + } else { + seq_printf(m, + "Index:%d modc:%d modt:%d base:0x%0llx len:0x%llx", + i, er_ctxt->intmodc, er_ctxt->intmodt, + er_ctxt->rbase, er_ctxt->rlen); + seq_printf(m, + " rp:0x%llx wp:0x%llx local_rp:0x%llx local_wp:0x%llx db:0x%llx\n", + er_ctxt->rp, er_ctxt->wp, + (unsigned long long)mhi_to_physical(ring, ring->rp), + (unsigned long long)mhi_to_physical(ring, ring->wp), + (unsigned long long)mhi_event->db_cfg.db_val); + seq_printf(m, "used:%u\n", mhi_event->used_elements); + +#ifdef DEBUG_CHAN100_DB + if (mhi_event->mhi_chan && mhi_event->chan == 100) { + struct mhi_tre *tre = (struct mhi_tre *)ring->base; + size_t j; + + for (j = 0; j < ring->elements; j++, tre++) { + seq_printf(m, + "%08x: %llx, %08x, %08x\n", + (unsigned int)(j*sizeof(struct mhi_tre)), + tre->ptr, tre->dword[0], tre->dword[1]); + } + } +#endif + } + } + + return 0; +} + +int mhi_debugfs_mhi_chan_show(struct seq_file *m, void *d) +{ + struct mhi_controller *mhi_cntrl = m->private; + struct mhi_chan *mhi_chan; + struct mhi_chan_ctxt *chan_ctxt; + int i; + + mhi_chan = mhi_cntrl->mhi_chan; + chan_ctxt = mhi_cntrl->mhi_ctxt->chan_ctxt; + for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) { + struct mhi_ring *ring = &mhi_chan->tre_ring; + + if (mhi_chan->ch_state == MHI_CH_STATE_DISABLED) + continue; + + if (mhi_chan->offload_ch) { + seq_printf(m, "%s(%u) offload channel\n", + mhi_chan->name, mhi_chan->chan); + } else if (mhi_chan->mhi_dev) { + seq_printf(m, + "%s(%u) state:0x%x brstmode:0x%x pllcfg:0x%x type:0x%x erindex:%u", + mhi_chan->name, mhi_chan->chan, + chan_ctxt->chstate, chan_ctxt->brstmode, + chan_ctxt->pollcfg, chan_ctxt->chtype, + chan_ctxt->erindex); + seq_printf(m, + " base:0x%llx len:0x%llx rp:%llx wp:0x%llx local_rp:0x%llx local_wp:0x%llx db:0x%llx\n", + chan_ctxt->rbase, chan_ctxt->rlen, + chan_ctxt->rp, chan_ctxt->wp, + (unsigned long long)mhi_to_physical(ring, ring->rp), + (unsigned long long)mhi_to_physical(ring, ring->wp), + (unsigned long long)mhi_chan->db_cfg.db_val); + seq_printf(m, "used:%u, EOB:%u, EOT:%u, OOB:%u, DB_MODE:%u\n", mhi_chan->used_elements, + mhi_chan->used_events[MHI_EV_CC_EOB], mhi_chan->used_events[MHI_EV_CC_EOT], + mhi_chan->used_events[MHI_EV_CC_OOB],mhi_chan->used_events[MHI_EV_CC_DB_MODE]); + +#ifdef DEBUG_CHAN100_DB + if (mhi_chan->chan == 100) { + unsigned int n = 0; + seq_printf(m, "chan100_seq = %04x\n", atomic_read(&chan100_seq)%CHAN100_SIZE); + for (n = 0; n < CHAN100_SIZE; n++) { + seq_printf(m, "%04x: %08x\n", n, chan100_t[n]); + } + } +#endif + +#if 0 + if (ring->base && /*(i&1) &&*/ (i < MHI_CLIENT_IP_HW_0_OUT)) { + struct mhi_tre *tre = (struct mhi_tre *)ring->base; + size_t e; + + for (e = 0; e < ring->elements; e++, tre++) { + seq_printf(m, "[%03d] %llx, %08x, %08x\n", i, tre->ptr, tre->dword[0], tre->dword[1]); + } + } +#endif + } + } + + return 
0; +} + +/* move channel to start state */ +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev) +{ + int ret, dir; + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; + + if (!mhi_chan) + continue; + + ret = __mhi_prepare_channel(mhi_cntrl, mhi_chan); + if (ret) { + MHI_ERR("Error moving chan %s,%d to START state\n", + mhi_chan->name, mhi_chan->chan); + goto error_open_chan; + } + + if (mhi_dev->dl_chan == mhi_dev->ul_chan) { + break; + } + } + + return 0; + +error_open_chan: + for (--dir; dir >= 0; dir--) { + mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; + + if (!mhi_chan) + continue; + + __mhi_unprepare_channel(mhi_cntrl, mhi_chan); + } + + return ret; +} +EXPORT_SYMBOL(mhi_prepare_for_transfer); + +void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan; + int dir; + + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + __mhi_unprepare_channel(mhi_cntrl, mhi_chan); + + if (mhi_dev->dl_chan == mhi_dev->ul_chan) { + break; + } + } +} +EXPORT_SYMBOL(mhi_unprepare_from_transfer); + +int mhi_get_no_free_descriptors(struct mhi_device *mhi_dev, + enum dma_data_direction dir) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? + mhi_dev->ul_chan : mhi_dev->dl_chan; + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + + if (mhi_chan->offload_ch) + return 0; + + return get_nr_avail_ring_elements(mhi_cntrl, tre_ring); +} +EXPORT_SYMBOL(mhi_get_no_free_descriptors); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 5,3,0 )) +static int __mhi_bdf_to_controller(struct device *dev, void *tmp) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_device *match = tmp; + + /* return any none-zero value if match */ + if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE && + mhi_dev->domain == match->domain && mhi_dev->bus == match->bus && + mhi_dev->slot == match->slot && mhi_dev->dev_id == match->dev_id) + return 1; + + return 0; +} +#else +static int __mhi_bdf_to_controller(struct device *dev, const void *tmp) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + const struct mhi_device *match = tmp; + + /* return any none-zero value if match */ + if (mhi_dev->dev_type == MHI_CONTROLLER_TYPE && + mhi_dev->domain == match->domain && mhi_dev->bus == match->bus && + mhi_dev->slot == match->slot && mhi_dev->dev_id == match->dev_id) + return 1; + + return 0; +} +#endif + +struct mhi_controller *mhi_bdf_to_controller(u32 domain, + u32 bus, + u32 slot, + u32 dev_id) +{ + struct mhi_device tmp, *mhi_dev; + struct device *dev; + + tmp.domain = domain; + tmp.bus = bus; + tmp.slot = slot; + tmp.dev_id = dev_id; + + dev = bus_find_device(&mhi_bus_type, NULL, &tmp, + __mhi_bdf_to_controller); + if (!dev) + return NULL; + + mhi_dev = to_mhi_device(dev); + + return mhi_dev->mhi_cntrl; +} +EXPORT_SYMBOL(mhi_bdf_to_controller); + +int mhi_poll(struct mhi_device *mhi_dev, + u32 budget) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_chan *mhi_chan = mhi_dev->dl_chan; + struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; + int ret; + + spin_lock_bh(&mhi_event->lock); + ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget); + spin_unlock_bh(&mhi_event->lock); + + return ret; +} +EXPORT_SYMBOL(mhi_poll); + +int 
mhi_get_remote_time_sync(struct mhi_device *mhi_dev,
+			 u64 *t_host,
+			 u64 *t_dev)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync;
+	int ret;
+
+	/* not all devices support the time feature */
+	if (!mhi_tsync)
+		return -EIO;
+
+	/* bring to M0 state */
+	ret = __mhi_device_get_sync(mhi_cntrl);
+	if (ret)
+		return ret;
+
+	mutex_lock(&mhi_tsync->lpm_mutex);
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+		MHI_ERR("MHI is not in active state, pm_state:%s\n",
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		ret = -EIO;
+		goto error_invalid_state;
+	}
+
+	/* disable link level low power modes */
+	ret = mhi_cntrl->lpm_disable(mhi_cntrl, mhi_cntrl->priv_data);
+	if (ret)
+		goto error_invalid_state;
+
+	/*
+	 * time critical code to fetch device times,
+	 * the delay between these two steps should be
+	 * as deterministic as possible.
+	 */
+	preempt_disable();
+	local_irq_disable();
+
+	*t_host = mhi_cntrl->time_get(mhi_cntrl, mhi_cntrl->priv_data);
+	*t_dev = readq_relaxed_no_log(mhi_tsync->time_reg);
+
+	local_irq_enable();
+	preempt_enable();
+
+	mhi_cntrl->lpm_enable(mhi_cntrl, mhi_cntrl->priv_data);
+
+error_invalid_state:
+	mhi_cntrl->wake_put(mhi_cntrl, false);
+	read_unlock_bh(&mhi_cntrl->pm_lock);
+	mutex_unlock(&mhi_tsync->lpm_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(mhi_get_remote_time_sync);
+
+/**
+ * mhi_get_remote_time - Get external modem time relative to host time
+ * Trigger an event to capture modem time, and capture host time as well so
+ * the client can do a relative drift comparison.
+ * Only the tsync device is expected to call this method, and it must not be
+ * called from atomic context.
+ * @mhi_dev: Device associated with the channels
+ * @sequence: unique sequence id to track the event
+ * @cb_func: callback function to call back
+ */
+int mhi_get_remote_time(struct mhi_device *mhi_dev,
+			u32 sequence,
+			void (*cb_func)(struct mhi_device *mhi_dev,
+					u32 sequence,
+					u64 local_time,
+					u64 remote_time))
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_timesync *mhi_tsync = mhi_cntrl->mhi_tsync;
+	struct tsync_node *tsync_node;
+	int ret;
+
+	/* not all devices support the time feature */
+	if (!mhi_tsync)
+		return -EIO;
+
+	/* tsync db can only be rung in M0 state */
+	ret = __mhi_device_get_sync(mhi_cntrl);
+	if (ret)
+		return ret;
+
+	/*
+	 * technically we could use GFP_KERNEL, but we want to avoid
+	 * scheduling out
+	 */
+	tsync_node = kzalloc(sizeof(*tsync_node), GFP_ATOMIC);
+	if (!tsync_node) {
+		ret = -ENOMEM;
+		goto error_no_mem;
+	}
+
+	tsync_node->sequence = sequence;
+	tsync_node->cb_func = cb_func;
+	tsync_node->mhi_dev = mhi_dev;
+
+	/* disable link level low power modes */
+	mhi_cntrl->lpm_disable(mhi_cntrl, mhi_cntrl->priv_data);
+
+	read_lock_bh(&mhi_cntrl->pm_lock);
+	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
+		MHI_ERR("MHI is not in active state, pm_state:%s\n",
+			to_mhi_pm_state_str(mhi_cntrl->pm_state));
+		ret = -EIO;
+		goto error_invalid_state;
+	}
+
+	spin_lock_irq(&mhi_tsync->lock);
+	list_add_tail(&tsync_node->node, &mhi_tsync->head);
+	spin_unlock_irq(&mhi_tsync->lock);
+
+	/*
+	 * time critical code, the delay between these two steps should be
+	 * as deterministic as possible.
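+	 * (preemption and IRQs are disabled below so nothing can run between
+	 * taking the host timestamp and ringing the tsync doorbell.)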
+ */ + preempt_disable(); + local_irq_disable(); + + tsync_node->local_time = + mhi_cntrl->time_get(mhi_cntrl, mhi_cntrl->priv_data); + writel_relaxed_no_log(tsync_node->sequence, mhi_tsync->db); + /* write must go thru immediately */ + wmb(); + + local_irq_enable(); + preempt_enable(); + + ret = 0; + +error_invalid_state: + if (ret) + kfree(tsync_node); + read_unlock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->lpm_enable(mhi_cntrl, mhi_cntrl->priv_data); + +error_no_mem: + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return ret; +} +EXPORT_SYMBOL(mhi_get_remote_time); + +void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl) +{ + enum mhi_dev_state state; + enum mhi_ee ee; + int i, ret; + u32 val = 0; + void __iomem *mhi_base = mhi_cntrl->regs; + void __iomem *bhi_base = mhi_cntrl->bhi; + void __iomem *bhie_base = mhi_cntrl->bhie; + void __iomem *wake_db = mhi_cntrl->wake_db; + struct { + const char *name; + int offset; + void *base; + } debug_reg[] = { + { "MHI_CNTRL", MHICTRL, mhi_base}, + { "MHI_STATUS", MHISTATUS, mhi_base}, + { "MHI_WAKE_DB", 0, wake_db}, + { "BHI_EXECENV", BHI_EXECENV, bhi_base}, + { "BHI_STATUS", BHI_STATUS, bhi_base}, + { "BHI_ERRCODE", BHI_ERRCODE, bhi_base}, + { "BHI_ERRDBG1", BHI_ERRDBG1, bhi_base}, + { "BHI_ERRDBG2", BHI_ERRDBG2, bhi_base}, + { "BHI_ERRDBG3", BHI_ERRDBG3, bhi_base}, + { "BHIE_TXVEC_DB", BHIE_TXVECDB_OFFS, bhie_base}, + { "BHIE_TXVEC_STATUS", BHIE_TXVECSTATUS_OFFS, bhie_base}, + { "BHIE_RXVEC_DB", BHIE_RXVECDB_OFFS, bhie_base}, + { "BHIE_RXVEC_STATUS", BHIE_RXVECSTATUS_OFFS, bhie_base}, + { NULL }, + }; + + MHI_LOG("host pm_state:%s dev_state:%s ee:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + state = mhi_get_mhi_state(mhi_cntrl); + ee = mhi_get_exec_env(mhi_cntrl); + + MHI_LOG("device ee:%s dev_state:%s\n", TO_MHI_EXEC_STR(ee), + TO_MHI_STATE_STR(state)); + + for (i = 0; debug_reg[i].name; i++) { + ret = mhi_read_reg(mhi_cntrl, debug_reg[i].base, + debug_reg[i].offset, &val); + MHI_LOG("reg:%s val:0x%x, ret:%d\n", debug_reg[i].name, val, + ret); + } +} +EXPORT_SYMBOL(mhi_debug_reg_dump); diff --git a/wwan/driver/quectel_MHI/src/core/mhi_pm.c b/wwan/driver/quectel_MHI/src/core/mhi_pm.c new file mode 100644 index 0000000..117a600 --- /dev/null +++ b/wwan/driver/quectel_MHI/src/core/mhi_pm.c @@ -0,0 +1,1255 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mhi.h" +#include "mhi_internal.h" + +/* + * Not all MHI states transitions are sync transitions. Linkdown, SSR, and + * shutdown can happen anytime asynchronously. This function will transition to + * new state only if we're allowed to transitions. + * + * Priority increase as we go down, example while in any states from L0, start + * state from L1, L2, or L3 can be set. Notable exception to this rule is state + * DISABLE. From DISABLE state we can transition to only POR or state. Also + * for example while in L2 state, user cannot jump back to L1 or L0 states. 
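+ * mhi_tryset_pm_state() enforces this table: a requested transition is
+ * honored only if its bit is set in the current state's to_states mask.
+ *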
+ * Valid transitions: + * L0: DISABLE <--> POR + * POR <--> POR + * POR -> M0 -> M2 --> M0 + * POR -> FW_DL_ERR + * FW_DL_ERR <--> FW_DL_ERR + * M0 -> FW_DL_ERR + * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0 + * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR + * L2: SHUTDOWN_PROCESS -> DISABLE + * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT + * LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS + */ +static struct mhi_pm_transitions const mhi_state_transitions[] = { + /* L0 States */ + { + MHI_PM_DISABLE, + MHI_PM_POR + }, + { + MHI_PM_POR, + MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 | + MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR + }, + { + MHI_PM_M0, + MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_SYS_ERR_DETECT | + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT | + MHI_PM_FW_DL_ERR + }, + { + MHI_PM_M2, + MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3_ENTER, + MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3, + MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT | + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_M3_EXIT, + MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_FW_DL_ERR, + MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT | + MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L1 States */ + { + MHI_PM_SYS_ERR_DETECT, + MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + { + MHI_PM_SYS_ERR_PROCESS, + MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS | + MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L2 States */ + { + MHI_PM_SHUTDOWN_PROCESS, + MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT + }, + /* L3 States */ + { + MHI_PM_LD_ERR_FATAL_DETECT, + MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS + }, +}; + +enum MHI_PM_STATE __must_check mhi_tryset_pm_state( + struct mhi_controller *mhi_cntrl, + enum MHI_PM_STATE state) +{ + unsigned long cur_state = mhi_cntrl->pm_state; + int index = find_last_bit(&cur_state, 32); + + if (unlikely(index >= ARRAY_SIZE(mhi_state_transitions))) { + MHI_CRITICAL("cur_state:%s is not a valid pm_state\n", + to_mhi_pm_state_str(cur_state)); + return cur_state; + } + + if (unlikely(mhi_state_transitions[index].from_state != cur_state)) { + MHI_ERR("index:%u cur_state:%s != actual_state: %s\n", + index, to_mhi_pm_state_str(cur_state), + to_mhi_pm_state_str + (mhi_state_transitions[index].from_state)); + return cur_state; + } + + if (unlikely(!(mhi_state_transitions[index].to_states & state))) { + MHI_LOG( + "Not allowing pm state transition from:%s to:%s state\n", + to_mhi_pm_state_str(cur_state), + to_mhi_pm_state_str(state)); + return cur_state; + } + + MHI_LOG("Transition to pm state from:%s to:%s\n", + to_mhi_pm_state_str(cur_state), to_mhi_pm_state_str(state)); + + mhi_cntrl->pm_state = state; + return mhi_cntrl->pm_state; +} + +void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, + enum mhi_dev_state state) +{ + if (state == MHI_STATE_RESET) { + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1); + } else { + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_MHISTATE_MASK, MHICTRL_MHISTATE_SHIFT, state); + } +} + +/* set device wake */ +void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force) +{ + unsigned long flags; + +#if 1 //carl.yin 20200907, according to WIN driver, only in M2 state, need to assert, and 
do not need to deassert + if (mhi_cntrl->dev_state == MHI_STATE_M2) { + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); + } + return; +#endif + + /* if set, regardless of count set the bit if not set */ + if (unlikely(force)) { + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + atomic_inc(&mhi_cntrl->dev_wake); + if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) && + !mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); + mhi_cntrl->wake_set = true; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); + } else { + /* if resources requested already, then increment and exit */ + if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0))) + return; + + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) && + MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) && + !mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1); + mhi_cntrl->wake_set = true; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); + } +} + +/* clear device wake */ +void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl, bool override) +{ + unsigned long flags; + +#if 1 //carl.yin 20200907, according to WIN driver, only in M2 state, need to assert, and do not need to deassert + return; +#endif + +#if 1 //Add by Quectel + if (atomic_read(&mhi_cntrl->dev_wake) == 0) + return; +#endif + + MHI_ASSERT(atomic_read(&mhi_cntrl->dev_wake) == 0, "dev_wake == 0"); + + /* resources not dropping to 0, decrement and exit */ + if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1))) + return; + + spin_lock_irqsave(&mhi_cntrl->wlock, flags); + if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) && + MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override && + mhi_cntrl->wake_set) { + mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0); + mhi_cntrl->wake_set = false; + } + spin_unlock_irqrestore(&mhi_cntrl->wlock, flags); +} + +int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) +{ + void __iomem *base = mhi_cntrl->regs; + u32 reset = 1, ready = 0; + struct mhi_event *mhi_event; + enum MHI_PM_STATE cur_state; + int ret, i; + + MHI_LOG("Waiting to enter READY state\n"); + + /* wait for RESET to be cleared and READY bit to be set */ + wait_event_timeout(mhi_cntrl->state_event, + MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) || + mhi_read_reg_field(mhi_cntrl, base, MHICTRL, + MHICTRL_RESET_MASK, + MHICTRL_RESET_SHIFT, &reset) || + mhi_read_reg_field(mhi_cntrl, base, MHISTATUS, + MHISTATUS_READY_MASK, + MHISTATUS_READY_SHIFT, &ready) || + (!reset && ready), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + /* device enter into error state */ + if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) + return -EIO; + + /* device did not transition to ready state */ + if (reset || !ready) + return -ETIMEDOUT; + + MHI_LOG("Device in READY State\n"); + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR); + mhi_cntrl->dev_state = MHI_STATE_READY; + write_unlock_irq(&mhi_cntrl->pm_lock); + + if (cur_state != MHI_PM_POR) { + MHI_ERR("Error moving to state %s from %s\n", + to_mhi_pm_state_str(MHI_PM_POR), + to_mhi_pm_state_str(cur_state)); + return -EIO; + } + read_lock_bh(&mhi_cntrl->pm_lock); + if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + goto error_mmio; + + ret = mhi_init_mmio(mhi_cntrl); + if (ret) { + MHI_ERR("Error programming mmio registers\n"); + goto error_mmio; + } + + /* add elements to all sw event rings */ + 
mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev || mhi_event->hw_ring) + continue; + + ring->wp = ring->base + ring->len - ring->el_size; + *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size; + /* needs to update to all cores */ + smp_wmb(); + + /* ring the db for event rings */ + spin_lock_irq(&mhi_event->lock); + mhi_ring_er_db(mhi_event); + spin_unlock_irq(&mhi_event->lock); + } + + /* set device into M0 state */ + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return 0; + +error_mmio: + read_unlock_bh(&mhi_cntrl->pm_lock); + + return -EIO; +} + +int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl) +{ + enum MHI_PM_STATE cur_state; + struct mhi_chan *mhi_chan; + int i; + + MHI_LOG("Entered With State:%s PM_STATE:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_M0; + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (unlikely(cur_state != MHI_PM_M0)) { + MHI_ERR("Failed to transition to state %s from %s\n", + to_mhi_pm_state_str(MHI_PM_M0), + to_mhi_pm_state_str(cur_state)); + return -EIO; + } + mhi_cntrl->M0++; + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, false); + + /* ring all event rings and CMD ring only if we're in mission mode */ + if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) { + struct mhi_event *mhi_event = mhi_cntrl->mhi_event; + struct mhi_cmd *mhi_cmd = + &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; + + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + + spin_lock_irq(&mhi_event->lock); + mhi_ring_er_db(mhi_event); + spin_unlock_irq(&mhi_event->lock); + } + + /* only ring primary cmd ring */ + spin_lock_irq(&mhi_cmd->lock); + if (mhi_cmd->ring.rp != mhi_cmd->ring.wp) + mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); + spin_unlock_irq(&mhi_cmd->lock); + } + + /* ring channel db registers */ + mhi_chan = mhi_cntrl->mhi_chan; + for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { + struct mhi_ring *tre_ring = &mhi_chan->tre_ring; + + write_lock_irq(&mhi_chan->lock); + if (mhi_chan->db_cfg.reset_req) + mhi_chan->db_cfg.db_mode = true; + + /* only ring DB if ring is not empty */ + if (tre_ring->base && tre_ring->wp != tre_ring->rp) + mhi_ring_chan_db(mhi_cntrl, mhi_chan); + write_unlock_irq(&mhi_chan->lock); + } + + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + wake_up_all(&mhi_cntrl->state_event); + MHI_VERB("Exited\n"); + + return 0; +} + +void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl) +{ + enum MHI_PM_STATE state; + + write_lock_irq(&mhi_cntrl->pm_lock); + /* if it fails, means we transition to M3 */ + state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2); + if (state == MHI_PM_M2) { + MHI_VERB("Entered M2 State\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2); + mhi_cntrl->dev_state = MHI_STATE_M2; + mhi_cntrl->M2++; + + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up_all(&mhi_cntrl->state_event); + + /* transfer pending, exit M2 immediately */ + if (unlikely(atomic_read(&mhi_cntrl->dev_wake))) { + MHI_VERB("Exiting M2 Immediately, count:%d\n", + atomic_read(&mhi_cntrl->dev_wake)); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, true); + mhi_cntrl->wake_put(mhi_cntrl, false); + 
read_unlock_bh(&mhi_cntrl->pm_lock); + } else { + mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data, + MHI_CB_IDLE); + } + } else { + write_unlock_irq(&mhi_cntrl->pm_lock); + } +} + +int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl) +{ + enum MHI_PM_STATE state; + + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->dev_state = MHI_STATE_M3; + state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (state != MHI_PM_M3) { + MHI_ERR("Failed to transition to state %s from %s\n", + to_mhi_pm_state_str(MHI_PM_M3), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + wake_up_all(&mhi_cntrl->state_event); + mhi_cntrl->M3++; + + MHI_LOG("Entered mhi_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return 0; +} + +static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl) +{ + int i, ret; + struct mhi_event *mhi_event; + + MHI_LOG("Processing Mission Mode Transition\n"); + mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data, MHI_CB_EE_MISSION_MODE); + + /* force MHI to be in M0 state before continuing */ + ret = __mhi_device_get_sync(mhi_cntrl); + if (ret) + return ret; + + ret = -EIO; + + write_lock_irq(&mhi_cntrl->pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl); + write_unlock_irq(&mhi_cntrl->pm_lock); + + read_lock_bh(&mhi_cntrl->pm_lock); + if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) + goto error_mission_mode; + + wake_up_all(&mhi_cntrl->state_event); + + /* add elements to all HW event rings */ + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + goto error_mission_mode; + + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + if (mhi_event->offload_ev || !mhi_event->hw_ring) + continue; + + ring->wp = ring->base + ring->len - ring->el_size; + *ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size; + /* all ring updates must get updated immediately */ + smp_wmb(); + + spin_lock_irq(&mhi_event->lock); + if (MHI_DB_ACCESS_VALID(mhi_cntrl->pm_state)) + mhi_ring_er_db(mhi_event); + spin_unlock_irq(&mhi_event->lock); + + } + + read_unlock_bh(&mhi_cntrl->pm_lock); + + /* setup support for time sync */ + mhi_init_timesync(mhi_cntrl); + + MHI_LOG("Adding new devices\n"); + + /* add supported devices */ + mhi_create_devices(mhi_cntrl); + + ret = 0; + + read_lock_bh(&mhi_cntrl->pm_lock); + +error_mission_mode: + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + MHI_LOG("Exit with ret:%d\n", ret); + + return ret; +} + +/* handles both sys_err and shutdown transitions */ +static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl, + enum MHI_PM_STATE transition_state) +{ + enum MHI_PM_STATE cur_state, prev_state; + struct mhi_event *mhi_event; + struct mhi_cmd_ctxt *cmd_ctxt; + struct mhi_cmd *mhi_cmd; + struct mhi_event_ctxt *er_ctxt; + int ret, i; + + MHI_LOG("Enter with from pm_state:%s MHI_STATE:%s to pm_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(transition_state)); + + /* We must notify MHI control driver so it can clean up first */ + if (transition_state == MHI_PM_SYS_ERR_PROCESS) + mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data, + MHI_CB_SYS_ERROR); + + mutex_lock(&mhi_cntrl->pm_mutex); + write_lock_irq(&mhi_cntrl->pm_lock); + prev_state = mhi_cntrl->pm_state; + cur_state = 
mhi_tryset_pm_state(mhi_cntrl, transition_state); + if (cur_state == transition_state) { + mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; + mhi_cntrl->dev_state = MHI_STATE_RESET; + } + write_unlock_irq(&mhi_cntrl->pm_lock); + + /* wake up any threads waiting for state transitions */ + wake_up_all(&mhi_cntrl->state_event); + + /* not handling sys_err, could be middle of shut down */ + if (cur_state != transition_state) { + MHI_LOG("Failed to transition to state:0x%x from:0x%x\n", + transition_state, cur_state); + mutex_unlock(&mhi_cntrl->pm_mutex); + return; + } + + /* trigger MHI RESET so device will not access host ddr */ + if (MHI_REG_ACCESS_VALID(prev_state)) { + u32 in_reset = -1; + unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms); + + MHI_LOG("Trigger device into MHI_RESET\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); + + /* wait for reset to be cleared */ + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_read_reg_field(mhi_cntrl, + mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, + MHICTRL_RESET_SHIFT, &in_reset) + || !in_reset, timeout); + if ((!ret || in_reset) && cur_state == MHI_PM_SYS_ERR_PROCESS) { + MHI_CRITICAL("Device failed to exit RESET state\n"); + mutex_unlock(&mhi_cntrl->pm_mutex); + return; + } + + /* Set the numbers of Event Rings supported */ + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT, NUM_MHI_EVT_RINGS); + mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT, NUM_MHI_HW_EVT_RINGS); + + /* + * device cleares INTVEC as part of RESET processing, + * re-program it + */ + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, mhi_cntrl->msi_irq_base); + } + + MHI_LOG("Waiting for all pending event ring processing to complete\n"); + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + tasklet_kill(&mhi_event->task); + } + + mutex_unlock(&mhi_cntrl->pm_mutex); + + MHI_LOG("Reset all active channels and remove mhi devices\n"); + mhi_cntrl->klog_slient = 1; + device_for_each_child(mhi_cntrl->dev, NULL, mhi_destroy_device); + mhi_cntrl->klog_slient = 0; + + MHI_LOG("Finish resetting channels\n"); + + MHI_LOG("Waiting for all pending threads to complete\n"); + wake_up_all(&mhi_cntrl->state_event); + flush_delayed_work(&mhi_cntrl->ready_worker); + flush_work(&mhi_cntrl->st_worker); + flush_work(&mhi_cntrl->fw_worker); + + mutex_lock(&mhi_cntrl->pm_mutex); + + MHI_ASSERT(atomic_read(&mhi_cntrl->dev_wake), "dev_wake != 0"); + + /* reset the ev rings and cmd rings */ + MHI_LOG("Resetting EV CTXT and CMD CTXT\n"); + mhi_cmd = mhi_cntrl->mhi_cmd; + cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { + struct mhi_ring *ring = &mhi_cmd->ring; + + ring->rp = ring->base; + ring->wp = ring->base; + cmd_ctxt->rp = cmd_ctxt->rbase; + cmd_ctxt->wp = cmd_ctxt->rbase; + } + + mhi_event = mhi_cntrl->mhi_event; + er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* do not touch offload er */ + if (mhi_event->offload_ev) + continue; + + ring->rp = ring->base; + ring->wp = ring->base; + er_ctxt->rp = er_ctxt->rbase; + er_ctxt->wp = er_ctxt->rbase; + } + + /* remove support for time sync */ + mhi_destroy_timesync(mhi_cntrl); + + if (cur_state == MHI_PM_SYS_ERR_PROCESS) { + if (mhi_get_exec_env(mhi_cntrl) == MHI_EE_EDL && 
mhi_get_mhi_state(mhi_cntrl) == MHI_STATE_RESET) { + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->ee = MHI_EE_EDL; + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR); + write_unlock_irq(&mhi_cntrl->pm_lock); + } + else + mhi_ready_state_transition(mhi_cntrl); + } else { + /* move to disable state */ + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (unlikely(cur_state != MHI_PM_DISABLE)) + MHI_ERR("Error moving from pm state:%s to state:%s\n", + to_mhi_pm_state_str(cur_state), + to_mhi_pm_state_str(MHI_PM_DISABLE)); + } + + MHI_LOG("Exit with pm_state:%s mhi_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + + mutex_unlock(&mhi_cntrl->pm_mutex); +} + +int mhi_debugfs_trigger_reset(void *data, u64 val) +{ + struct mhi_controller *mhi_cntrl = data; + enum MHI_PM_STATE cur_state; + int ret; + + MHI_LOG("Trigger MHI Reset\n"); + + /* exit lpm first */ + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M0 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("Did not enter M0 state, cur_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_DETECT); + write_unlock_irq(&mhi_cntrl->pm_lock); + + if (cur_state == MHI_PM_SYS_ERR_DETECT) + schedule_work(&mhi_cntrl->syserr_worker); + + return 0; +} + +/* queue a new work item and scheduler work */ +int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, + enum MHI_ST_TRANSITION state) +{ + struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC); + unsigned long flags; + + if (!item) + return -ENOMEM; + + item->state = state; + spin_lock_irqsave(&mhi_cntrl->transition_lock, flags); + list_add_tail(&item->node, &mhi_cntrl->transition_list); + spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags); + + schedule_work(&mhi_cntrl->st_worker); + + return 0; +} + +void mhi_pm_sys_err_worker(struct work_struct *work) +{ + struct mhi_controller *mhi_cntrl = container_of(work, + struct mhi_controller, + syserr_worker); + + MHI_LOG("Enter with pm_state:%s MHI_STATE:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + + mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS); +} + +void mhi_pm_ready_worker(struct work_struct *work) +{ + struct mhi_controller *mhi_cntrl = container_of(work, + struct mhi_controller, + ready_worker.work); + enum mhi_ee ee = MHI_EE_MAX; + + if (mhi_cntrl->dev_state != MHI_STATE_RESET) + return; + + write_lock_irq(&mhi_cntrl->pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + ee = mhi_get_exec_env(mhi_cntrl); + write_unlock_irq(&mhi_cntrl->pm_lock); + + if (ee == MHI_EE_PTHRU) + schedule_delayed_work(&mhi_cntrl->ready_worker, msecs_to_jiffies(10)); + else if (ee == MHI_EE_AMSS || ee == MHI_EE_SBL) + mhi_queue_state_transition(mhi_cntrl, MHI_ST_TRANSITION_READY); + else if (ee == MHI_EE_EDL) + mhi_queue_state_transition(mhi_cntrl, MHI_ST_TRANSITION_PBL); +} + +void mhi_pm_st_worker(struct work_struct *work) +{ + struct state_transition *itr, *tmp; + 
LIST_HEAD(head); + struct mhi_controller *mhi_cntrl = container_of(work, + struct mhi_controller, + st_worker); + spin_lock_irq(&mhi_cntrl->transition_lock); + list_splice_tail_init(&mhi_cntrl->transition_list, &head); + spin_unlock_irq(&mhi_cntrl->transition_lock); + + list_for_each_entry_safe(itr, tmp, &head, node) { + list_del(&itr->node); + MHI_LOG("Transition to state:%s\n", + TO_MHI_STATE_TRANS_STR(itr->state)); + + if (mhi_cntrl->ee != mhi_get_exec_env(mhi_cntrl)) { + MHI_LOG("%s -> %s\n", TO_MHI_EXEC_STR(mhi_cntrl->ee), + TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl))); + } + + switch (itr->state) { + case MHI_ST_TRANSITION_PBL: + write_lock_irq(&mhi_cntrl->pm_lock); + if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) + mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (MHI_IN_PBL(mhi_cntrl->ee)) + wake_up_all(&mhi_cntrl->state_event); + break; + case MHI_ST_TRANSITION_SBL: + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->ee = MHI_EE_SBL; + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up_all(&mhi_cntrl->state_event); + mhi_create_devices(mhi_cntrl); + break; + case MHI_ST_TRANSITION_MISSION_MODE: + mhi_pm_mission_mode_transition(mhi_cntrl); + break; + case MHI_ST_TRANSITION_READY: + mhi_ready_state_transition(mhi_cntrl); + break; + case MHI_ST_TRANSITION_FP: + write_lock_irq(&mhi_cntrl->pm_lock); + mhi_cntrl->ee = MHI_EE_FP; + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up(&mhi_cntrl->state_event); + mhi_create_devices(mhi_cntrl); + break; + default: + break; + } + kfree(itr); + } +} + +int mhi_async_power_up(struct mhi_controller *mhi_cntrl) +{ + int ret; + u32 val, regVal; + enum mhi_ee current_ee; + enum MHI_ST_TRANSITION next_state; + + MHI_LOG("Requested to power on\n"); + +#if 0 + if (mhi_cntrl->msi_allocated < mhi_cntrl->total_ev_rings) + return -EINVAL; +#endif + + if (mhi_get_mhi_state(mhi_cntrl) >= MHI_STATE_M0) { + MHI_LOG("mhi_state = %s\n", mhi_state_str[mhi_get_mhi_state(mhi_cntrl)]); + MHI_LOG("Trigger device into MHI_RESET\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); + msleep(50); + MHI_LOG("mhi_state = %s\n", mhi_state_str[mhi_get_mhi_state(mhi_cntrl)]); + } + +#if 1 //GLUE.SDX55_LE.1.0-00098-NOOP_TEST-1\common\hostdrivers\win\MhiHost MhiInitNewDev() + /* Check device Channels support */ + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, MHICFG, ®Val); +#if 0 + val = MHI_READ_REG_FIELD(regVal, MHICFG, NCH); + MHI_LOG("Device CHs: %d\n", val); + val = MHI_READ_REG_FIELD(regVal, MHICFG, NHWCH); + MHI_LOG("Device HW CHs: %d\n", val); + val = MHI_READ_REG_FIELD(regVal, MHICFG, NER); + MHI_LOG("Device ERs: %d\n", val); + val = MHI_READ_REG_FIELD(regVal, MHICFG, NHWER); + MHI_LOG("Device HW ERs: %d\n", val); +#endif + /* Set the numbers of Event Rings supported */ + MHI_WRITE_REG_FIELD(regVal, MHICFG, NER, NUM_MHI_EVT_RINGS); + MHI_WRITE_REG_FIELD(regVal, MHICFG, NHWER, NUM_MHI_HW_EVT_RINGS); + mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHICFG, regVal); +#endif + + /* set to default wake if not set */ + if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put) { + mhi_cntrl->wake_get = mhi_assert_dev_wake; + mhi_cntrl->wake_put = mhi_deassert_dev_wake; + } + + mutex_lock(&mhi_cntrl->pm_mutex); + mhi_cntrl->pm_state = MHI_PM_DISABLE; + mhi_cntrl->dev_state = MHI_STATE_RESET; + + if (!mhi_cntrl->pre_init) { + /* setup device context */ + ret = mhi_init_dev_ctxt(mhi_cntrl); + if (ret) { + MHI_ERR("Error setting dev_context\n"); + goto error_dev_ctxt; + } + + ret = mhi_init_irq_setup(mhi_cntrl); + if (ret) { + MHI_ERR("Error setting up 
irq\n"); + goto error_setup_irq; + } + } + + /* setup bhi offset & intvec */ + write_lock_irq(&mhi_cntrl->pm_lock); + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val); + if (ret) { + write_unlock_irq(&mhi_cntrl->pm_lock); + MHI_ERR("Error getting bhi offset\n"); + goto error_bhi_offset; + } + + mhi_cntrl->bhi = mhi_cntrl->regs + val; + + /* setup bhie offset */ + if (mhi_cntrl->fbc_download || true) { + ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val); + if (ret) { + write_unlock_irq(&mhi_cntrl->pm_lock); + MHI_ERR("Error getting bhie offset\n"); + goto error_bhi_offset; + } + + mhi_cntrl->bhie = mhi_cntrl->regs + val; + } + + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, mhi_cntrl->msi_irq_base); + mhi_cntrl->pm_state = MHI_PM_POR; + mhi_cntrl->ee = MHI_EE_MAX; + current_ee = mhi_get_exec_env(mhi_cntrl); + write_unlock_irq(&mhi_cntrl->pm_lock); + + MHI_LOG("dev_state:%s ee:%s\n", + TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)), + TO_MHI_EXEC_STR(mhi_get_exec_env(mhi_cntrl))); + + /* confirm device is in valid exec env */ + if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) { + //MHI_ERR("Not a valid ee for power on\n"); + //ret = -EIO; + //goto error_bhi_offset; + } + + /* transition to next state */ + next_state = MHI_IN_PBL(current_ee) ? + MHI_ST_TRANSITION_PBL : MHI_ST_TRANSITION_READY; + + //if (next_state == MHI_ST_TRANSITION_PBL) + // schedule_work(&mhi_cntrl->fw_worker); + + if (next_state == MHI_ST_TRANSITION_PBL) + schedule_delayed_work(&mhi_cntrl->ready_worker, msecs_to_jiffies(10)); + else + mhi_queue_state_transition(mhi_cntrl, next_state); + + mhi_init_debugfs(mhi_cntrl); + + mutex_unlock(&mhi_cntrl->pm_mutex); + + MHI_LOG("Power on setup success\n"); + + return 0; + +error_bhi_offset: + if (!mhi_cntrl->pre_init) + mhi_deinit_free_irq(mhi_cntrl); + +error_setup_irq: + if (!mhi_cntrl->pre_init) + mhi_deinit_dev_ctxt(mhi_cntrl); + +error_dev_ctxt: + mutex_unlock(&mhi_cntrl->pm_mutex); + + return ret; +} +EXPORT_SYMBOL(mhi_async_power_up); + +void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) +{ + enum MHI_PM_STATE cur_state; + + /* if it's not graceful shutdown, force MHI to a linkdown state */ + if (!graceful) { + mutex_lock(&mhi_cntrl->pm_mutex); + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, + MHI_PM_LD_ERR_FATAL_DETECT); + write_unlock_irq(&mhi_cntrl->pm_lock); + mutex_unlock(&mhi_cntrl->pm_mutex); + if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT) + MHI_ERR("Failed to move to state:%s from:%s\n", + to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + } + mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS); + + mhi_deinit_debugfs(mhi_cntrl); + + if (!mhi_cntrl->pre_init) { + /* free all allocated resources */ + if (mhi_cntrl->fbc_image) { + mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); + mhi_cntrl->fbc_image = NULL; + } + mhi_deinit_free_irq(mhi_cntrl); + mhi_deinit_dev_ctxt(mhi_cntrl); + } +} +EXPORT_SYMBOL(mhi_power_down); + +int mhi_sync_power_up(struct mhi_controller *mhi_cntrl) +{ + int ret = mhi_async_power_up(mhi_cntrl); + + if (ret) + return ret; + + wait_event_timeout(mhi_cntrl->state_event, + MHI_IN_MISSION_MODE(mhi_cntrl->ee) || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + return (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 
0 : -EIO; +} +EXPORT_SYMBOL(mhi_sync_power_up); + +int mhi_pm_suspend(struct mhi_controller *mhi_cntrl) +{ + int ret; + enum MHI_PM_STATE new_state; + struct mhi_chan *itr, *tmp; + + if (mhi_cntrl->pm_state == MHI_PM_DISABLE) + return -EINVAL; + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + /* do a quick check to see if any pending data, then exit */ + if (atomic_read(&mhi_cntrl->dev_wake)) { + MHI_VERB("Busy, aborting M3\n"); + return -EBUSY; + } + + /* exit MHI out of M2 state */ + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M0 || + mhi_cntrl->dev_state == MHI_STATE_M1 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR( + "Did not enter M0||M1 state, cur_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + ret = -EIO; + goto error_m0_entry; + } + + write_lock_irq(&mhi_cntrl->pm_lock); + + /* we're asserting wake so count would be @ least 1 */ + if (atomic_read(&mhi_cntrl->dev_wake) > 1) { + MHI_VERB("Busy, aborting M3\n"); + write_unlock_irq(&mhi_cntrl->pm_lock); + ret = -EBUSY; + goto error_m0_entry; + } + + /* anytime after this, we will resume thru runtime pm framework */ + MHI_LOG("Allowing M3 transition\n"); + new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER); + if (new_state != MHI_PM_M3_ENTER) { + write_unlock_irq(&mhi_cntrl->pm_lock); + MHI_ERR("Error setting to pm_state:%s from pm_state:%s\n", + to_mhi_pm_state_str(MHI_PM_M3_ENTER), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + + ret = -EIO; + goto error_m0_entry; + } + + /* set dev to M3 and wait for completion */ + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3); + mhi_cntrl->wake_put(mhi_cntrl, false); + write_unlock_irq(&mhi_cntrl->pm_lock); + MHI_LOG("Wait for M3 completion\n"); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M3 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("Did not enter M3 state, cur_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + /* notify any clients we enter lpm */ + list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { + mutex_lock(&itr->mutex); + if (itr->mhi_dev) + mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER); + mutex_unlock(&itr->mutex); + } + + return 0; + +error_m0_entry: + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + return ret; +} +EXPORT_SYMBOL(mhi_pm_suspend); + +int mhi_pm_resume(struct mhi_controller *mhi_cntrl) +{ + enum MHI_PM_STATE cur_state; + int ret; + struct mhi_chan *itr, *tmp; + + MHI_LOG("Entered with pm_state:%s dev_state:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + + if (mhi_cntrl->pm_state == MHI_PM_DISABLE) + return 0; + + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) + return -EIO; + + MHI_ASSERT(mhi_cntrl->pm_state != MHI_PM_M3, "mhi_pm_state != M3"); + + /* notify any clients we enter lpm */ + list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) { + mutex_lock(&itr->mutex); + if (itr->mhi_dev) + mhi_notify(itr->mhi_dev, 
MHI_CB_LPM_EXIT); + mutex_unlock(&itr->mutex); + } + + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT); + if (cur_state != MHI_PM_M3_EXIT) { + write_unlock_irq(&mhi_cntrl->pm_lock); + MHI_ERR("Error setting to pm_state:%s from pm_state:%s\n", + to_mhi_pm_state_str(MHI_PM_M3_EXIT), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + return -EIO; + } + + /* set dev to M0 and wait for completion */ + mhi_cntrl->wake_get(mhi_cntrl, true); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + write_unlock_irq(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->dev_state == MHI_STATE_M0 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("Did not enter M0 state, cur_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + + /* + * It's possible device already in error state and we didn't + * process it due to low power mode, force a check + */ + mhi_intvec_threaded_handlr(0, mhi_cntrl); + return -EIO; + } + + return 0; +} + +int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl) +{ + int ret; + + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, true); + if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) { + mhi_cntrl->runtime_get(mhi_cntrl, mhi_cntrl->priv_data); + mhi_cntrl->runtime_put(mhi_cntrl, mhi_cntrl->priv_data); + } + read_unlock_bh(&mhi_cntrl->pm_lock); + + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->pm_state == MHI_PM_M0 || + MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + + if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + MHI_ERR("Did not enter M0 state, cur_state:%s pm_state:%s\n", + TO_MHI_STATE_STR(mhi_cntrl->dev_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } + + return 0; +} + +void mhi_device_get(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + atomic_inc(&mhi_dev->dev_wake); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_get(mhi_cntrl, true); + read_unlock_bh(&mhi_cntrl->pm_lock); +} +EXPORT_SYMBOL(mhi_device_get); + +int mhi_device_get_sync(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + int ret; + + ret = __mhi_device_get_sync(mhi_cntrl); + if (!ret) + atomic_inc(&mhi_dev->dev_wake); + + return ret; +} +EXPORT_SYMBOL(mhi_device_get_sync); + +void mhi_device_put(struct mhi_device *mhi_dev) +{ + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + atomic_dec(&mhi_dev->dev_wake); + read_lock_bh(&mhi_cntrl->pm_lock); + mhi_cntrl->wake_put(mhi_cntrl, false); + read_unlock_bh(&mhi_cntrl->pm_lock); +} +EXPORT_SYMBOL(mhi_device_put); + +int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl) +{ + int ret; + + MHI_LOG("Enter with pm_state:%s ee:%s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee)); + + MHI_LOG("Triggering SYS_ERR to force rddm state\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); + + /* wait for rddm event */ + MHI_LOG("Waiting for device to enter RDDM state\n"); + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_cntrl->ee == 
MHI_EE_RDDM, + msecs_to_jiffies(mhi_cntrl->timeout_ms)); + ret = ret ? 0 : -EIO; + + MHI_LOG("Exiting with pm_state:%s ee:%s ret:%d\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_EXEC_STR(mhi_cntrl->ee), ret); + + return ret; +} +EXPORT_SYMBOL(mhi_force_rddm_mode); diff --git a/wwan/driver/quectel_MHI/src/core/mhi_sdx20.h b/wwan/driver/quectel_MHI/src/core/mhi_sdx20.h new file mode 100644 index 0000000..5a92efa --- /dev/null +++ b/wwan/driver/quectel_MHI/src/core/mhi_sdx20.h @@ -0,0 +1,362 @@ +#ifndef __SDX20_MHI_H +#define __SDX20_MHI_H + +#include + +/* MHI control data structures alloted by the host, including + * channel context array, event context array, command context and rings */ + +/* Channel context state */ +enum mhi_dev_ch_ctx_state { + MHI_DEV_CH_STATE_DISABLED, + MHI_DEV_CH_STATE_ENABLED, + MHI_DEV_CH_STATE_RUNNING, + MHI_DEV_CH_STATE_SUSPENDED, + MHI_DEV_CH_STATE_STOP, + MHI_DEV_CH_STATE_ERROR, + MHI_DEV_CH_STATE_RESERVED, + MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF +}; + +/* Channel type */ +enum mhi_dev_ch_ctx_type { + MHI_DEV_CH_TYPE_NONE, + MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL, + MHI_DEV_CH_TYPE_INBOUND_CHANNEL, + MHI_DEV_CH_RESERVED +}; + +/* Channel context type */ +struct mhi_dev_ch_ctx { + enum mhi_dev_ch_ctx_state ch_state; + enum mhi_dev_ch_ctx_type ch_type; + uint32_t err_indx; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +enum mhi_dev_ring_element_type_id { + MHI_DEV_RING_EL_INVALID = 0, + MHI_DEV_RING_EL_NOOP = 1, + MHI_DEV_RING_EL_TRANSFER = 2, + MHI_DEV_RING_EL_RESET = 16, + MHI_DEV_RING_EL_STOP = 17, + MHI_DEV_RING_EL_START = 18, + MHI_DEV_RING_EL_MHI_STATE_CHG = 32, + MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33, + MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34, + MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64, + MHI_DEV_RING_EL_UNDEF +}; + +enum mhi_dev_ring_state { + RING_STATE_UINT = 0, + RING_STATE_IDLE, + RING_STATE_PENDING, +}; + +enum mhi_dev_ring_type { + RING_TYPE_CMD = 0, + RING_TYPE_ER, + RING_TYPE_CH, + RING_TYPE_INVAL +}; + +/* Event context interrupt moderation */ +enum mhi_dev_evt_ctx_int_mod_timer { + MHI_DEV_EVT_INT_MODERATION_DISABLED +}; + +/* Event ring type */ +enum mhi_dev_evt_ctx_event_ring_type { + MHI_DEV_EVT_TYPE_DEFAULT, + MHI_DEV_EVT_TYPE_VALID, + MHI_DEV_EVT_RESERVED +}; + +/* Event ring context type */ +struct mhi_dev_ev_ctx { + uint32_t res1:16; + enum mhi_dev_evt_ctx_int_mod_timer intmodt:16; + enum mhi_dev_evt_ctx_event_ring_type ertype; + uint32_t msivec; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* Command context */ +struct mhi_dev_cmd_ctx { + uint32_t res1; + uint32_t res2; + uint32_t res3; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* generic context */ +struct mhi_dev_gen_ctx { + uint32_t res1; + uint32_t res2; + uint32_t res3; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* Transfer ring element */ +struct mhi_dev_transfer_ring_element { + uint64_t data_buf_ptr; + uint32_t len:16; + uint32_t res1:16; + uint32_t chain:1; + uint32_t res2:7; + uint32_t ieob:1; + uint32_t ieot:1; + uint32_t bei:1; + uint32_t res3:5; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res4:8; +} __packed; + +/* Command ring element */ +/* Command ring No op command */ +struct mhi_dev_cmd_ring_op { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring reset channel command */ +struct 
mhi_dev_cmd_ring_reset_channel_cmd { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring stop channel command */ +struct mhi_dev_cmd_ring_stop_channel_cmd { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring start channel command */ +struct mhi_dev_cmd_ring_start_channel_cmd { + uint64_t res1; + uint32_t seqnum; + uint32_t reliable:1; + uint32_t res2:15; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +enum mhi_dev_cmd_completion_code { + MHI_CMD_COMPL_CODE_INVALID = 0, + MHI_CMD_COMPL_CODE_SUCCESS = 1, + MHI_CMD_COMPL_CODE_EOT = 2, + MHI_CMD_COMPL_CODE_OVERFLOW = 3, + MHI_CMD_COMPL_CODE_EOB = 4, + MHI_CMD_COMPL_CODE_UNDEFINED = 16, + MHI_CMD_COMPL_CODE_RING_EL = 17, + MHI_CMD_COMPL_CODE_RES +}; + +/* Event ring elements */ +/* Transfer completion event */ +struct mhi_dev_event_ring_transfer_completion { + uint64_t ptr; + uint32_t len:16; + uint32_t res1:8; + enum mhi_dev_cmd_completion_code code:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command completion event */ +struct mhi_dev_event_ring_cmd_completion { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_cmd_completion_code code:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +enum mhi_dev_state { + MHI_DEV_RESET_STATE = 0, + MHI_DEV_READY_STATE, + MHI_DEV_M0_STATE, + MHI_DEV_M1_STATE, + MHI_DEV_M2_STATE, + MHI_DEV_M3_STATE, + MHI_DEV_MAX_STATE, + MHI_DEV_SYSERR_STATE = 0xff +}; + +/* MHI state change event */ +struct mhi_dev_event_ring_state_change { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_state mhistate:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +enum mhi_dev_execenv { + MHI_DEV_SBL_EE = 1, + MHI_DEV_AMSS_EE = 2, + MHI_DEV_UNRESERVED +}; + +/* EE state change event */ +struct mhi_dev_event_ring_ee_state_change { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_execenv execenv:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +/* Generic cmd to parse common details like type and channel id */ +struct mhi_dev_ring_generic { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_state mhistate:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +struct mhi_config { + uint32_t mhi_reg_len; + uint32_t version; + uint32_t event_rings; + uint32_t channels; + uint32_t chdb_offset; + uint32_t erdb_offset; +}; + +#define NUM_CHANNELS 128 +#define HW_CHANNEL_BASE 100 +#define HW_CHANNEL_END 107 +#define MHI_ENV_VALUE 2 +#define MHI_MASK_ROWS_CH_EV_DB 4 +#define TRB_MAX_DATA_SIZE 8192 +#define MHI_CTRL_STATE 25 +#define IPA_DMA_SYNC 1 +#define IPA_DMA_ASYNC 0 + +/*maximum trasnfer completion events buffer*/ +#define MAX_TR_EVENTS 50 +/*maximum event requests */ +#define MHI_MAX_EVT_REQ 50 + +/* Possible ring element types */ +union mhi_dev_ring_element_type { + struct mhi_dev_cmd_ring_op cmd_no_op; + struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset; + struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop; + struct mhi_dev_cmd_ring_start_channel_cmd cmd_start; + struct mhi_dev_transfer_ring_element cmd_transfer; + struct mhi_dev_event_ring_transfer_completion evt_tr_comp; + struct mhi_dev_event_ring_cmd_completion evt_cmd_comp; + struct 
mhi_dev_event_ring_state_change evt_state_change; + struct mhi_dev_event_ring_ee_state_change evt_ee_state; + struct mhi_dev_ring_generic generic; +}; + +/* Transfer ring element type */ +union mhi_dev_ring_ctx { + struct mhi_dev_cmd_ctx cmd; + struct mhi_dev_ev_ctx ev; + struct mhi_dev_ch_ctx ch; + struct mhi_dev_gen_ctx generic; +}; + +/* MHI host Control and data address region */ +struct mhi_host_addr { + uint32_t ctrl_base_lsb; + uint32_t ctrl_base_msb; + uint32_t ctrl_limit_lsb; + uint32_t ctrl_limit_msb; + uint32_t data_base_lsb; + uint32_t data_base_msb; + uint32_t data_limit_lsb; + uint32_t data_limit_msb; +}; + +/* MHI physical and virtual address region */ +struct mhi_meminfo { + struct device *dev; + uintptr_t pa_aligned; + uintptr_t pa_unaligned; + uintptr_t va_aligned; + uintptr_t va_unaligned; + uintptr_t size; +}; + +struct mhi_addr { + uint64_t host_pa; + uintptr_t device_pa; + uintptr_t device_va; + size_t size; + dma_addr_t phy_addr; + void *virt_addr; + bool use_ipa_dma; +}; + +struct mhi_interrupt_state { + uint32_t mask; + uint32_t status; +}; + +enum mhi_dev_channel_state { + MHI_DEV_CH_UNINT, + MHI_DEV_CH_STARTED, + MHI_DEV_CH_PENDING_START, + MHI_DEV_CH_PENDING_STOP, + MHI_DEV_CH_STOPPED, + MHI_DEV_CH_CLOSED, +}; + +enum mhi_dev_ch_operation { + MHI_DEV_OPEN_CH, + MHI_DEV_CLOSE_CH, + MHI_DEV_READ_CH, + MHI_DEV_READ_WR, + MHI_DEV_POLL, +}; + +enum mhi_ctrl_info { + MHI_STATE_CONFIGURED = 0, + MHI_STATE_CONNECTED = 1, + MHI_STATE_DISCONNECTED = 2, + MHI_STATE_INVAL, +}; + +enum mhi_dev_tr_compl_evt_type { + SEND_EVENT_BUFFER, + SEND_EVENT_RD_OFFSET, +}; + +enum mhi_dev_transfer_type { + MHI_DEV_DMA_SYNC, + MHI_DEV_DMA_ASYNC, +}; +#endif /* _SDX20_MHI_H_ */ diff --git a/wwan/driver/quectel_MHI/src/core/sdx20_mhi.h b/wwan/driver/quectel_MHI/src/core/sdx20_mhi.h new file mode 100644 index 0000000..a7d3783 --- /dev/null +++ b/wwan/driver/quectel_MHI/src/core/sdx20_mhi.h @@ -0,0 +1,426 @@ +#ifndef __SDX20_MHI_H +#define __SDX20_MHI_H + +#include + +/* MHI control data structures alloted by the host, including + * channel context array, event context array, command context and rings */ + +/* Channel context state */ +enum mhi_dev_ch_ctx_state { + MHI_DEV_CH_STATE_DISABLED, + MHI_DEV_CH_STATE_ENABLED, + MHI_DEV_CH_STATE_RUNNING, + MHI_DEV_CH_STATE_SUSPENDED, + MHI_DEV_CH_STATE_STOP, + MHI_DEV_CH_STATE_ERROR, + MHI_DEV_CH_STATE_RESERVED, + MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF +}; + +/* Channel type */ +enum mhi_dev_ch_ctx_type { + MHI_DEV_CH_TYPE_NONE, + MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL, + MHI_DEV_CH_TYPE_INBOUND_CHANNEL, + MHI_DEV_CH_RESERVED +}; + +/* Channel context type */ +struct mhi_dev_ch_ctx { + enum mhi_dev_ch_ctx_state ch_state; + enum mhi_dev_ch_ctx_type ch_type; + uint32_t err_indx; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +enum mhi_dev_ring_element_type_id { + MHI_DEV_RING_EL_INVALID = 0, + MHI_DEV_RING_EL_NOOP = 1, + MHI_DEV_RING_EL_TRANSFER = 2, + MHI_DEV_RING_EL_RESET = 16, + MHI_DEV_RING_EL_STOP = 17, + MHI_DEV_RING_EL_START = 18, + MHI_DEV_RING_EL_MHI_STATE_CHG = 32, + MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33, + MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34, + MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64, + MHI_DEV_RING_EL_UNDEF +}; + +enum mhi_dev_ring_state { + RING_STATE_UINT = 0, + RING_STATE_IDLE, + RING_STATE_PENDING, +}; + +enum mhi_dev_ring_type { + RING_TYPE_CMD = 0, + RING_TYPE_ER, + RING_TYPE_CH, + RING_TYPE_INVAL +}; + +/* Event context interrupt moderation */ +enum mhi_dev_evt_ctx_int_mod_timer { + 
MHI_DEV_EVT_INT_MODERATION_DISABLED +}; + +/* Event ring type */ +enum mhi_dev_evt_ctx_event_ring_type { + MHI_DEV_EVT_TYPE_DEFAULT, + MHI_DEV_EVT_TYPE_VALID, + MHI_DEV_EVT_RESERVED +}; + +/* Event ring context type */ +struct mhi_dev_ev_ctx { + uint32_t res1:16; + enum mhi_dev_evt_ctx_int_mod_timer intmodt:16; + enum mhi_dev_evt_ctx_event_ring_type ertype; + uint32_t msivec; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* Command context */ +struct mhi_dev_cmd_ctx { + uint32_t res1; + uint32_t res2; + uint32_t res3; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* generic context */ +struct mhi_dev_gen_ctx { + uint32_t res1; + uint32_t res2; + uint32_t res3; + uint64_t rbase; + uint64_t rlen; + uint64_t rp; + uint64_t wp; +} __packed; + +/* Transfer ring element */ +struct mhi_dev_transfer_ring_element { + uint64_t data_buf_ptr; + uint32_t len:16; + uint32_t res1:16; + uint32_t chain:1; + uint32_t res2:7; + uint32_t ieob:1; + uint32_t ieot:1; + uint32_t bei:1; + uint32_t res3:5; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res4:8; +} __packed; + +/* Command ring element */ +/* Command ring No op command */ +struct mhi_dev_cmd_ring_op { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring reset channel command */ +struct mhi_dev_cmd_ring_reset_channel_cmd { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring stop channel command */ +struct mhi_dev_cmd_ring_stop_channel_cmd { + uint64_t res1; + uint32_t res2; + uint32_t res3:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command ring start channel command */ +struct mhi_dev_cmd_ring_start_channel_cmd { + uint64_t res1; + uint32_t seqnum; + uint32_t reliable:1; + uint32_t res2:15; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +enum mhi_dev_cmd_completion_code { + MHI_CMD_COMPL_CODE_INVALID = 0, + MHI_CMD_COMPL_CODE_SUCCESS = 1, + MHI_CMD_COMPL_CODE_EOT = 2, + MHI_CMD_COMPL_CODE_OVERFLOW = 3, + MHI_CMD_COMPL_CODE_EOB = 4, + MHI_CMD_COMPL_CODE_UNDEFINED = 16, + MHI_CMD_COMPL_CODE_RING_EL = 17, + MHI_CMD_COMPL_CODE_RES +}; + +/* Event ring elements */ +/* Transfer completion event */ +struct mhi_dev_event_ring_transfer_completion { + uint64_t ptr; + uint32_t len:16; + uint32_t res1:8; + enum mhi_dev_cmd_completion_code code:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +/* Command completion event */ +struct mhi_dev_event_ring_cmd_completion { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_cmd_completion_code code:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +enum mhi_dev_state { + MHI_DEV_RESET_STATE = 0, + MHI_DEV_READY_STATE, + MHI_DEV_M0_STATE, + MHI_DEV_M1_STATE, + MHI_DEV_M2_STATE, + MHI_DEV_M3_STATE, + MHI_DEV_MAX_STATE, + MHI_DEV_SYSERR_STATE = 0xff +}; + +/* MHI state change event */ +struct mhi_dev_event_ring_state_change { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_state mhistate:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +enum mhi_dev_execenv { + MHI_DEV_SBL_EE = 1, + MHI_DEV_AMSS_EE = 2, + MHI_DEV_UNRESERVED +}; + +/* EE state change event */ +struct mhi_dev_event_ring_ee_state_change { + uint64_t ptr; + uint32_t 
res1:24; + enum mhi_dev_execenv execenv:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t res3:8; +} __packed; + +/* Generic cmd to parse common details like type and channel id */ +struct mhi_dev_ring_generic { + uint64_t ptr; + uint32_t res1:24; + enum mhi_dev_state mhistate:8; + uint32_t res2:16; + enum mhi_dev_ring_element_type_id type:8; + uint32_t chid:8; +} __packed; + +struct mhi_config { + uint32_t mhi_reg_len; + uint32_t version; + uint32_t event_rings; + uint32_t channels; + uint32_t chdb_offset; + uint32_t erdb_offset; +}; + +#define NUM_CHANNELS 128 +#define HW_CHANNEL_BASE 100 +#define HW_CHANNEL_END 107 +#define MHI_ENV_VALUE 2 +#define MHI_MASK_ROWS_CH_EV_DB 4 +#define TRB_MAX_DATA_SIZE 8192 +#define MHI_CTRL_STATE 25 +#define IPA_DMA_SYNC 1 +#define IPA_DMA_ASYNC 0 + +/*maximum trasnfer completion events buffer*/ +#define MAX_TR_EVENTS 50 +/*maximum event requests */ +#define MHI_MAX_EVT_REQ 50 + +/* Possible ring element types */ +union mhi_dev_ring_element_type { + struct mhi_dev_cmd_ring_op cmd_no_op; + struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset; + struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop; + struct mhi_dev_cmd_ring_start_channel_cmd cmd_start; + struct mhi_dev_transfer_ring_element tre; + struct mhi_dev_event_ring_transfer_completion evt_tr_comp; + struct mhi_dev_event_ring_cmd_completion evt_cmd_comp; + struct mhi_dev_event_ring_state_change evt_state_change; + struct mhi_dev_event_ring_ee_state_change evt_ee_state; + struct mhi_dev_ring_generic generic; +}; + +/* Transfer ring element type */ +union mhi_dev_ring_ctx { + struct mhi_dev_cmd_ctx cmd; + struct mhi_dev_ev_ctx ev; + struct mhi_dev_ch_ctx ch; + struct mhi_dev_gen_ctx generic; +}; + +/* MHI host Control and data address region */ +struct mhi_host_addr { + uint32_t ctrl_base_lsb; + uint32_t ctrl_base_msb; + uint32_t ctrl_limit_lsb; + uint32_t ctrl_limit_msb; + uint32_t data_base_lsb; + uint32_t data_base_msb; + uint32_t data_limit_lsb; + uint32_t data_limit_msb; +}; + +/* MHI physical and virtual address region */ +struct mhi_meminfo { + struct device *dev; + uintptr_t pa_aligned; + uintptr_t pa_unaligned; + uintptr_t va_aligned; + uintptr_t va_unaligned; + uintptr_t size; +}; + +struct mhi_addr { + uint64_t host_pa; + uintptr_t device_pa; + uintptr_t device_va; + size_t size; + dma_addr_t phy_addr; + void *virt_addr; + bool use_ipa_dma; +}; + +struct mhi_interrupt_state { + uint32_t mask; + uint32_t status; +}; + +enum mhi_dev_channel_state { + MHI_DEV_CH_UNINT, + MHI_DEV_CH_STARTED, + MHI_DEV_CH_PENDING_START, + MHI_DEV_CH_PENDING_STOP, + MHI_DEV_CH_STOPPED, + MHI_DEV_CH_CLOSED, +}; + +enum mhi_dev_ch_operation { + MHI_DEV_OPEN_CH, + MHI_DEV_CLOSE_CH, + MHI_DEV_READ_CH, + MHI_DEV_READ_WR, + MHI_DEV_POLL, +}; + +enum mhi_ctrl_info { + MHI_STATE_CONFIGURED = 0, + MHI_STATE_CONNECTED = 1, + MHI_STATE_DISCONNECTED = 2, + MHI_STATE_INVAL, +}; + +enum mhi_dev_tr_compl_evt_type { + SEND_EVENT_BUFFER, + SEND_EVENT_RD_OFFSET, +}; + +enum mhi_dev_transfer_type { + MHI_DEV_DMA_SYNC, + MHI_DEV_DMA_ASYNC, +}; + +#if 0 +/* SW channel client list */ +enum mhi_client_channel { + MHI_CLIENT_LOOPBACK_OUT = 0, + MHI_CLIENT_LOOPBACK_IN = 1, + MHI_CLIENT_SAHARA_OUT = 2, + MHI_CLIENT_SAHARA_IN = 3, + MHI_CLIENT_DIAG_OUT = 4, + MHI_CLIENT_DIAG_IN = 5, + MHI_CLIENT_SSR_OUT = 6, + MHI_CLIENT_SSR_IN = 7, + MHI_CLIENT_QDSS_OUT = 8, + MHI_CLIENT_QDSS_IN = 9, + MHI_CLIENT_EFS_OUT = 10, + MHI_CLIENT_EFS_IN = 11, + MHI_CLIENT_MBIM_OUT = 12, + MHI_CLIENT_MBIM_IN = 13, + MHI_CLIENT_QMI_OUT = 14, 
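+	/* even channel IDs are OUT (host to device), odd IDs are IN */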
+ MHI_CLIENT_QMI_IN = 15, + MHI_CLIENT_IP_CTRL_0_OUT = 16, + MHI_CLIENT_IP_CTRL_0_IN = 17, + MHI_CLIENT_IP_CTRL_1_OUT = 18, + MHI_CLIENT_IP_CTRL_1_IN = 19, + MHI_CLIENT_DCI_OUT = 20, + MHI_CLIENT_DCI_IN = 21, + MHI_CLIENT_IP_CTRL_3_OUT = 22, + MHI_CLIENT_IP_CTRL_3_IN = 23, + MHI_CLIENT_IP_CTRL_4_OUT = 24, + MHI_CLIENT_IP_CTRL_4_IN = 25, + MHI_CLIENT_IP_CTRL_5_OUT = 26, + MHI_CLIENT_IP_CTRL_5_IN = 27, + MHI_CLIENT_IP_CTRL_6_OUT = 28, + MHI_CLIENT_IP_CTRL_6_IN = 29, + MHI_CLIENT_IP_CTRL_7_OUT = 30, + MHI_CLIENT_IP_CTRL_7_IN = 31, + MHI_CLIENT_DUN_OUT = 32, + MHI_CLIENT_DUN_IN = 33, + MHI_CLIENT_IP_SW_0_OUT = 34, + MHI_CLIENT_IP_SW_0_IN = 35, + MHI_CLIENT_IP_SW_1_OUT = 36, + MHI_CLIENT_IP_SW_1_IN = 37, + MHI_CLIENT_IP_SW_2_OUT = 38, + MHI_CLIENT_IP_SW_2_IN = 39, + MHI_CLIENT_IP_SW_3_OUT = 40, + MHI_CLIENT_IP_SW_3_IN = 41, + MHI_CLIENT_CSVT_OUT = 42, + MHI_CLIENT_CSVT_IN = 43, + MHI_CLIENT_SMCT_OUT = 44, + MHI_CLIENT_SMCT_IN = 45, + MHI_CLIENT_IP_SW_4_OUT = 46, + MHI_CLIENT_IP_SW_4_IN = 47, + MHI_MAX_SOFTWARE_CHANNELS = 48, + MHI_CLIENT_TEST_OUT = 60, + MHI_CLIENT_TEST_IN = 61, + MHI_CLIENT_RESERVED_1_LOWER = 62, + MHI_CLIENT_RESERVED_1_UPPER = 99, + MHI_CLIENT_IP_HW_0_OUT = 100, + MHI_CLIENT_IP_HW_0_IN = 101, + MHI_CLIENT_RESERVED_2_LOWER = 102, + MHI_CLIENT_RESERVED_2_UPPER = 127, + MHI_MAX_CHANNELS = 102, +}; +#endif +#endif /* _SDX20_MHI_H_ */ diff --git a/wwan/driver/quectel_MHI/src/devices/Kconfig b/wwan/driver/quectel_MHI/src/devices/Kconfig new file mode 100644 index 0000000..d92e95b --- /dev/null +++ b/wwan/driver/quectel_MHI/src/devices/Kconfig @@ -0,0 +1,33 @@ +menu "MHI device support" + +config MHI_NETDEV + tristate "MHI NETDEV" + depends on MHI_BUS + help + MHI based net device driver for transferring IP traffic + between host and modem. By enabling this driver, clients + can transfer data using standard network interface. Over + the air traffic goes thru mhi netdev interface. + +config MHI_UCI + tristate "MHI UCI" + depends on MHI_BUS + help + MHI based uci driver is for transferring data between host and + modem using standard file operations from user space. Open, read, + write, ioctl, and close operations are supported by this driver. + Please check mhi_uci_match_table for all supported channels that + are exposed to userspace. + +config MHI_SATELLITE + tristate "MHI SATELLITE" + depends on MHI_BUS + help + MHI proxy satellite device driver enables NON-HLOS MHI satellite + drivers to communicate with device over PCIe link without host + involvement. Host facilitates propagation of events from device + to NON-HLOS MHI satellite drivers, channel states, and power + management over IPC communication. It helps in HLOS power + savings. + +endmenu diff --git a/wwan/driver/quectel_MHI/src/devices/Makefile b/wwan/driver/quectel_MHI/src/devices/Makefile new file mode 100644 index 0000000..e720069 --- /dev/null +++ b/wwan/driver/quectel_MHI/src/devices/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_MHI_NETDEV) +=mhi_netdev.o +obj-$(CONFIG_MHI_UCI) +=mhi_uci.o +obj-$(CONFIG_MHI_SATELLITE) +=mhi_satellite.o diff --git a/wwan/driver/quectel_MHI/src/devices/mhi_netdev.c b/wwan/driver/quectel_MHI/src/devices/mhi_netdev.c new file mode 100644 index 0000000..ed7b24b --- /dev/null +++ b/wwan/driver/quectel_MHI/src/devices/mhi_netdev.c @@ -0,0 +1,1063 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2018-2019, The Linux Foundation. 
All rights reserved.*/ + +#include +#include +#include +#include +//#include +#include +#include +#include +//#include +#if 1 +static inline void *ipc_log_context_create(int max_num_pages, + const char *modname, uint16_t user_version) +{ return NULL; } +static inline int ipc_log_string(void *ilctxt, const char *fmt, ...) +{ return -EINVAL; } +#endif +#include +#include +#include +#include +#include +#include "../core/mhi.h" + +#define QUECTEL_NO_DTS + +extern void rmnet_data_init(struct net_device *real_dev, u32 nr_rmnet_devs); +extern void rmnet_data_deinit(struct net_device *real_dev, u32 nr_rmnet_devs); + +static const unsigned char node_id[ETH_ALEN] = {0x02, 0x50, 0xf4, 0x00, 0x00, 0x00}; +static const unsigned char default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3, 0x00, 0x00, 0x00}; +static void qmap_hex_dump(const char *tag, unsigned char *data, unsigned len) { + uint i; + uint *d = (uint *)data; + + printk(KERN_DEBUG "%s data=%p, len=%x\n", tag, data, len); + len = (len+3)/4; + for (i = 0; i < len; i+=4) { + printk(KERN_DEBUG "%08x %08x %08x %08x %08x\n", i*4, d[i+0], d[i+1], d[i+2], d[i+3]); + } +} + +#define MHI_NETDEV_DRIVER_NAME "mhi_netdev" +#define WATCHDOG_TIMEOUT (30 * HZ) +#define IPC_LOG_PAGES (100) +#define MAX_NETBUF_SIZE (128) + +#ifdef CONFIG_MHI_DEBUG + +#define IPC_LOG_LVL (MHI_MSG_LVL_VERBOSE) + +#define MHI_ASSERT(cond, msg) do { \ + if (cond) \ + panic(msg); \ +} while (0) + +#define MSG_VERB(fmt, ...) do { \ + if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_VERBOSE) \ + pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__);\ + if (mhi_netdev->ipc_log && (mhi_netdev->ipc_log_lvl <= \ + MHI_MSG_LVL_VERBOSE)) \ + ipc_log_string(mhi_netdev->ipc_log, "[D][%s] " fmt, \ + __func__, ##__VA_ARGS__); \ +} while (0) + +#else + +#define IPC_LOG_LVL (MHI_MSG_LVL_ERROR) + +#define MHI_ASSERT(cond, msg) do { \ + if (cond) { \ + MSG_ERR(msg); \ + WARN_ON(cond); \ + } \ +} while (0) + +#define MSG_VERB(fmt, ...) + +#endif + +#define MSG_LOG(fmt, ...) do { \ + if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_INFO) \ + pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__);\ + if (mhi_netdev->ipc_log && (mhi_netdev->ipc_log_lvl <= \ + MHI_MSG_LVL_INFO)) \ + ipc_log_string(mhi_netdev->ipc_log, "[I][%s] " fmt, \ + __func__, ##__VA_ARGS__); \ +} while (0) + +#define MSG_ERR(fmt, ...) do { \ + if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_ERROR) \ + pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \ + if (mhi_netdev->ipc_log && (mhi_netdev->ipc_log_lvl <= \ + MHI_MSG_LVL_ERROR)) \ + ipc_log_string(mhi_netdev->ipc_log, "[E][%s] " fmt, \ + __func__, ##__VA_ARGS__); \ +} while (0) + +struct mhi_net_chain { + struct sk_buff *head, *tail; /* chained skb */ +}; + +struct mhi_netdev { + int alias; + struct mhi_device *mhi_dev; + struct mhi_netdev *rsc_dev; /* rsc linked node */ + bool is_rsc_dev; + int wake; + + u32 mru; + u32 order; + const char *interface_name; + struct napi_struct *napi; + struct net_device *ndev; + bool ethernet_interface; + + struct mhi_netbuf **netbuf_pool; + int pool_size; /* must be power of 2 */ + int current_index; + bool chain_skb; + struct mhi_net_chain *chain; + + struct dentry *dentry; + enum MHI_DEBUG_LEVEL msg_lvl; + enum MHI_DEBUG_LEVEL ipc_log_lvl; + void *ipc_log; + + //struct rmnet_port port; +}; + +struct mhi_netdev_priv { + struct mhi_netdev *mhi_netdev; +}; + +/* Try not to make this structure bigger than 128 bytes, since this take space + * in payload packet. 
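+ * (The descriptor is carved out of the tail of the receive page itself in
+ * mhi_netdev_alloc(), so every byte it grows is a byte the device can no
+ * longer fill with data.)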
+ * Example: If MRU = 16K, effective MRU = 16K - sizeof(mhi_netbuf) + */ +struct mhi_netbuf { + struct mhi_buf mhi_buf; /* this must be first element */ + void (*unmap)(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir); +}; + +static struct mhi_driver mhi_netdev_driver; +static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev); + +static __be16 mhi_netdev_ip_type_trans(u8 data) +{ + __be16 protocol = 0; + + /* determine L3 protocol */ + switch (data & 0xf0) { + case 0x40: + protocol = htons(ETH_P_IP); + break; + case 0x60: + protocol = htons(ETH_P_IPV6); + break; + default: + /* default is QMAP */ + protocol = htons(ETH_P_MAP); + break; + } + protocol = htons(ETH_P_MAP); //carl.yin fix set + return protocol; +} + +static struct mhi_netbuf *mhi_netdev_alloc(struct device *dev, + gfp_t gfp, + unsigned int order) +{ + struct page *page; + struct mhi_netbuf *netbuf; + struct mhi_buf *mhi_buf; + void *vaddr; + + page = __dev_alloc_pages(gfp, order); + if (!page) + return NULL; + + vaddr = page_address(page); + + /* we going to use the end of page to store cached data */ + netbuf = vaddr + (PAGE_SIZE << order) - sizeof(*netbuf); + + mhi_buf = (struct mhi_buf *)netbuf; + mhi_buf->page = page; + mhi_buf->buf = vaddr; + mhi_buf->len = (void *)netbuf - vaddr; + mhi_buf->dma_addr = dma_map_page(dev, page, 0, mhi_buf->len, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, mhi_buf->dma_addr)) { + __free_pages(mhi_buf->page, order); + return NULL; + } + + return netbuf; +} + +static void mhi_netdev_unmap_page(struct device *dev, + dma_addr_t dma_addr, + size_t len, + enum dma_data_direction dir) +{ + dma_unmap_page(dev, dma_addr, len, dir); +} + +static int mhi_netdev_tmp_alloc(struct mhi_netdev *mhi_netdev, int nr_tre) +{ + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + struct device *dev = mhi_dev->dev.parent; + const u32 order = mhi_netdev->order; + int i, ret; + + for (i = 0; i < nr_tre; i++) { + struct mhi_buf *mhi_buf; + struct mhi_netbuf *netbuf = mhi_netdev_alloc(dev, GFP_ATOMIC, + order); + if (!netbuf) + return -ENOMEM; + + mhi_buf = (struct mhi_buf *)netbuf; + netbuf->unmap = mhi_netdev_unmap_page; + + ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, mhi_buf, + mhi_buf->len, MHI_EOT); + if (unlikely(ret)) { + MSG_ERR("Failed to queue transfer, ret:%d\n", ret); + mhi_netdev_unmap_page(dev, mhi_buf->dma_addr, + mhi_buf->len, DMA_FROM_DEVICE); + __free_pages(mhi_buf->page, order); + return ret; + } + } + + return 0; +} + +static void mhi_netdev_queue(struct mhi_netdev *mhi_netdev) +{ + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + struct device *dev = mhi_dev->dev.parent; + struct mhi_netbuf *netbuf; + struct mhi_buf *mhi_buf; + struct mhi_netbuf **netbuf_pool = mhi_netdev->netbuf_pool; + int nr_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE); + int i, peak, cur_index, ret; + const int pool_size = mhi_netdev->pool_size - 1, max_peak = 4; + + MSG_VERB("Enter free_desc:%d\n", nr_tre); + + if (!nr_tre) + return; + + /* try going thru reclaim pool first */ + for (i = 0; i < nr_tre; i++) { + /* peak for the next buffer, we going to peak several times, + * and we going to give up if buffers are not yet free + */ + cur_index = mhi_netdev->current_index; + netbuf = NULL; + for (peak = 0; peak < max_peak; peak++) { + struct mhi_netbuf *tmp = netbuf_pool[cur_index]; + + mhi_buf = &tmp->mhi_buf; + + cur_index = (cur_index + 1) & pool_size; + + /* page == 1 idle, buffer is free to reclaim */ + if (page_count(mhi_buf->page) == 1) { + netbuf = 
tmp; + break; + } + } + + /* could not find a free buffer */ + if (!netbuf) + break; + + /* increment reference count so when network stack is done + * with buffer, the buffer won't be freed + */ + get_page(mhi_buf->page); + dma_sync_single_for_device(dev, mhi_buf->dma_addr, mhi_buf->len, + DMA_FROM_DEVICE); + ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, mhi_buf, + mhi_buf->len, MHI_EOT); + if (unlikely(ret)) { + MSG_ERR("Failed to queue buffer, ret:%d\n", ret); + netbuf->unmap(dev, mhi_buf->dma_addr, mhi_buf->len, + DMA_FROM_DEVICE); + put_page(mhi_buf->page); + return; + } + mhi_netdev->current_index = cur_index; + } + + /* recyling did not work, buffers are still busy allocate temp pkts */ + if (i < nr_tre) + mhi_netdev_tmp_alloc(mhi_netdev, nr_tre - i); +} + +/* allocating pool of memory */ +static int mhi_netdev_alloc_pool(struct mhi_netdev *mhi_netdev) +{ + int i; + struct mhi_netbuf *netbuf, **netbuf_pool; + struct mhi_buf *mhi_buf; + const u32 order = mhi_netdev->order; + struct device *dev = mhi_netdev->mhi_dev->dev.parent; + + netbuf_pool = kmalloc_array(mhi_netdev->pool_size, sizeof(*netbuf_pool), + GFP_KERNEL); + if (!netbuf_pool) + return -ENOMEM; + + for (i = 0; i < mhi_netdev->pool_size; i++) { + /* allocate paged data */ + netbuf = mhi_netdev_alloc(dev, GFP_KERNEL, order); + if (!netbuf) + goto error_alloc_page; + + netbuf->unmap = dma_sync_single_for_cpu; + netbuf_pool[i] = netbuf; + } + + mhi_netdev->netbuf_pool = netbuf_pool; + + return 0; + +error_alloc_page: + for (--i; i >= 0; i--) { + netbuf = netbuf_pool[i]; + mhi_buf = &netbuf->mhi_buf; + dma_unmap_page(dev, mhi_buf->dma_addr, mhi_buf->len, + DMA_FROM_DEVICE); + __free_pages(mhi_buf->page, order); + } + + kfree(netbuf_pool); + + return -ENOMEM; +} + +static void mhi_netdev_free_pool(struct mhi_netdev *mhi_netdev) +{ + int i; + struct mhi_netbuf *netbuf, **netbuf_pool = mhi_netdev->netbuf_pool; + struct device *dev = mhi_netdev->mhi_dev->dev.parent; + struct mhi_buf *mhi_buf; + + for (i = 0; i < mhi_netdev->pool_size; i++) { + netbuf = netbuf_pool[i]; + mhi_buf = &netbuf->mhi_buf; + dma_unmap_page(dev, mhi_buf->dma_addr, mhi_buf->len, + DMA_FROM_DEVICE); + __free_pages(mhi_buf->page, mhi_netdev->order); + } + + kfree(mhi_netdev->netbuf_pool); + mhi_netdev->netbuf_pool = NULL; +} + +static int mhi_netdev_poll(struct napi_struct *napi, int budget) +{ + struct net_device *dev = napi->dev; + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + struct mhi_netdev *rsc_dev = mhi_netdev->rsc_dev; + struct mhi_net_chain *chain = mhi_netdev->chain; + int rx_work = 0; + + MSG_VERB("Entered\n"); + + rx_work = mhi_poll(mhi_dev, budget); + + /* chained skb, push it to stack */ + if (chain && chain->head) { + netif_receive_skb(chain->head); + chain->head = NULL; + } + + if (rx_work < 0) { + MSG_ERR("Error polling ret:%d\n", rx_work); + napi_complete(napi); + return 0; + } + + /* queue new buffers */ + mhi_netdev_queue(mhi_netdev); + + if (rsc_dev) + mhi_netdev_queue(rsc_dev); + + /* complete work if # of packet processed less than allocated budget */ + if (rx_work < budget) + napi_complete(napi); + + MSG_VERB("polled %d pkts\n", rx_work); + + return rx_work; +} + +static int mhi_netdev_open(struct net_device *dev) +{ + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + + 
MSG_LOG("Opened net dev interface\n"); + + /* tx queue may not necessarily be stopped already + * so stop the queue if tx path is not enabled + */ + if (!mhi_dev->ul_chan) + netif_stop_queue(dev); + else + netif_start_queue(dev); + + return 0; + +} + +static int mhi_netdev_change_mtu(struct net_device *dev, int new_mtu) +{ + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + + if (new_mtu < 0 || mhi_dev->mtu < new_mtu) + return -EINVAL; + + dev->mtu = new_mtu; + return 0; +} + +static int mhi_netdev_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + int res = 0; + + MSG_VERB("Entered\n"); + + //printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + //qmap_hex_dump(__func__, skb->data, 32); + if (skb->protocol != htons(ETH_P_MAP)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + res = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, skb, skb->len, + MHI_EOT); + if (res) { + MSG_VERB("Failed to queue with reason:%d\n", res); + netif_stop_queue(dev); + res = NETDEV_TX_BUSY; + } + + MSG_VERB("Exited\n"); + + return res; +} + +#if 0 +static int mhi_netdev_ioctl_extended(struct net_device *dev, struct ifreq *ifr) +{ + struct rmnet_ioctl_extended_s ext_cmd; + int rc = 0; + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + + rc = copy_from_user(&ext_cmd, ifr->ifr_ifru.ifru_data, + sizeof(struct rmnet_ioctl_extended_s)); + if (rc) + return rc; + + switch (ext_cmd.extended_ioctl) { + case RMNET_IOCTL_GET_SUPPORTED_FEATURES: + ext_cmd.u.data = 0; + break; + case RMNET_IOCTL_GET_DRIVER_NAME: + strlcpy(ext_cmd.u.if_name, mhi_netdev->interface_name, + sizeof(ext_cmd.u.if_name)); + break; + case RMNET_IOCTL_SET_SLEEP_STATE: + if (ext_cmd.u.data && mhi_netdev->wake) { + /* Request to enable LPM */ + MSG_VERB("Enable MHI LPM"); + mhi_netdev->wake--; + mhi_device_put(mhi_dev); + } else if (!ext_cmd.u.data && !mhi_netdev->wake) { + /* Request to disable LPM */ + MSG_VERB("Disable MHI LPM"); + mhi_netdev->wake++; + mhi_device_get(mhi_dev); + } + break; + default: + rc = -EINVAL; + break; + } + + rc = copy_to_user(ifr->ifr_ifru.ifru_data, &ext_cmd, + sizeof(struct rmnet_ioctl_extended_s)); + return rc; +} + +static int mhi_netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + int rc = 0; + struct rmnet_ioctl_data_s ioctl_data; + + switch (cmd) { + case RMNET_IOCTL_SET_LLP_IP: /* set RAWIP protocol */ + break; + case RMNET_IOCTL_GET_LLP: /* get link protocol state */ + ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP; + if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data, + sizeof(struct rmnet_ioctl_data_s))) + rc = -EFAULT; + break; + case RMNET_IOCTL_GET_OPMODE: /* get operation mode */ + ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP; + if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data, + sizeof(struct rmnet_ioctl_data_s))) + rc = -EFAULT; + break; + case RMNET_IOCTL_SET_QOS_ENABLE: + rc = -EINVAL; + break; + case RMNET_IOCTL_SET_QOS_DISABLE: + rc = 0; + break; + case RMNET_IOCTL_OPEN: + case RMNET_IOCTL_CLOSE: + /* we just ignore them and return success */ + rc = 0; + break; + case 
RMNET_IOCTL_EXTENDED: + rc = mhi_netdev_ioctl_extended(dev, ifr); + break; + default: + /* don't fail any IOCTL right now */ + rc = 0; + break; + } + + return rc; +} +#endif + +static void mhi_netdev_get_drvinfo (struct net_device *ndev, struct ethtool_drvinfo *info) +{ + //struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + + strlcpy (info->driver, "pcie_mhi", sizeof info->driver); + strlcpy (info->version, PCIE_MHI_DRIVER_VERSION, sizeof info->version); +} + +static const struct ethtool_ops mhi_netdev_ethtool_ops = { + .get_drvinfo = mhi_netdev_get_drvinfo, +}; + +static const struct net_device_ops mhi_netdev_ops_ip = { + .ndo_open = mhi_netdev_open, + .ndo_start_xmit = mhi_netdev_xmit, + //.ndo_do_ioctl = mhi_netdev_ioctl, + .ndo_change_mtu = mhi_netdev_change_mtu, + .ndo_set_mac_address = 0, + .ndo_validate_addr = 0, +}; + +static void mhi_netdev_setup(struct net_device *dev) +{ + dev->netdev_ops = &mhi_netdev_ops_ip; + ether_setup(dev); + + /* set this after calling ether_setup */ + dev->header_ops = 0; /* No header */ + dev->type = ARPHRD_RAWIP; + dev->hard_header_len = 0; + dev->addr_len = 0; + dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + dev->watchdog_timeo = WATCHDOG_TIMEOUT; + + dev->ethtool_ops = &mhi_netdev_ethtool_ops; + memcpy (dev->dev_addr, node_id, sizeof node_id); + dev->flags |= IFF_NOARP; +} + +/* enable mhi_netdev netdev, call only after grabbing mhi_netdev.mutex */ +static int mhi_netdev_enable_iface(struct mhi_netdev *mhi_netdev) +{ + int ret = 0; + char ifalias[IFALIASZ]; + char ifname[IFNAMSIZ]; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; +#ifndef QUECTEL_NO_DTS + struct device_node *of_node = mhi_dev->dev.of_node; +#endif + struct mhi_netdev_priv *mhi_netdev_priv; + + mhi_netdev->alias = 0;//of_alias_get_id(of_node, "mhi-netdev"); + if (mhi_netdev->alias < 0) + mhi_netdev->alias = 0; + +#ifdef QUECTEL_NO_DTS + mhi_netdev->interface_name = "rmnet_mhi"; +#else + + ret = of_property_read_string(of_node, "mhi,interface-name", + &mhi_netdev->interface_name); +#endif + if (ret) + mhi_netdev->interface_name = mhi_netdev_driver.driver.name; + + snprintf(ifalias, sizeof(ifalias), "%s_%04x_%02u.%02u.%02u_%u", + mhi_netdev->interface_name, mhi_dev->dev_id, mhi_dev->domain, + mhi_dev->bus, mhi_dev->slot, mhi_netdev->alias); + + snprintf(ifname, sizeof(ifname), "%s%%d", mhi_netdev->interface_name); + +#ifdef QUECTEL_NO_DTS + mhi_netdev->ethernet_interface = 0; +#else + mhi_netdev->ethernet_interface = of_property_read_bool(of_node, + "mhi,ethernet-interface"); +#endif + rtnl_lock(); + mhi_netdev->ndev = alloc_netdev(sizeof(*mhi_netdev_priv), + ifname, NET_NAME_PREDICTABLE, + mhi_netdev_setup); + if (!mhi_netdev->ndev) { + rtnl_unlock(); + return -ENOMEM; + } + + mhi_netdev->ndev->mtu = mhi_dev->mtu; + SET_NETDEV_DEV(mhi_netdev->ndev, &mhi_dev->dev); + //dev_set_alias(mhi_netdev->ndev, ifalias, strlen(ifalias)); + mhi_netdev_priv = netdev_priv(mhi_netdev->ndev); + mhi_netdev_priv->mhi_netdev = mhi_netdev; + rtnl_unlock(); + + mhi_netdev->napi = devm_kzalloc(&mhi_dev->dev, + sizeof(*mhi_netdev->napi), GFP_KERNEL); + if (!mhi_netdev->napi) { + ret = -ENOMEM; + goto napi_alloc_fail; + } + + netif_napi_add(mhi_netdev->ndev, mhi_netdev->napi, + mhi_netdev_poll, NAPI_POLL_WEIGHT); + ret = register_netdev(mhi_netdev->ndev); + if (ret) { + MSG_ERR("Network device registration failed\n"); + goto net_dev_reg_fail; + } + + napi_enable(mhi_netdev->napi); + + MSG_LOG("Exited.\n"); + + return 0; + +net_dev_reg_fail: + netif_napi_del(mhi_netdev->napi); + +napi_alloc_fail: + 
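	/* editorial note: both error labels funnel here -- on the
	 * register_netdev() failure path netif_napi_del() already ran
	 * above, so only the net_device allocation remains to be undone.
	 */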
free_netdev(mhi_netdev->ndev); + mhi_netdev->ndev = NULL; + + return ret; +} + +static void mhi_netdev_xfer_ul_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + struct sk_buff *skb = mhi_result->buf_addr; + struct net_device *ndev = mhi_netdev->ndev; + + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; + dev_kfree_skb(skb); + + if (netif_queue_stopped(ndev)) + netif_wake_queue(ndev); +} + +static void mhi_netdev_push_skb(struct mhi_netdev *mhi_netdev, + struct mhi_buf *mhi_buf, + struct mhi_result *mhi_result) +{ + struct sk_buff *skb; + + skb = alloc_skb(0, GFP_ATOMIC); + if (!skb) { + __free_pages(mhi_buf->page, mhi_netdev->order); + return; + } + + if (!mhi_netdev->ethernet_interface) { + skb_add_rx_frag(skb, 0, mhi_buf->page, 0, + mhi_result->bytes_xferd, mhi_netdev->mru); + skb->dev = mhi_netdev->ndev; + skb->protocol = mhi_netdev_ip_type_trans(*(u8 *)mhi_buf->buf); + if (skb_linearize(skb)) + return; + } else { + skb_add_rx_frag(skb, 0, mhi_buf->page, ETH_HLEN, + mhi_result->bytes_xferd - ETH_HLEN, + mhi_netdev->mru); + skb->dev = mhi_netdev->ndev; + skb->protocol = mhi_netdev_ip_type_trans(((u8 *)mhi_buf->buf)[ETH_HLEN]); + } + netif_receive_skb(skb); +} + +static void mhi_netdev_xfer_dl_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + struct mhi_netbuf *netbuf = mhi_result->buf_addr; + struct mhi_buf *mhi_buf = &netbuf->mhi_buf; + struct sk_buff *skb; + struct net_device *ndev = mhi_netdev->ndev; + struct device *dev = mhi_dev->dev.parent; + struct mhi_net_chain *chain = mhi_netdev->chain; + + netbuf->unmap(dev, mhi_buf->dma_addr, mhi_buf->len, DMA_FROM_DEVICE); + + /* modem is down, drop the buffer */ + if (mhi_result->transaction_status == -ENOTCONN) { + __free_pages(mhi_buf->page, mhi_netdev->order); + return; + } + + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += mhi_result->bytes_xferd; + + if (unlikely(!chain)) { + mhi_netdev_push_skb(mhi_netdev, mhi_buf, mhi_result); + return; + } + + /* we support chaining */ + skb = alloc_skb(0, GFP_ATOMIC); + if (likely(skb)) { + if (!mhi_netdev->ethernet_interface) { + skb_add_rx_frag(skb, 0, mhi_buf->page, 0, + mhi_result->bytes_xferd, mhi_netdev->mru); + } else { + skb_add_rx_frag(skb, 0, mhi_buf->page, ETH_HLEN, + mhi_result->bytes_xferd - ETH_HLEN, + mhi_netdev->mru); + } + + /* this is first on list */ + if (!chain->head) { + skb->dev = ndev; + if (!mhi_netdev->ethernet_interface) { + skb->protocol = + mhi_netdev_ip_type_trans(*(u8 *)mhi_buf->buf); + } else { + skb->protocol = + mhi_netdev_ip_type_trans(((u8 *)mhi_buf->buf)[ETH_HLEN]); + } + chain->head = skb; + } else { + skb_shinfo(chain->tail)->frag_list = skb; + } + + chain->tail = skb; + } else { + __free_pages(mhi_buf->page, mhi_netdev->order); + } +} + +static void mhi_netdev_status_cb(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + + if (mhi_cb != MHI_CB_PENDING_DATA) + return; + + napi_schedule(mhi_netdev->napi); +} + +#ifdef CONFIG_DEBUG_FS + +struct dentry *dentry; + +static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev) +{ + char node_name[32]; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + + /* Both tx & rx client handle contain same device info */ + snprintf(node_name, sizeof(node_name), "%s_%04x_%02u.%02u.%02u_%u", + mhi_netdev->interface_name, mhi_dev->dev_id, mhi_dev->domain, + 
mhi_dev->bus, mhi_dev->slot, mhi_netdev->alias); + + if (IS_ERR_OR_NULL(dentry)) + return; + + mhi_netdev->dentry = debugfs_create_dir(node_name, dentry); + if (IS_ERR_OR_NULL(mhi_netdev->dentry)) + return; +} + +static void mhi_netdev_create_debugfs_dir(void) +{ + dentry = debugfs_create_dir(MHI_NETDEV_DRIVER_NAME, 0); +} + +#else + +static void mhi_netdev_create_debugfs(struct mhi_netdev_private *mhi_netdev) +{ +} + +static void mhi_netdev_create_debugfs_dir(void) +{ +} + +#endif + +static void mhi_netdev_remove(struct mhi_device *mhi_dev) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + + MSG_LOG("Remove notification received\n"); + + /* rsc parent takes cares of the cleanup */ + if (mhi_netdev->is_rsc_dev) { + mhi_netdev_free_pool(mhi_netdev); + return; + } + + rmnet_data_deinit(mhi_netdev->ndev, 1); + netif_stop_queue(mhi_netdev->ndev); + napi_disable(mhi_netdev->napi); + unregister_netdev(mhi_netdev->ndev); + netif_napi_del(mhi_netdev->napi); + free_netdev(mhi_netdev->ndev); + mhi_netdev_free_pool(mhi_netdev); + + if (!IS_ERR_OR_NULL(mhi_netdev->dentry)) + debugfs_remove_recursive(mhi_netdev->dentry); +} + +static int mhi_netdev_match(struct device *dev, void *data) +{ + /* if phandle dt == device dt, we found a match */ + return (dev->of_node == data); +} + +static void mhi_netdev_clone_dev(struct mhi_netdev *mhi_netdev, + struct mhi_netdev *parent) +{ + mhi_netdev->ndev = parent->ndev; + mhi_netdev->napi = parent->napi; + mhi_netdev->ipc_log = parent->ipc_log; + mhi_netdev->msg_lvl = parent->msg_lvl; + mhi_netdev->ipc_log_lvl = parent->ipc_log_lvl; + mhi_netdev->is_rsc_dev = true; + mhi_netdev->chain = parent->chain; +} + +static int mhi_netdev_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + int ret; + struct mhi_netdev *mhi_netdev, *p_netdev = NULL; +#ifndef QUECTEL_NO_DTS + struct device_node *of_node = mhi_dev->dev.of_node; +#endif + int nr_tre; + char node_name[32]; + struct device_node *phandle; + bool no_chain; + +#ifndef QUECTEL_NO_DTS + if (!of_node) + return -ENODEV; +#endif + + mhi_netdev = devm_kzalloc(&mhi_dev->dev, sizeof(*mhi_netdev), + GFP_KERNEL); + if (!mhi_netdev) + return -ENOMEM; + + mhi_netdev->mhi_dev = mhi_dev; + mhi_device_set_devdata(mhi_dev, mhi_netdev); + +#ifdef QUECTEL_NO_DTS + mhi_netdev->mru = 16384; + ret = 0; +#else + ret = of_property_read_u32(of_node, "mhi,mru", &mhi_netdev->mru); +#endif + if (ret) + return -ENODEV; + + /* MRU must be multiplication of page size */ + mhi_netdev->order = __ilog2_u32(mhi_netdev->mru / PAGE_SIZE); + if ((PAGE_SIZE << mhi_netdev->order) < mhi_netdev->mru) + return -EINVAL; + + /* check if this device shared by a parent device */ +#ifdef QUECTEL_NO_DTS + phandle = NULL; +#else + phandle = of_parse_phandle(of_node, "mhi,rsc-parent", 0); +#endif + if (phandle) { + struct device *dev; + struct mhi_device *pdev; + /* find the parent device */ + dev = driver_find_device(mhi_dev->dev.driver, NULL, phandle, + mhi_netdev_match); + if (!dev) + return -ENODEV; + + /* this device is shared with parent device. so we won't be + * creating a new network interface. 
Clone parent + * information to child node + */ + pdev = to_mhi_device(dev); + p_netdev = mhi_device_get_devdata(pdev); + mhi_netdev_clone_dev(mhi_netdev, p_netdev); + put_device(dev); + } else { + mhi_netdev->msg_lvl = MHI_MSG_LVL_ERROR; +#ifdef QUECTEL_NO_DTS + no_chain = 0; +#else + no_chain = of_property_read_bool(of_node, + "mhi,disable-chain-skb"); +#endif + if (!no_chain) { + mhi_netdev->chain = devm_kzalloc(&mhi_dev->dev, + sizeof(*mhi_netdev->chain), + GFP_KERNEL); + if (!mhi_netdev->chain) + return -ENOMEM; + } + + ret = mhi_netdev_enable_iface(mhi_netdev); + if (ret) + return ret; + + /* create ipc log buffer */ + snprintf(node_name, sizeof(node_name), + "%s_%04x_%02u.%02u.%02u_%u", + mhi_netdev->interface_name, mhi_dev->dev_id, + mhi_dev->domain, mhi_dev->bus, mhi_dev->slot, + mhi_netdev->alias); + mhi_netdev->ipc_log = ipc_log_context_create(IPC_LOG_PAGES, + node_name, 0); + mhi_netdev->ipc_log_lvl = IPC_LOG_LVL; + + mhi_netdev_create_debugfs(mhi_netdev); + } + + /* move mhi channels to start state */ + ret = mhi_prepare_for_transfer(mhi_dev); + if (ret) { + MSG_ERR("Failed to start channels ret %d\n", ret); + goto error_start; + } + + rmnet_data_init(mhi_netdev->ndev, 1); + + /* setup pool size ~2x ring length*/ + nr_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE); + mhi_netdev->pool_size = 1 << __ilog2_u32(nr_tre); + if (nr_tre > mhi_netdev->pool_size) + mhi_netdev->pool_size <<= 1; + mhi_netdev->pool_size <<= 1; + + /* allocate memory pool */ + ret = mhi_netdev_alloc_pool(mhi_netdev); + if (ret) + goto error_start; + + /* link child node with parent node if it's children dev */ + if (p_netdev) + p_netdev->rsc_dev = mhi_netdev; + + /* now we have a pool of buffers allocated, queue to hardware + * by triggering a napi_poll + */ + napi_schedule(mhi_netdev->napi); + + return 0; + +error_start: + if (phandle) + return ret; + + netif_stop_queue(mhi_netdev->ndev); + napi_disable(mhi_netdev->napi); + unregister_netdev(mhi_netdev->ndev); + netif_napi_del(mhi_netdev->napi); + free_netdev(mhi_netdev->ndev); + + return ret; +} + +static const struct mhi_device_id mhi_netdev_match_table[] = { + { .chan = "IP_HW0" }, + { .chan = "IP_HW_ADPL" }, + { .chan = "IP_HW0_RSC" }, + { .chan = "IP_SW0" }, + {}, +}; + +static struct mhi_driver mhi_netdev_driver = { + .id_table = mhi_netdev_match_table, + .probe = mhi_netdev_probe, + .remove = mhi_netdev_remove, + .ul_xfer_cb = mhi_netdev_xfer_ul_cb, + .dl_xfer_cb = mhi_netdev_xfer_dl_cb, + .status_cb = mhi_netdev_status_cb, + .driver = { + .name = "mhi_netdev", + .owner = THIS_MODULE, + } +}; + +static int __init mhi_netdev_init(void) +{ + BUILD_BUG_ON(sizeof(struct mhi_netbuf) > MAX_NETBUF_SIZE); + mhi_netdev_create_debugfs_dir(); + + return mhi_driver_register(&mhi_netdev_driver); +} +//module_init(mhi_netdev_init); + +int __init mhi_device_netdev_init(struct dentry *parent) +{ + BUILD_BUG_ON(sizeof(struct mhi_netbuf) > MAX_NETBUF_SIZE); + + return mhi_netdev_init(); +} + +void mhi_device_netdev_exit(void) +{ +#ifdef CONFIG_DEBUG_FS + debugfs_remove_recursive(dentry); +#endif + mhi_driver_unregister(&mhi_netdev_driver); +} + diff --git a/wwan/driver/quectel_MHI/src/devices/mhi_netdev_quectel.c b/wwan/driver/quectel_MHI/src/devices/mhi_netdev_quectel.c new file mode 100644 index 0000000..1812c02 --- /dev/null +++ b/wwan/driver/quectel_MHI/src/devices/mhi_netdev_quectel.c @@ -0,0 +1,3034 @@ +/* Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +//#define CONFIG_IPQ5018_RATE_CONTROL //Only used with spf11.5 for IPQ5018 +#if defined(CONFIG_IPQ5018_RATE_CONTROL) +//#include +#include +#endif + +#include "../core/mhi.h" +//#define MHI_NETDEV_ONE_CARD_MODE +//#define ANDROID_gki //some fuction not allow used in this TEST + +#ifndef ETH_P_MAP +#define ETH_P_MAP 0xDA1A +#endif + +#if (ETH_P_MAP == 0x00F9) +#undef ETH_P_MAP +#define ETH_P_MAP 0xDA1A +#endif + +#ifndef ARPHRD_RAWIP +#define ARPHRD_RAWIP ARPHRD_NONE +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION( 4,2,0 )) +static bool netdev_is_rx_handler_busy(struct net_device *dev) +{ + ASSERT_RTNL(); + return dev && rtnl_dereference(dev->rx_handler); +} +#endif + +struct rmnet_nss_cb { + int (*nss_create)(struct net_device *dev); + int (*nss_free)(struct net_device *dev); + int (*nss_tx)(struct sk_buff *skb); +}; +static struct rmnet_nss_cb __read_mostly *nss_cb = NULL; +#if defined(CONFIG_PINCTRL_IPQ807x) || defined(CONFIG_PINCTRL_IPQ5018) || defined(CONFIG_PINCTRL_IPQ8074) +//#ifdef CONFIG_RMNET_DATA //spf12.x have no macro defined, just for spf11.x +#define CONFIG_QCA_NSS_DRV +/* define at qsdk/qca/src/linux-4.4/net/rmnet_data/rmnet_data_main.c */ //for spf11.x +/* define at qsdk/qca/src/datarmnet/core/rmnet_config.c */ //for spf12.x +/* set at qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c */ +/* need add DEPENDS:= kmod-rmnet-core in feeds/makefile */ +extern struct rmnet_nss_cb *rmnet_nss_callbacks __rcu __read_mostly; +//#endif +#endif + +static const unsigned char node_id[ETH_ALEN] = {0x02, 0x50, 0xf4, 0x00, 0x00, 0x00}; +static const unsigned char default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3, 0x00, 0x00, 0x00}; + +#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) +#define QUECTEL_BRIDGE_MODE +#endif + +#define QUECTEL_RMNET_MODE + +#ifdef QUECTEL_BRIDGE_MODE +static uint __read_mostly bridge_mode = 0/*|BIT(1)*/; +module_param( bridge_mode, uint, S_IRUGO ); +#endif + +struct qmap_hdr { + u8 cd_rsvd_pad; + u8 mux_id; + u16 pkt_len; +} __packed; +#define QUECTEL_QMAP_MUX_ID 0x81 + +enum rmnet_map_v5_header_type { + RMNET_MAP_HEADER_TYPE_UNKNOWN, + RMNET_MAP_HEADER_TYPE_COALESCING = 0x1, + RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2, + RMNET_MAP_HEADER_TYPE_ENUM_LENGTH +}; + +enum rmnet_map_commands { + RMNET_MAP_COMMAND_NONE, + RMNET_MAP_COMMAND_FLOW_DISABLE, + RMNET_MAP_COMMAND_FLOW_ENABLE, + RMNET_MAP_COMMAND_FLOW_START = 7, + RMNET_MAP_COMMAND_FLOW_END = 8, + /* These should always be the last 2 elements */ + RMNET_MAP_COMMAND_UNKNOWN, + RMNET_MAP_COMMAND_ENUM_LENGTH +}; + +#define RMNET_MAP_COMMAND_REQUEST 0 +#define RMNET_MAP_COMMAND_ACK 1 +#define RMNET_MAP_COMMAND_UNSUPPORTED 2 +#define RMNET_MAP_COMMAND_INVALID 3 + +/* Main QMAP header */ +struct rmnet_map_header { + u8 pad_len:6; + u8 next_hdr:1; + u8 cd_bit:1; + u8 mux_id; + __be16 pkt_len; +} __aligned(1); + +/* QMAP v5 headers */ 
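/* Editorial sketch (not part of the original driver, placed here between the
 * v1 and v5 header definitions): how the QMAP v1 header defined above is
 * typically filled for a payload of `len` bytes. Padding rounds the payload
 * up to a 4-byte multiple, and pkt_len is stored big-endian covering payload
 * plus padding -- compare add_qhdr() further below. The function name is
 * illustrative only.
 */
static inline void qmap_v1_header_sketch(struct rmnet_map_header *h,
					 u8 mux_id, u16 len)
{
	u16 pad = (4 - (len & 3)) & 3;	/* 0..3 trailing pad bytes */

	h->cd_bit = 0;			/* data packet, not a MAP command */
	h->next_hdr = 0;		/* v1: no extension header follows */
	h->pad_len = pad;
	h->mux_id = mux_id;		/* QUECTEL_QMAP_MUX_ID + session index */
	h->pkt_len = htons(len + pad);
}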
+struct rmnet_map_v5_csum_header { + u8 next_hdr:1; + u8 header_type:7; + u8 hw_reserved:7; + u8 csum_valid_required:1; + __be16 reserved; +} __aligned(1); + +struct rmnet_map_control_command { + u8 command_name; + u8 cmd_type:2; + u8 reserved:6; + u16 reserved2; + u32 transaction_id; + union { + struct { + u8 reserved2; + u8 ip_family:2; + u8 reserved:6; + __be16 flow_control_seq_num; + __be32 qos_id; + } flow_control; + u8 data[0]; + }; +} __aligned(1); + +struct mhi_mbim_hdr { + struct usb_cdc_ncm_nth16 nth16; + struct usb_cdc_ncm_ndp16 ndp16; + struct usb_cdc_ncm_dpe16 dpe16[2]; +} __attribute__ ((packed)); + +#define QCUSB_MRECEIVE_MAX_BUFFER_SIZE (1024*32) //maybe 31KB is enough +#define QCUSB_MTRANSMIT_MAX_BUFFER_SIZE (1024*16) +#define NTB_OUT_MAX_DATAGRAMS 16 + +static const struct usb_cdc_ncm_ntb_parameters ncmNTBParams = { + .bmNtbFormatsSupported = USB_CDC_NCM_NTB16_SUPPORTED, + .dwNtbInMaxSize = QCUSB_MRECEIVE_MAX_BUFFER_SIZE, + .wNdpInDivisor = 0x04, + .wNdpInPayloadRemainder = 0x0, + .wNdpInAlignment = 0x4, + + .dwNtbOutMaxSize = QCUSB_MTRANSMIT_MAX_BUFFER_SIZE, + .wNdpOutDivisor = 0x04, + .wNdpOutPayloadRemainder = 0x0, + .wNdpOutAlignment = 0x4, + .wNtbOutMaxDatagrams = NTB_OUT_MAX_DATAGRAMS, +}; + +#if 0 +static void qmap_hex_dump(const char *tag, unsigned char *data, unsigned len) { + uint i; + uint *d = (uint *)data; + + printk(KERN_DEBUG "%s data=%p, len=%x\n", tag, data, len); + len = (len+3)/4; + for (i = 0; i < len; i+=4) { + printk(KERN_DEBUG "%08x %08x %08x %08x %08x\n", i*4, d[i+0], d[i+1], d[i+2], d[i+3]); + } +} +#else +static void qmap_hex_dump(const char *tag, unsigned char *data, unsigned len) { +} +#endif + +static uint __read_mostly mhi_mbim_enabled = 0; +module_param(mhi_mbim_enabled, uint, S_IRUGO); +int mhi_netdev_mbin_enabled(void) { return mhi_mbim_enabled; } + +static uint __read_mostly qmap_mode = 1; +module_param(qmap_mode, uint, S_IRUGO); + +static uint __read_mostly poll_weight = NAPI_POLL_WEIGHT; +module_param(poll_weight, uint, S_IRUGO); + +#define MHI_NETDEV_DRIVER_NAME "mhi_netdev" +#define WATCHDOG_TIMEOUT (30 * HZ) + +#define MSG_VERB(fmt, ...) do { \ + if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_VERBOSE) \ + pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__);\ +} while (0) + +#define MHI_ASSERT(cond, msg) do { \ + if (cond) { \ + MSG_ERR(msg); \ + WARN_ON(cond); \ + } \ +} while (0) + +#define MSG_LOG(fmt, ...) do { \ + if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_INFO) \ + pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__);\ +} while (0) + +#define MSG_ERR(fmt, ...) 
do { \ + if (mhi_netdev->msg_lvl <= MHI_MSG_LVL_ERROR) \ + pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \ +} while (0) + +struct mhi_stats { + u32 rx_int; + u32 tx_full; + u32 tx_pkts; + u32 rx_budget_overflow; + u32 tx_allocated; + u32 rx_allocated; + u32 alloc_failed; +}; + +/* important: do not exceed sk_buf->cb (48 bytes) */ +struct mhi_skb_priv { + void *buf; + size_t size; + struct mhi_netdev *bind_netdev; +}; + +struct skb_data { /* skb->cb is one of these */ + struct mhi_netdev *bind_netdev; + unsigned int length; + unsigned int packets; +}; + +#define MHI_NETDEV_STATUS64 1 + +typedef struct { + uint size; + uint rx_urb_size; + uint ep_type; + uint iface_id; + uint MuxId; + uint ul_data_aggregation_max_datagrams; //0x17 + uint ul_data_aggregation_max_size ;//0x18 + uint dl_minimum_padding; //0x1A +} QMAP_SETTING; + +typedef struct { + unsigned int size; + unsigned int rx_urb_size; + unsigned int ep_type; + unsigned int iface_id; + unsigned int qmap_mode; + unsigned int qmap_version; + unsigned int dl_minimum_padding; + char ifname[8][16]; + unsigned char mux_id[8]; +} RMNET_INFO; + +typedef struct { + u16 tx_seq; + u16 rx_seq; + u32 rx_max; +} MHI_MBIM_CTX; + +enum mhi_net_type { + MHI_NET_UNKNOW, + MHI_NET_RMNET, + MHI_NET_MBIM, + MHI_NET_ETHER +}; + +//#define TS_DEBUG +struct mhi_netdev { + int alias; + struct mhi_device *mhi_dev; + spinlock_t rx_lock; + bool enabled; + rwlock_t pm_lock; /* state change lock */ + int (*rx_queue)(struct mhi_netdev *mhi_netdev, gfp_t gfp_t); + struct delayed_work alloc_work; + int wake; + + struct sk_buff_head tx_allocated; + struct sk_buff_head rx_allocated; + struct sk_buff_head qmap_chain; + struct sk_buff_head skb_chain; +#ifdef TS_DEBUG + uint clear_ts; + struct timespec diff_ts; + struct timespec qmap_ts; + struct timespec skb_ts; +#endif + + MHI_MBIM_CTX mbim_ctx; + + u32 mru; + u32 max_mtu; + const char *interface_name; + struct napi_struct napi; + struct net_device *ndev; + enum mhi_net_type net_type; + struct sk_buff *frag_skb; + bool recycle_buf; + +#if defined(MHI_NETDEV_STATUS64) + struct pcpu_sw_netstats __percpu *stats64; +#endif + struct mhi_stats stats; + + struct dentry *dentry; + enum MHI_DEBUG_LEVEL msg_lvl; + + struct net_device *mpQmapNetDev[8]; + u32 qmap_mode; + u32 qmap_version; // 5 ~ QMAP V1, 9 ~ QMAP V5 + u32 qmap_size; + u32 link_state; + u32 flow_control; + u32 dl_minimum_padding; + +#ifdef QUECTEL_BRIDGE_MODE + uint bridge_mode; + uint bridge_ipv4; + unsigned char bridge_mac[ETH_ALEN]; +#endif + uint use_rmnet_usb; + RMNET_INFO rmnet_info; + +#if defined(CONFIG_IPQ5018_RATE_CONTROL) + u64 first_jiffy; + u64 bytes_received_1; + u64 bytes_received_2; + u32 cntfrq_per_msec; + bool mhi_rate_control; +#endif + + u32 rmnet_map_command_stats[RMNET_MAP_COMMAND_ENUM_LENGTH]; +}; + +struct mhi_netdev_priv { + struct mhi_netdev *mhi_netdev; +}; + +struct qmap_priv { + void *pQmapDev; + struct net_device *real_dev; + struct net_device *self_dev; + u8 offset_id; + u8 mux_id; + u8 qmap_version; // 5~v1, 9~v5 + +#if defined(MHI_NETDEV_STATUS64) + struct pcpu_sw_netstats __percpu *stats64; +#endif + + spinlock_t agg_lock; + struct sk_buff *agg_skb; + unsigned agg_count; + struct timespec64 agg_time; + struct hrtimer agg_hrtimer; + struct work_struct agg_wq; + +#ifdef QUECTEL_BRIDGE_MODE + uint bridge_mode; + uint bridge_ipv4; + unsigned char bridge_mac[ETH_ALEN]; +#endif + uint use_qca_nss; +}; + +static struct mhi_netdev *ndev_to_mhi(struct net_device *ndev) { + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(ndev); + 
struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + return mhi_netdev; +} + +static struct mhi_driver mhi_netdev_driver; +static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev); + +#if 0 +static void mhi_netdev_skb_destructor(struct sk_buff *skb) +{ + struct mhi_skb_priv *skb_priv = (struct mhi_skb_priv *)(skb->cb); + struct mhi_netdev *mhi_netdev = skb_priv->mhi_netdev; + + skb->data = skb->head; + skb_reset_tail_pointer(skb); + skb->len = 0; + MHI_ASSERT(skb->data != skb_priv->buf, "incorrect buf"); + skb_queue_tail(&mhi_netdev->rx_allocated, skb); +} +#endif + +#ifdef QUECTEL_BRIDGE_MODE +static const struct net_device_ops mhi_netdev_ops_ip; +static const struct net_device_ops rmnet_vnd_ops; + +static int is_qmap_netdev(const struct net_device *ndev) { + return ndev->netdev_ops == &rmnet_vnd_ops; +} + +static int bridge_arp_reply(struct net_device *net, struct sk_buff *skb, uint bridge_ipv4) { + struct arphdr *parp; + u8 *arpptr, *sha; + u8 sip[4], tip[4], ipv4[4]; + struct sk_buff *reply = NULL; + + ipv4[0] = (bridge_ipv4 >> 24) & 0xFF; + ipv4[1] = (bridge_ipv4 >> 16) & 0xFF; + ipv4[2] = (bridge_ipv4 >> 8) & 0xFF; + ipv4[3] = (bridge_ipv4 >> 0) & 0xFF; + + parp = arp_hdr(skb); + + if (parp->ar_hrd == htons(ARPHRD_ETHER) && parp->ar_pro == htons(ETH_P_IP) + && parp->ar_op == htons(ARPOP_REQUEST) && parp->ar_hln == 6 && parp->ar_pln == 4) { + arpptr = (u8 *)parp + sizeof(struct arphdr); + sha = arpptr; + arpptr += net->addr_len; /* sha */ + memcpy(sip, arpptr, sizeof(sip)); + arpptr += sizeof(sip); + arpptr += net->addr_len; /* tha */ + memcpy(tip, arpptr, sizeof(tip)); + + pr_info("%s sip = %d.%d.%d.%d, tip=%d.%d.%d.%d, ipv4=%d.%d.%d.%d\n", netdev_name(net), + sip[0], sip[1], sip[2], sip[3], tip[0], tip[1], tip[2], tip[3], ipv4[0], ipv4[1], ipv4[2], ipv4[3]); + //wwan0 sip = 10.151.137.255, tip=10.151.138.0, ipv4=10.151.137.255 +#ifndef ANDROID_gki + if (tip[0] == ipv4[0] && tip[1] == ipv4[1] && (tip[2]&0xFC) == (ipv4[2]&0xFC) && tip[3] != ipv4[3]) + reply = arp_create(ARPOP_REPLY, ETH_P_ARP, *((__be32 *)sip), net, *((__be32 *)tip), sha, default_modem_addr, sha); +#endif + + if (reply) { + skb_reset_mac_header(reply); + __skb_pull(reply, skb_network_offset(reply)); + reply->ip_summed = CHECKSUM_UNNECESSARY; + reply->pkt_type = PACKET_HOST; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) + netif_rx(reply); +#else + netif_rx_ni(reply); +#endif + } + return 1; + } + + return 0; +} + +static struct sk_buff *bridge_mode_tx_fixup(struct net_device *net, struct sk_buff *skb, uint bridge_ipv4, unsigned char *bridge_mac) { + struct ethhdr *ehdr; + const struct iphdr *iph; + + skb_reset_mac_header(skb); + ehdr = eth_hdr(skb); + + if (ehdr->h_proto == htons(ETH_P_ARP)) { + if (bridge_ipv4) + bridge_arp_reply(net, skb, bridge_ipv4); + return NULL; + } + + iph = ip_hdr(skb); + //DBG("iphdr: "); + //PrintHex((void *)iph, sizeof(struct iphdr)); + +// 1 0.000000000 0.0.0.0 255.255.255.255 DHCP 362 DHCP Request - Transaction ID 0xe7643ad7 + if (ehdr->h_proto == htons(ETH_P_IP) && iph->protocol == IPPROTO_UDP && iph->saddr == 0x00000000 && iph->daddr == 0xFFFFFFFF) { + //if (udp_hdr(skb)->dest == htons(67)) //DHCP Request + { + memcpy(bridge_mac, ehdr->h_source, ETH_ALEN); + pr_info("%s PC Mac Address: %02x:%02x:%02x:%02x:%02x:%02x\n", netdev_name(net), + bridge_mac[0], bridge_mac[1], bridge_mac[2], bridge_mac[3], bridge_mac[4], bridge_mac[5]); + } + } + + if (memcmp(ehdr->h_source, bridge_mac, ETH_ALEN)) { + return NULL; + } + + return skb; +} + +static void 
bridge_mode_rx_fixup(struct mhi_netdev *mhi_netdev, struct net_device *net, struct sk_buff *skb) { + uint bridge_mode = 0; + unsigned char *bridge_mac; + + if (mhi_netdev->qmap_mode > 0) { + struct qmap_priv *priv = netdev_priv(net); + bridge_mode = priv->bridge_mode; + bridge_mac = priv->bridge_mac; + } + else { + bridge_mode = mhi_netdev->bridge_mode; + bridge_mac = mhi_netdev->bridge_mac; + } + + if (bridge_mode) + memcpy(eth_hdr(skb)->h_dest, bridge_mac, ETH_ALEN); +} + +static ssize_t bridge_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct net_device *ndev = to_net_dev(dev); + uint bridge_mode = 0; + + if (is_qmap_netdev(ndev)) { + struct qmap_priv *priv = netdev_priv(ndev); + bridge_mode = priv->bridge_mode; + } + else { + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + bridge_mode = mhi_netdev->bridge_mode; + } + + return snprintf(buf, PAGE_SIZE, "%u\n", bridge_mode); +} + +static ssize_t bridge_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct net_device *ndev = to_net_dev(dev); + uint bridge_mode = simple_strtoul(buf, NULL, 0); + + if (ndev->type != ARPHRD_ETHER) { + if (bridge_mode) + netdev_err(ndev, "netdevice is not ARPHRD_ETHER\n"); + return count; + } + + if (is_qmap_netdev(ndev)) { + struct qmap_priv *priv = netdev_priv(ndev); + priv->bridge_mode = bridge_mode; + } + else { + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + mhi_netdev->bridge_mode = bridge_mode; + } + + return count; +} + + +static ssize_t bridge_ipv4_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct net_device *ndev = to_net_dev(dev); + unsigned int bridge_ipv4 = 0; + unsigned char ipv4[4]; + + if (is_qmap_netdev(ndev)) { + struct qmap_priv *priv = netdev_priv(ndev); + bridge_ipv4 = priv->bridge_ipv4; + } + else { + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + bridge_ipv4 = mhi_netdev->bridge_ipv4; + } + + ipv4[0] = (bridge_ipv4 >> 24) & 0xFF; + ipv4[1] = (bridge_ipv4 >> 16) & 0xFF; + ipv4[2] = (bridge_ipv4 >> 8) & 0xFF; + ipv4[3] = (bridge_ipv4 >> 0) & 0xFF; + + return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n", ipv4[0], ipv4[1], ipv4[2], ipv4[3]); +} + +static ssize_t bridge_ipv4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct net_device *ndev = to_net_dev(dev); + + if (is_qmap_netdev(ndev)) { + struct qmap_priv *priv = netdev_priv(ndev); + priv->bridge_ipv4 = simple_strtoul(buf, NULL, 16); + } + else { + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + mhi_netdev->bridge_ipv4 = simple_strtoul(buf, NULL, 16); + } + + return count; +} + +static DEVICE_ATTR(bridge_mode, S_IWUSR | S_IRUGO, bridge_mode_show, bridge_mode_store); +static DEVICE_ATTR(bridge_ipv4, S_IWUSR | S_IRUGO, bridge_ipv4_show, bridge_ipv4_store); + +#ifndef MHI_NETDEV_ONE_CARD_MODE +static struct attribute *pcie_mhi_qmap_sysfs_attrs[] = { + &dev_attr_bridge_mode.attr, + &dev_attr_bridge_ipv4.attr, + NULL, +}; + +static struct attribute_group pcie_mhi_qmap_sysfs_attr_group = { + .attrs = pcie_mhi_qmap_sysfs_attrs, +}; +#endif +#endif + +static struct sk_buff * add_mbim_hdr(struct sk_buff *skb, u8 mux_id) { + struct mhi_mbim_hdr *mhdr; + __le32 sign; + u8 *c; + u16 tci = mux_id - QUECTEL_QMAP_MUX_ID; + unsigned int skb_len = skb->len; + + if (qmap_mode > 1) + tci += 1; //rmnet_mhi0.X map to session X + + if (skb_headroom(skb) < sizeof(struct mhi_mbim_hdr)) { + printk("skb_headroom small! 
headroom is %u, need %zd\n", skb_headroom(skb), sizeof(struct mhi_mbim_hdr));
+		return NULL;
+	}
+
+	skb_push(skb, sizeof(struct mhi_mbim_hdr));
+
+	mhdr = (struct mhi_mbim_hdr *)skb->data;
+
+	//printk("%s %p\n", __func__, skb->data);
+	mhdr->nth16.dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
+	mhdr->nth16.wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
+/*
+	Sequence number. The transmitter of a block shall set this to zero in the first NTB transferred after every 'function reset' event,
+	and shall increment for every NTB subsequently transferred.
+	The effect of an out-of-sequence block on the receiver is not specified.
+	The specification allows the receiver to decide whether to check the sequence number,
+	and to decide how to respond if it's incorrect. The sequence number is primarily supplied for debugging purposes.
+*/
+	//mhdr->nth16.wSequence = cpu_to_le16(mhi_netdev->tx_seq++);
+/*
+	Size of this NTB in bytes. Represented in little-endian form.
+	NTB size (IN/OUT) shall not exceed dwNtbInMaxSize or dwNtbOutMaxSize respectively
+*/
+	mhdr->nth16.wBlockLength = cpu_to_le16(skb->len);
+/*
+	Offset, in little endian, of the first NDP16 from byte zero of the NTB.
+	This value must be a multiple of 4, and must be >= 0x000C
+*/
+	mhdr->nth16.wNdpIndex = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
+
+	sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
+	c = (u8 *)&sign;
+	c[3] = tci;
+
+	mhdr->ndp16.dwSignature = sign;
+	mhdr->ndp16.wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16) * 2);
+	mhdr->ndp16.wNextNdpIndex = 0;
+
+	mhdr->ndp16.dpe16[0].wDatagramIndex = sizeof(struct mhi_mbim_hdr);
+	mhdr->ndp16.dpe16[0].wDatagramLength = skb_len;
+
+	mhdr->ndp16.dpe16[1].wDatagramIndex = 0;
+	mhdr->ndp16.dpe16[1].wDatagramLength = 0;
+
+	return skb;
+}
+
+static struct sk_buff * add_qhdr(struct sk_buff *skb, u8 mux_id) {
+	struct qmap_hdr *qhdr;
+	int pad = 0;
+
+	pad = skb->len%4;
+	if (pad) {
+		pad = 4 - pad;
+		if (skb_tailroom(skb) < pad) {
+			printk("skb_tailroom small!\n");
+			pad = 0;
+		}
+		if (pad)
+			__skb_put(skb, pad);
+	}
+
+	qhdr = (struct qmap_hdr *)skb_push(skb, sizeof(struct qmap_hdr));
+	qhdr->cd_rsvd_pad = pad;
+	qhdr->mux_id = mux_id;
+	qhdr->pkt_len = cpu_to_be16(skb->len - sizeof(struct qmap_hdr));
+
+	return skb;
+}
+
+static struct sk_buff * add_qhdr_v5(struct sk_buff *skb, u8 mux_id) {
+	struct rmnet_map_header *map_header;
+	struct rmnet_map_v5_csum_header *ul_header;
+	u32 padding, map_datalen;
+
+	map_datalen = skb->len;
+	padding = map_datalen%4;
+	if (padding) {
+		padding = 4 - padding;
+		if (skb_tailroom(skb) < padding) {
+			printk("skb_tailroom small!\n");
+			padding = 0;
+		}
+		if (padding)
+			__skb_put(skb, padding);
+	}
+
+	map_header = (struct rmnet_map_header *)skb_push(skb, (sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_v5_csum_header)));
+	map_header->cd_bit = 0;
+	map_header->next_hdr = 1;
+	map_header->pad_len = padding;
+	map_header->mux_id = mux_id;
+	map_header->pkt_len = htons(map_datalen + padding);
+
+	ul_header = (struct rmnet_map_v5_csum_header *)(map_header + 1);
+	memset(ul_header, 0, sizeof(*ul_header));
+	ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD;
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+#if 0 //TODO
+		skb->ip_summed = CHECKSUM_NONE;
+		/* Ask for checksum offloading */
+		ul_header->csum_valid_required = 1;
+#endif
+	}
+
+	return skb;
+}
+
+static void rmnet_map_send_ack(struct mhi_netdev *pQmapDev,
+			unsigned char type,
+			struct rmnet_map_header *map_header)
+{
+	struct rmnet_map_control_command *cmd;
+	struct sk_buff *skb;
+	size_t skb_len = sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_control_command);
+
+	skb = alloc_skb(skb_len, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	skb_put(skb, skb_len);
+	memcpy(skb->data, map_header, skb_len);
+	cmd = (struct rmnet_map_control_command *)(skb->data + sizeof(struct rmnet_map_header));
+	cmd->cmd_type = type & 0x03;
+	skb->protocol = htons(ETH_P_MAP);
+	skb->dev = pQmapDev->ndev;
+	dev_queue_xmit(skb);
+}
+
+static int rmnet_data_vnd_do_flow_control(struct net_device *dev,
+			uint32_t map_flow_id,
+			uint16_t v4_seq,
+			uint16_t v6_seq,
+			int enable)
+{
+	//TODO
+	return 0;
+}
+
+static uint8_t rmnet_map_do_flow_control(struct mhi_netdev *pQmapDev,
+			struct rmnet_map_header *map_header,
+			int enable) {
+	struct net_device *ndev = pQmapDev->ndev;
+	struct rmnet_map_control_command *cmd;
+	struct net_device *vnd;
+	uint8_t mux_id;
+	uint16_t ip_family;
+	uint16_t fc_seq;
+	uint32_t qos_id;
+	int r;
+
+	cmd = (struct rmnet_map_control_command *)(map_header + 1);
+
+	mux_id = map_header->mux_id - QUECTEL_QMAP_MUX_ID;
+	if (mux_id >= pQmapDev->qmap_mode) {
+		netdev_info(ndev, "drop qmap unknown mux_id %x\n", map_header->mux_id);
+		return RMNET_MAP_COMMAND_UNSUPPORTED;
+	}
+
+	vnd = pQmapDev->mpQmapNetDev[mux_id];
+	if (vnd == NULL) {
+		netdev_info(ndev, "drop qmap unknown mux_id %x\n", map_header->mux_id);
+		return RMNET_MAP_COMMAND_UNSUPPORTED;
+	}
+
+	ip_family = cmd->flow_control.ip_family;
+	fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
+	qos_id = ntohl(cmd->flow_control.qos_id);
+
+	if (enable)
+		pQmapDev->flow_control |= (1 << mux_id);
+	else
+		pQmapDev->flow_control &= ~(1 << mux_id);
+	/* Ignore the ip family and pass the sequence number for both v4 and v6
+	 * sequence. User space does not support creating dedicated flows for
+	 * the 2 protocols
+	 */
+	r = rmnet_data_vnd_do_flow_control(vnd, qos_id, fc_seq, fc_seq, enable);
+	netdev_dbg(vnd, "qos_id:0x%08X, ip_family:%hd, fc_seq %hd, en:%d",
+		qos_id, ip_family & 3, fc_seq, enable);
+
+	return RMNET_MAP_COMMAND_ACK;
+}
+
+static void rmnet_data_map_command(struct mhi_netdev *pQmapDev,
+			struct rmnet_map_header *map_header) {
+	struct net_device *ndev = pQmapDev->ndev;
+	struct rmnet_map_control_command *cmd;
+	unsigned char command_name;
+	unsigned char rc = 0;
+
+	cmd = (struct rmnet_map_control_command *)(map_header + 1);
+	command_name = cmd->command_name;
+
+	if (command_name < RMNET_MAP_COMMAND_ENUM_LENGTH)
+		pQmapDev->rmnet_map_command_stats[command_name]++;
+
+	switch (command_name) {
+	case RMNET_MAP_COMMAND_FLOW_ENABLE:
+		rc = rmnet_map_do_flow_control(pQmapDev, map_header, 1);
+		break;
+
+	case RMNET_MAP_COMMAND_FLOW_DISABLE:
+		rc = rmnet_map_do_flow_control(pQmapDev, map_header, 0);
+		break;
+
+	default:
+		pQmapDev->rmnet_map_command_stats[RMNET_MAP_COMMAND_UNKNOWN]++;
+		netdev_info(ndev, "unsupported MAP command: %d", command_name);
+		rc = RMNET_MAP_COMMAND_UNSUPPORTED;
+		break;
+	}
+
+	if (rc == RMNET_MAP_COMMAND_ACK)
+		rmnet_map_send_ack(pQmapDev, rc, map_header);
+
+	return;
+}
+
+#ifndef MHI_NETDEV_ONE_CARD_MODE
+static void rmnet_vnd_upate_rx_stats(struct net_device *net,
+			unsigned rx_packets, unsigned rx_bytes) {
+#if defined(MHI_NETDEV_STATUS64)
+	struct qmap_priv *dev = netdev_priv(net);
+	struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
+
+	u64_stats_update_begin(&stats64->syncp);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0))
+	stats64->rx_packets += rx_packets;
+	stats64->rx_bytes += rx_bytes;
+#else
+	u64_stats_add(&stats64->rx_packets, rx_packets);
+	u64_stats_add(&stats64->rx_bytes, rx_bytes);
+#endif
+	u64_stats_update_end(&stats64->syncp);
+#else
+	net->stats.rx_packets += rx_packets;
+	net->stats.rx_bytes += rx_bytes;
+#endif
+}
+
+static void rmnet_vnd_upate_tx_stats(struct net_device *net,
+			unsigned tx_packets, unsigned tx_bytes) {
+#if defined(MHI_NETDEV_STATUS64)
+	struct qmap_priv *dev = netdev_priv(net);
+	struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
+
+	u64_stats_update_begin(&stats64->syncp);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0))
+	stats64->tx_packets += tx_packets;
+	stats64->tx_bytes += tx_bytes;
+#else
+	u64_stats_add(&stats64->tx_packets, tx_packets);
+	u64_stats_add(&stats64->tx_bytes, tx_bytes);
+#endif
+	u64_stats_update_end(&stats64->syncp);
+#else
+	net->stats.tx_packets += tx_packets;
+	net->stats.tx_bytes += tx_bytes;
+#endif
+}
+
+#if defined(MHI_NETDEV_STATUS64)
+#ifdef ANDROID_gki
+static void _netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
+				const struct net_device_stats *netdev_stats)
+{
+#if BITS_PER_LONG == 64
+	BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
+	memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
+	/* zero out counters that only exist in rtnl_link_stats64 */
+	memset((char *)stats64 + sizeof(*netdev_stats), 0,
+		sizeof(*stats64) - sizeof(*netdev_stats));
+#else
+	size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
+	const unsigned long *src = (const unsigned long *)netdev_stats;
+	u64 *dst = (u64 *)stats64;
+
+	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
+	for (i = 0; i < n; i++)
+		dst[i] = src[i];
+	/* zero out counters that only exist in rtnl_link_stats64 */
+	memset((char *)stats64 + n * sizeof(u64), 0,
+		sizeof(*stats64) - 
n * sizeof(u64)); +#endif +} +#else +static void my_netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, + const struct net_device_stats *netdev_stats) +{ + netdev_stats_to_stats64(stats64, netdev_stats); +} +#endif + +static struct rtnl_link_stats64 *_rmnet_vnd_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats) +{ + struct qmap_priv *dev = netdev_priv(net); + unsigned int start; + int cpu; + + my_netdev_stats_to_stats64(stats, &net->stats); + + if (nss_cb && dev->use_qca_nss) { // rmnet_nss.c:rmnet_nss_tx() will update rx stats + stats->rx_packets = 0; + stats->rx_bytes = 0; + } + + for_each_possible_cpu(cpu) { + struct pcpu_sw_netstats *stats64; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0)) + u64 rx_packets, rx_bytes; + u64 tx_packets, tx_bytes; + + stats64 = per_cpu_ptr(dev->stats64, cpu); + + do { + start = u64_stats_fetch_begin_irq(&stats64->syncp); + rx_packets = stats64->rx_packets; + rx_bytes = stats64->rx_bytes; + tx_packets = stats64->tx_packets; + tx_bytes = stats64->tx_bytes; + } while (u64_stats_fetch_retry_irq(&stats64->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; +#else + u64_stats_t rx_packets, rx_bytes; + u64_stats_t tx_packets, tx_bytes; + + stats64 = per_cpu_ptr(dev->stats64, cpu); + + do { + start = u64_stats_fetch_begin(&stats64->syncp); + rx_packets = stats64->rx_packets; + rx_bytes = stats64->rx_bytes; + tx_packets = stats64->tx_packets; + tx_bytes = stats64->tx_bytes; + } while (u64_stats_fetch_retry(&stats64->syncp, start)); + + stats->rx_packets += u64_stats_read(&rx_packets); + stats->rx_bytes += u64_stats_read(&rx_bytes); + stats->tx_packets += u64_stats_read(&tx_packets); + stats->tx_bytes += u64_stats_read(&tx_bytes); +#endif + } + + return stats; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,10,0 )) //bc1f44709cf27fb2a5766cadafe7e2ad5e9cb221 +static void rmnet_vnd_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats) { + _rmnet_vnd_get_stats64(net, stats); +} +#else +static struct rtnl_link_stats64 *rmnet_vnd_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats) { + return _rmnet_vnd_get_stats64(net, stats); +} +#endif +#endif + +static void rmnet_vnd_tx_agg_work(struct work_struct *work) +{ + struct qmap_priv *priv = + container_of(work, struct qmap_priv, agg_wq); + struct sk_buff *skb = NULL; + unsigned long flags; + + spin_lock_irqsave(&priv->agg_lock, flags); + if (likely(priv->agg_skb)) { + skb = priv->agg_skb; + priv->agg_skb = NULL; + priv->agg_count = 0; + skb->protocol = htons(ETH_P_MAP); + skb->dev = priv->real_dev; + ktime_get_ts64(&priv->agg_time); + } + spin_unlock_irqrestore(&priv->agg_lock, flags); + + if (skb) + dev_queue_xmit(skb); +} + +static enum hrtimer_restart rmnet_vnd_tx_agg_timer_cb(struct hrtimer *timer) +{ + struct qmap_priv *priv = + container_of(timer, struct qmap_priv, agg_hrtimer); + + schedule_work(&priv->agg_wq); + return HRTIMER_NORESTART; +} + +static int rmnet_vnd_tx_agg(struct sk_buff *skb, struct qmap_priv *priv) { + skb->protocol = htons(ETH_P_MAP); + skb->dev = priv->real_dev; + + return dev_queue_xmit(skb); +} + + +static int rmnet_vnd_open(struct net_device *dev) +{ + struct qmap_priv *priv = netdev_priv(dev); + struct net_device *real_dev = priv->real_dev; + + if (!(priv->real_dev->flags & IFF_UP)) + return -ENETDOWN; + + if (netif_carrier_ok(real_dev)) + netif_carrier_on(dev); + + return 0; +} + +static int rmnet_vnd_stop(struct net_device *pNet) +{ 
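	/* editorial note: only the virtual link drops carrier here; the
	 * shared real_dev (and its MHI channel) stays up for the other
	 * QMAP sessions.
	 */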
+	netif_carrier_off(pNet);
+	return 0;
+}
+
+static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
+					struct net_device *pNet)
+{
+	int err;
+	struct qmap_priv *priv = netdev_priv(pNet);
+	struct mhi_netdev *mhi_netdev = ndev_to_mhi(priv->real_dev);
+	int skb_len = skb->len;
+
+	if (netif_queue_stopped(priv->real_dev)) {
+		netif_stop_queue(pNet);
+		return NETDEV_TX_BUSY;
+	}
+
+	//printk("%s 1 skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len);
+	if (pNet->type == ARPHRD_ETHER) {
+		skb_reset_mac_header(skb);
+
+#ifdef QUECTEL_BRIDGE_MODE
+		if (priv->bridge_mode && bridge_mode_tx_fixup(pNet, skb, priv->bridge_ipv4, priv->bridge_mac) == NULL) {
+			dev_kfree_skb_any (skb);
+			return NETDEV_TX_OK;
+		}
+#endif
+
+		if (skb_pull(skb, ETH_HLEN) == NULL) {
+			dev_kfree_skb_any (skb);
+			return NETDEV_TX_OK;
+		}
+	}
+	//printk("%s 2 skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len);
+	if (mhi_netdev->net_type == MHI_NET_MBIM) {
+		if (add_mbim_hdr(skb, priv->mux_id) == NULL) {
+			dev_kfree_skb_any (skb);
+			return NETDEV_TX_OK;
+		}
+	}
+	else {
+		if (priv->qmap_version == 5) {
+			add_qhdr(skb, priv->mux_id);
+		}
+		else if (priv->qmap_version == 9) {
+			add_qhdr_v5(skb, priv->mux_id);
+		}
+		else {
+			dev_kfree_skb_any (skb);
+			return NETDEV_TX_OK;
+		}
+	}
+	//printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len);
+
+	err = rmnet_vnd_tx_agg(skb, priv);
+	if (err == NET_XMIT_SUCCESS) {
+		rmnet_vnd_upate_tx_stats(pNet, 1, skb_len);
+	} else {
+		pNet->stats.tx_errors++;
+	}
+
+	return err;
+}
+
+static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
+{
+	struct mhi_netdev *mhi_netdev;
+
+	mhi_netdev = (struct mhi_netdev *)ndev_to_mhi(rmnet_dev);
+
+	if (mhi_netdev == NULL) {
+		printk("warning, mhi_netdev == null\n");
+		return -EINVAL;
+	}
+
+	if (new_mtu < 0 )
+		return -EINVAL;
+
+	if (new_mtu > mhi_netdev->max_mtu) {
+		printk("warning, set mtu=%d greater than max mtu=%d\n", new_mtu, mhi_netdev->max_mtu);
+		return -EINVAL;
+	}
+
+	rmnet_dev->mtu = new_mtu;
+	return 0;
+}
+
+/* drivers may override default ethtool_ops in their bind() routine */
+static const struct ethtool_ops rmnet_vnd_ethtool_ops = {
+	.get_link = ethtool_op_get_link,
+};
+
+static void rmnet_vnd_rawip_setup(struct net_device *rmnet_dev)
+{
+	rmnet_dev->needed_headroom = 16;
+
+	/* Raw IP mode */
+	rmnet_dev->header_ops = NULL;  /* No header */
+//for Qualcomm's NSS, must set type as ARPHRD_RAWIP, or NSS performance is very bad.
+	rmnet_dev->type = ARPHRD_RAWIP; // modifying the MAC is not supported: dev_set_mac_address() needs ARPHRD_ETHER
+	rmnet_dev->hard_header_len = 0;
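/* Editorial note: the raw-IP vnd set up here carries no link-layer header
 * and, after the flag update below, no broadcast or multicast capability --
 * a capture on it shows bare IP datagrams.
 */
//for Qualcomm's SFE, do not add IFF_POINTOPOINT to the flags, or SFE does not work.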
+ rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); +} + +static const struct net_device_ops rmnet_vnd_ops = { + .ndo_open = rmnet_vnd_open, + .ndo_stop = rmnet_vnd_stop, + .ndo_start_xmit = rmnet_vnd_start_xmit, +#if defined(MHI_NETDEV_STATUS64) + .ndo_get_stats64 = rmnet_vnd_get_stats64, +#endif + .ndo_change_mtu = rmnet_vnd_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static rx_handler_result_t qca_nss_rx_handler(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + + if (!skb) + return RX_HANDLER_CONSUMED; + + //printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + if (skb->pkt_type == PACKET_LOOPBACK) + return RX_HANDLER_PASS; + + /* Check this so that we dont loop around netif_receive_skb */ + if (skb->cb[0] == 1) { + skb->cb[0] = 0; + + return RX_HANDLER_PASS; + } + + if (nss_cb) { + nss_cb->nss_tx(skb); + return RX_HANDLER_CONSUMED; + } + + return RX_HANDLER_PASS; +} +#endif + +static void rmnet_mbim_rx_handler(void *dev, struct sk_buff *skb_in) +{ + struct mhi_netdev *pQmapDev = (struct mhi_netdev *)dev; + struct mhi_netdev *mhi_netdev = (struct mhi_netdev *)dev; + MHI_MBIM_CTX *ctx = &pQmapDev->mbim_ctx; + //struct net_device *ndev = pQmapDev->ndev; + struct usb_cdc_ncm_nth16 *nth16; + int ndpoffset, len; + u16 wSequence; + struct sk_buff_head skb_chain; + struct sk_buff *qmap_skb; + + __skb_queue_head_init(&skb_chain); + + if (skb_in->len < (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16))) { + MSG_ERR("frame too short\n"); + goto error; + } + + nth16 = (struct usb_cdc_ncm_nth16 *)skb_in->data; + + if (nth16->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN)) { + MSG_ERR("invalid NTH16 signature <%#010x>\n", le32_to_cpu(nth16->dwSignature)); + goto error; + } + + len = le16_to_cpu(nth16->wBlockLength); + if (len > ctx->rx_max) { + MSG_ERR("unsupported NTB block length %u/%u\n", len, ctx->rx_max); + goto error; + } + + wSequence = le16_to_cpu(nth16->wSequence); + if (ctx->rx_seq != wSequence) { + MSG_ERR("sequence number glitch prev=%d curr=%d\n", ctx->rx_seq, wSequence); + } + ctx->rx_seq = wSequence + 1; + + ndpoffset = nth16->wNdpIndex; + + while (ndpoffset > 0) { + struct usb_cdc_ncm_ndp16 *ndp16 ; + struct usb_cdc_ncm_dpe16 *dpe16; + int nframes, x; + u8 *c; + u16 tci = 0; + struct net_device *qmap_net; + + if (skb_in->len < (ndpoffset + sizeof(struct usb_cdc_ncm_ndp16))) { + MSG_ERR("invalid NDP offset <%u>\n", ndpoffset); + goto error; + } + + ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset); + + if (le16_to_cpu(ndp16->wLength) < 0x10) { + MSG_ERR("invalid DPT16 length <%u>\n", le16_to_cpu(ndp16->wLength)); + goto error; + } + + nframes = ((le16_to_cpu(ndp16->wLength) - sizeof(struct usb_cdc_ncm_ndp16)) / sizeof(struct usb_cdc_ncm_dpe16)); + + if (skb_in->len < (sizeof(struct usb_cdc_ncm_ndp16) + nframes * (sizeof(struct usb_cdc_ncm_dpe16)))) { + MSG_ERR("Invalid nframes = %d\n", nframes); + goto error; + } + + switch (ndp16->dwSignature & cpu_to_le32(0x00ffffff)) { + case cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN): + c = (u8 *)&ndp16->dwSignature; + tci = c[3]; + /* tag IPS<0> packets too if MBIM_IPS0_VID exists */ + //if (!tci && info->flags & FLAG_IPS0_VLAN) + // tci = MBIM_IPS0_VID; + break; + case cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN): + c = (u8 *)&ndp16->dwSignature; + tci = c[3] + 256; + break; + default: + MSG_ERR("unsupported NDP signature <0x%08x>\n", le32_to_cpu(ndp16->dwSignature)); + goto error; + } + + 
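		/* editorial note: session numbering -- with a single QMAP
		 * session only IPS<0> (tci 0) is valid; with several, the
		 * device numbers them 1..qmap_mode, hence the mapping to
		 * mpQmapNetDev[tci - 1] below.
		 */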
+ if ((qmap_mode == 1 && tci != 0) || (qmap_mode > 1 && tci > qmap_mode)) {
+ MSG_ERR("unsupported tci %d\n", tci);
+ goto error;
+ }
+ tci = abs(tci);
+ qmap_net = pQmapDev->mpQmapNetDev[qmap_mode == 1 ? 0 : tci - 1];
+
+ dpe16 = ndp16->dpe16;
+
+ for (x = 0; x < nframes; x++, dpe16++) {
+ int offset = le16_to_cpu(dpe16->wDatagramIndex);
+ int skb_len = le16_to_cpu(dpe16->wDatagramLength);
+
+ if (offset == 0 || skb_len == 0) {
+ break;
+ }
+
+ /* sanity checking */
+ if (((offset + skb_len) > skb_in->len) || (skb_len > ctx->rx_max)) {
+ MSG_ERR("invalid frame detected (ignored) x=%d, offset=%d, skb_len=%u\n", x, offset, skb_len);
+ goto error;
+ }
+
+ qmap_skb = netdev_alloc_skb(qmap_net, skb_len);
+ if (!qmap_skb) {
+ mhi_netdev->stats.alloc_failed++;
+ //MSG_ERR("netdev_alloc_skb fail\n"); //do not print in softirq
+ goto error;
+ }
+
+ switch (skb_in->data[offset] & 0xf0) {
+ case 0x40:
+#ifdef CONFIG_QCA_NSS_PACKET_FILTER
+ {
+ struct iphdr *ip4h = (struct iphdr *)(&skb_in->data[offset]);
+ if (ip4h->protocol == IPPROTO_ICMP) {
+ qmap_skb->cb[0] = 1;
+ }
+ }
+#endif
+ qmap_skb->protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+#ifdef CONFIG_QCA_NSS_PACKET_FILTER
+ {
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)(&skb_in->data[offset]);
+ if (ip6h->nexthdr == NEXTHDR_ICMP) {
+ qmap_skb->cb[0] = 1;
+ }
+ }
+#endif
+ qmap_skb->protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ MSG_ERR("unknown IP version 0x%02x\n", skb_in->data[offset]);
+ goto error;
+ }
+
+ skb_put(qmap_skb, skb_len);
+ memcpy(qmap_skb->data, skb_in->data + offset, skb_len);
+
+ skb_reset_transport_header(qmap_skb);
+ skb_reset_network_header(qmap_skb);
+ qmap_skb->pkt_type = PACKET_HOST;
+ skb_set_mac_header(qmap_skb, 0);
+
+ if (qmap_skb->dev->type == ARPHRD_ETHER) {
+ skb_push(qmap_skb, ETH_HLEN);
+ skb_reset_mac_header(qmap_skb);
+ memcpy(eth_hdr(qmap_skb)->h_source, default_modem_addr, ETH_ALEN);
+ memcpy(eth_hdr(qmap_skb)->h_dest, qmap_net->dev_addr, ETH_ALEN);
+ eth_hdr(qmap_skb)->h_proto = qmap_skb->protocol;
+#ifdef QUECTEL_BRIDGE_MODE
+ bridge_mode_rx_fixup(pQmapDev, qmap_net, qmap_skb);
+#endif
+ __skb_pull(qmap_skb, ETH_HLEN);
+ }
+
+#ifndef MHI_NETDEV_ONE_CARD_MODE
+ rmnet_vnd_upate_rx_stats(qmap_net, 1, skb_len);
+#endif
+ __skb_queue_tail(&skb_chain, qmap_skb);
+ }
+
+ /* are there more NDPs to process? */
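+ /*
+ * Illustrative sketch of the NTB16 frame walked by this loop (per the
+ * USB CDC NCM/MBIM framing used over MHI):
+ *
+ * skb_in->data
+ * +-------+-----------+----------+-----------+----------+
+ * | NTH16 | datagrams | NDP16 #1 | datagrams | NDP16 #2 |
+ * +-------+-----------+----------+-----------+----------+
+ *
+ * NTH16.wNdpIndex points at the first NDP16; each NDP16 holds an array
+ * of DPE16 {wDatagramIndex, wDatagramLength} entries and a
+ * wNextNdpIndex link to the next NDP16, where 0 terminates the chain.
+ */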
+ ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex);
+ }
+
+error:
+ while ((qmap_skb = __skb_dequeue (&skb_chain))) {
+ netif_receive_skb(qmap_skb);
+ }
+}
+
+static void rmnet_qmi_rx_handler(void *dev, struct sk_buff *skb_in)
+{
+ struct mhi_netdev *pQmapDev = (struct mhi_netdev *)dev;
+ struct net_device *ndev = pQmapDev->ndev;
+ struct sk_buff *qmap_skb;
+ struct sk_buff_head skb_chain;
+ uint dl_minimum_padding = 0;
+
+ if (pQmapDev->qmap_version == 9)
+ dl_minimum_padding = pQmapDev->dl_minimum_padding;
+
+ __skb_queue_head_init(&skb_chain);
+
+ while (skb_in->len > sizeof(struct qmap_hdr)) {
+ struct rmnet_map_header *map_header = (struct rmnet_map_header *)skb_in->data;
+ struct rmnet_map_v5_csum_header *ul_header = NULL;
+ size_t hdr_size = sizeof(struct rmnet_map_header);
+ struct net_device *qmap_net;
+ int pkt_len = ntohs(map_header->pkt_len);
+ int skb_len;
+ __be16 protocol;
+ int mux_id;
+ int skip_nss = 0;
+
+ if (map_header->next_hdr) {
+ ul_header = (struct rmnet_map_v5_csum_header *)(map_header + 1);
+ hdr_size += sizeof(struct rmnet_map_v5_csum_header);
+ }
+
+ skb_len = pkt_len - (map_header->pad_len&0x3F);
+#if 0 //just for debugging the dl_minimum_padding BUG
+ if ((skb_in->data[hdr_size] & 0xf0) == 0x45) {
+ struct iphdr *ip4h = (struct iphdr *)(&skb_in->data[hdr_size]);
+ if (ntohs(ip4h->tot_len) != skb_len) {
+ netdev_info(ndev, "tot_len=%d skb_len=%d\n", ntohs(ip4h->tot_len), skb_len);
+ }
+ }
+#endif
+ skb_len -= dl_minimum_padding;
+
+ mux_id = map_header->mux_id - QUECTEL_QMAP_MUX_ID;
+ if (mux_id >= pQmapDev->qmap_mode) {
+ netdev_info(ndev, "drop qmap packet with unknown mux_id %x\n", map_header->mux_id);
+ goto error_pkt;
+ }
+ mux_id = abs(mux_id);
+ qmap_net = pQmapDev->mpQmapNetDev[mux_id];
+ if (qmap_net == NULL) {
+ netdev_info(ndev, "drop qmap packet with unknown mux_id %x\n", map_header->mux_id);
+ goto skip_pkt;
+ }
+
+ if (skb_len > qmap_net->mtu) {
+ netdev_info(ndev, "drop skb_len=%x larger than qmap mtu=%d\n", skb_len, qmap_net->mtu);
+ goto error_pkt;
+ }
+
+ if (skb_in->len < (pkt_len + hdr_size)) {
+ netdev_info(ndev, "drop truncated qmap packet, len=%d, pkt_len=%d\n", skb_in->len, pkt_len);
+ goto error_pkt;
+ }
+
+ if (map_header->cd_bit) {
+ rmnet_data_map_command(pQmapDev, map_header);
+ goto skip_pkt;
+ }
+
+ switch (skb_in->data[hdr_size] & 0xf0) {
+ case 0x40:
+#ifdef CONFIG_QCA_NSS_PACKET_FILTER
+ {
+ struct iphdr *ip4h = (struct iphdr *)(&skb_in->data[hdr_size]);
+ if (ip4h->protocol == IPPROTO_ICMP) {
+ skip_nss = 1;
+ }
+ }
+#endif
+ protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+#ifdef CONFIG_QCA_NSS_PACKET_FILTER
+ {
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)(&skb_in->data[hdr_size]);
+ if (ip6h->nexthdr == NEXTHDR_ICMP) {
+ skip_nss = 1;
+ }
+ }
+#endif
+ protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ netdev_info(ndev, "unknown IP version 0x%02x\n", skb_in->data[hdr_size]);
+ goto error_pkt;
+ }
+
+//For Qualcomm's SFE, do not use skb_clone(), or SFE's performance is very bad.
+//For Qualcomm's NSS, do not use skb_clone(), or NSS's performance is very bad.
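+/* Each datagram is therefore copied into a fresh skb owned by the mux
+ * netdev: the aggregate MHI buffer can be recycled immediately (see the
+ * rx_allocated reuse in rmnet_rx_handler below), and the flow engines
+ * never see shared skb data. */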
+ qmap_skb = netdev_alloc_skb(qmap_net, skb_len); + if (qmap_skb) { + skb_put(qmap_skb, skb_len); + memcpy(qmap_skb->data, skb_in->data + hdr_size, skb_len); + } + + if (qmap_skb == NULL) { + pQmapDev->stats.alloc_failed++; + //netdev_info(ndev, "fail to alloc skb, pkt_len = %d\n", skb_len); //do not print in softirq + goto error_pkt; + } + + skb_reset_transport_header(qmap_skb); + skb_reset_network_header(qmap_skb); + qmap_skb->pkt_type = PACKET_HOST; + skb_set_mac_header(qmap_skb, 0); + qmap_skb->protocol = protocol; + + if(skip_nss) + qmap_skb->cb[0] = 1; + + if (ul_header && ul_header->header_type == RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD + && ul_header->csum_valid_required) { +#if 0 //TODO + qmap_skb->ip_summed = CHECKSUM_UNNECESSARY; +#endif + } + + if (qmap_skb->dev->type == ARPHRD_ETHER) { + skb_push(qmap_skb, ETH_HLEN); + skb_reset_mac_header(qmap_skb); + memcpy(eth_hdr(qmap_skb)->h_source, default_modem_addr, ETH_ALEN); + memcpy(eth_hdr(qmap_skb)->h_dest, qmap_net->dev_addr, ETH_ALEN); + eth_hdr(qmap_skb)->h_proto = protocol; +#ifdef QUECTEL_BRIDGE_MODE + bridge_mode_rx_fixup(pQmapDev, qmap_net, qmap_skb); +#endif + __skb_pull(qmap_skb, ETH_HLEN); + } + +#ifndef MHI_NETDEV_ONE_CARD_MODE + rmnet_vnd_upate_rx_stats(qmap_net, 1, skb_len); +#endif + __skb_queue_tail(&skb_chain, qmap_skb); + +skip_pkt: + skb_pull(skb_in, pkt_len + hdr_size); + } + +error_pkt: + while ((qmap_skb = __skb_dequeue (&skb_chain))) { + netif_receive_skb(qmap_skb); + } +} + +#ifndef MHI_NETDEV_ONE_CARD_MODE +static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct mhi_netdev *mhi_netdev; + + if (!skb) + goto done; + + //printk("%s skb=%p, protocol=%x, len=%d\n", __func__, skb, skb->protocol, skb->len); + + if (skb->pkt_type == PACKET_LOOPBACK) + return RX_HANDLER_PASS; + + if (skb->protocol != htons(ETH_P_MAP)) { + WARN_ON(1); + return RX_HANDLER_PASS; + } + /* when open hyfi function, run cm will make system crash */ + //dev = rcu_dereference(skb->dev->rx_handler_data); + mhi_netdev = (struct mhi_netdev *)ndev_to_mhi(skb->dev); + + if (mhi_netdev == NULL) { + WARN_ON(1); + return RX_HANDLER_PASS; + } + + if (mhi_netdev->net_type == MHI_NET_MBIM) + rmnet_mbim_rx_handler(mhi_netdev, skb); + else + rmnet_qmi_rx_handler(mhi_netdev, skb); + + if (!skb_cloned(skb)) { + if (skb_queue_len(&mhi_netdev->rx_allocated) < 128) { + skb->data = skb->head; + skb_reset_tail_pointer(skb); + skb->len = 0; + skb_queue_tail(&mhi_netdev->rx_allocated, skb); + return RX_HANDLER_CONSUMED; + } + } + + consume_skb(skb); + +done: + return RX_HANDLER_CONSUMED; +} + +static struct net_device * rmnet_vnd_register_device(struct mhi_netdev *pQmapDev, u8 offset_id, u8 mux_id) +{ + struct net_device *real_dev = pQmapDev->ndev; + struct net_device *qmap_net; + struct qmap_priv *priv; + int err; + int use_qca_nss = !!nss_cb; + unsigned char temp_addr[ETH_ALEN]; + + qmap_net = alloc_etherdev(sizeof(*priv)); + if (!qmap_net) + return NULL; + + SET_NETDEV_DEV(qmap_net, &real_dev->dev); + priv = netdev_priv(qmap_net); + priv->offset_id = offset_id; + priv->real_dev = pQmapDev->ndev; + priv->self_dev = qmap_net; + priv->pQmapDev = pQmapDev; + priv->qmap_version = pQmapDev->qmap_version; + priv->mux_id = mux_id; + sprintf(qmap_net->name, "%.12s.%d", real_dev->name, offset_id + 1); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) + __dev_addr_set(qmap_net, real_dev->dev_addr, ETH_ALEN); +#else + memcpy (qmap_net->dev_addr, real_dev->dev_addr, ETH_ALEN); +#endif + //qmap_net->dev_addr[5] = 
offset_id + 1;
+ //eth_random_addr(qmap_net->dev_addr);
+ memcpy(temp_addr, qmap_net->dev_addr, ETH_ALEN);
+ temp_addr[5] = offset_id + 1;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+ __dev_addr_set(qmap_net, temp_addr, ETH_ALEN);
+#else
+ memcpy(qmap_net->dev_addr, temp_addr, ETH_ALEN);
+#endif
+#if defined(MHI_NETDEV_STATUS64)
+ priv->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!priv->stats64)
+ goto out_free_newdev;
+#endif
+
+#ifdef QUECTEL_BRIDGE_MODE
+ priv->bridge_mode = !!(pQmapDev->bridge_mode & BIT(offset_id));
+ qmap_net->sysfs_groups[0] = &pcie_mhi_qmap_sysfs_attr_group;
+ if (priv->bridge_mode)
+ use_qca_nss = 0;
+#endif
+
+ priv->agg_skb = NULL;
+ priv->agg_count = 0;
+ hrtimer_init(&priv->agg_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ priv->agg_hrtimer.function = rmnet_vnd_tx_agg_timer_cb;
+ INIT_WORK(&priv->agg_wq, rmnet_vnd_tx_agg_work);
+ ktime_get_ts64(&priv->agg_time);
+ spin_lock_init(&priv->agg_lock);
+ priv->use_qca_nss = 0;
+
+ qmap_net->ethtool_ops = &rmnet_vnd_ethtool_ops;
+ qmap_net->netdev_ops = &rmnet_vnd_ops;
+ qmap_net->flags |= IFF_NOARP;
+ qmap_net->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)
+ qmap_net->max_mtu = pQmapDev->max_mtu;
+#endif
+
+ if (nss_cb && use_qca_nss) {
+ rmnet_vnd_rawip_setup(qmap_net);
+ }
+#ifdef CONFIG_PINCTRL_IPQ9574
+ rmnet_vnd_rawip_setup(qmap_net);
+#endif
+ if (pQmapDev->net_type == MHI_NET_MBIM) {
+ qmap_net->needed_headroom = sizeof(struct mhi_mbim_hdr);
+ }
+
+ err = register_netdev(qmap_net);
+ pr_info("%s(%s)=%d\n", __func__, qmap_net->name, err);
+ if (err == -EEXIST) {
+ //'ifdown wan' for openwrt, why?
+ }
+ if (err < 0)
+ goto out_free_newdev;
+
+ netif_device_attach (qmap_net);
+ netif_carrier_off(qmap_net);
+
+ if (nss_cb && use_qca_nss) {
+ int rc = nss_cb->nss_create(qmap_net);
+ WARN_ON(rc);
+ if (rc) {
+ /* Log, but don't fail the device creation */
+ netdev_err(qmap_net, "Device will not use NSS path: %d\n", rc);
+ } else {
+ priv->use_qca_nss = 1;
+ netdev_info(qmap_net, "NSS context created\n");
+ rtnl_lock();
+ netdev_rx_handler_register(qmap_net, qca_nss_rx_handler, NULL);
+ rtnl_unlock();
+ }
+ }
+
+ return qmap_net;
+
+out_free_newdev:
+ free_netdev(qmap_net);
+ return NULL;
+}
+
+static void rmnet_vnd_unregister_device(struct net_device *qmap_net) {
+ struct qmap_priv *priv;
+ unsigned long flags;
+
+ pr_info("%s(%s)\n", __func__, qmap_net->name);
+ netif_carrier_off(qmap_net);
+
+ priv = netdev_priv(qmap_net);
+ hrtimer_cancel(&priv->agg_hrtimer);
+ cancel_work_sync(&priv->agg_wq);
+
+ spin_lock_irqsave(&priv->agg_lock, flags);
+ if (priv->agg_skb) {
+ kfree_skb(priv->agg_skb);
+ priv->agg_skb = NULL;
+ priv->agg_count = 0;
+ }
+ spin_unlock_irqrestore(&priv->agg_lock, flags);
+
+ if (nss_cb && priv->use_qca_nss) {
+ rtnl_lock();
+ netdev_rx_handler_unregister(qmap_net);
+ rtnl_unlock();
+ nss_cb->nss_free(qmap_net);
+ }
+#if defined(MHI_NETDEV_STATUS64)
+ free_percpu(priv->stats64);
+#endif
+ unregister_netdev (qmap_net);
+ free_netdev(qmap_net);
+}
+#endif
+
+static void rmnet_info_set(struct mhi_netdev *pQmapDev, RMNET_INFO *rmnet_info)
+{
+ rmnet_info->size = sizeof(RMNET_INFO);
+ rmnet_info->rx_urb_size = pQmapDev->qmap_size;
+ rmnet_info->ep_type = 3; //DATA_EP_TYPE_PCIE
+ rmnet_info->iface_id = 4;
+ rmnet_info->qmap_mode = pQmapDev->qmap_mode;
+ rmnet_info->qmap_version = pQmapDev->qmap_version;
+ rmnet_info->dl_minimum_padding = 0;
+}
+
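+/*
+ * The attributes below appear under /sys/class/net/<ndev>/. An
+ * illustrative usage sketch (the device name depends on the platform):
+ *
+ * cat /sys/class/net/rmnet_mhi0/qmap_mode # number of muxed sessions
+ * echo 1 > /sys/class/net/rmnet_mhi0/link_state # session 1 carrier on
+ * echo 0x81 > /sys/class/net/rmnet_mhi0/link_state # session 1 carrier off
+ *
+ * With qmap_mode > 1 the low nibble selects the session and values above
+ * 0x80 clear it; with qmap_mode == 1 any non-zero value means link up.
+ * See link_state_store() below for the exact semantics.
+ */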
+static ssize_t qmap_mode_show(struct device *dev, struct device_attribute *attr, char *buf) {
+ struct net_device *ndev = to_net_dev(dev);
+ struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mhi_netdev->qmap_mode);
+}
+
+static DEVICE_ATTR(qmap_mode, S_IRUGO, qmap_mode_show, NULL);
+
+static ssize_t qmap_size_show(struct device *dev, struct device_attribute *attr, char *buf) {
+ struct net_device *ndev = to_net_dev(dev);
+ struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mhi_netdev->qmap_size);
+}
+
+static DEVICE_ATTR(qmap_size, S_IRUGO, qmap_size_show, NULL);
+
+static ssize_t link_state_show(struct device *dev, struct device_attribute *attr, char *buf) {
+ struct net_device *ndev = to_net_dev(dev);
+ struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev);
+
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", mhi_netdev->link_state);
+}
+
+static ssize_t link_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) {
+ struct net_device *ndev = to_net_dev(dev);
+ struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev);
+ //struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+ unsigned link_state = 0;
+ unsigned old_link = mhi_netdev->link_state;
+ uint offset_id = 0;
+
+ link_state = simple_strtoul(buf, NULL, 0);
+ if (mhi_netdev->qmap_mode > 1) {
+ offset_id = ((link_state&0xF) - 1);
+
+ if (0 < link_state && link_state <= mhi_netdev->qmap_mode)
+ mhi_netdev->link_state |= (1 << offset_id);
+ else if (0x80 < link_state && link_state <= (0x80 + mhi_netdev->qmap_mode))
+ mhi_netdev->link_state &= ~(1 << offset_id);
+ }
+ else {
+ mhi_netdev->link_state = !!link_state;
+ }
+
+ if (old_link != mhi_netdev->link_state) {
+ struct net_device *qmap_net = mhi_netdev->mpQmapNetDev[offset_id];
+
+ if (mhi_netdev->link_state)
+ netif_carrier_on(mhi_netdev->ndev);
+ else {
+ netif_carrier_off(mhi_netdev->ndev);
+ }
+
+ if (qmap_net) {
+ if (mhi_netdev->link_state & (1 << offset_id))
+ netif_carrier_on(qmap_net);
+ else
+ netif_carrier_off(qmap_net);
+ }
+
+ dev_info(dev, "link_state 0x%x -> 0x%x\n", old_link, mhi_netdev->link_state);
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(link_state, S_IWUSR | S_IRUGO, link_state_show, link_state_store);
+
+static struct attribute *pcie_mhi_sysfs_attrs[] = {
+ &dev_attr_qmap_mode.attr,
+ &dev_attr_qmap_size.attr,
+ &dev_attr_link_state.attr,
+#ifdef QUECTEL_BRIDGE_MODE
+ &dev_attr_bridge_mode.attr,
+ &dev_attr_bridge_ipv4.attr,
+#endif
+ NULL,
+};
+
+static struct attribute_group pcie_mhi_sysfs_attr_group = {
+ .attrs = pcie_mhi_sysfs_attrs,
+};
+
+static void mhi_netdev_upate_rx_stats(struct mhi_netdev *mhi_netdev,
+ unsigned rx_packets, unsigned rx_bytes) {
+#if defined(MHI_NETDEV_STATUS64)
+ struct pcpu_sw_netstats *stats64 = this_cpu_ptr(mhi_netdev->stats64);
+
+ u64_stats_update_begin(&stats64->syncp);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0))
+ stats64->rx_packets += rx_packets;
+ stats64->rx_bytes += rx_bytes;
+#else
+ u64_stats_add(&stats64->rx_packets, rx_packets);
+ u64_stats_add(&stats64->rx_bytes, rx_bytes);
+#endif
+ u64_stats_update_end(&stats64->syncp);
+#else
+ mhi_netdev->ndev->stats.rx_packets += rx_packets;
+ mhi_netdev->ndev->stats.rx_bytes += rx_bytes;
+#endif
+}
+
+static void mhi_netdev_upate_tx_stats(struct mhi_netdev *mhi_netdev,
+ unsigned tx_packets, unsigned tx_bytes) {
+#if defined(MHI_NETDEV_STATUS64)
+ struct pcpu_sw_netstats *stats64 = this_cpu_ptr(mhi_netdev->stats64);
+
+ u64_stats_update_begin(&stats64->syncp);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0))
+ stats64->tx_packets += tx_packets;
+ stats64->tx_bytes += tx_bytes;
+#else
+ u64_stats_add(&stats64->tx_packets, tx_packets);
+ u64_stats_add(&stats64->tx_bytes, tx_bytes);
+#endif
+ u64_stats_update_end(&stats64->syncp);
+#else
+ mhi_netdev->ndev->stats.tx_packets += tx_packets;
+ mhi_netdev->ndev->stats.tx_bytes += tx_bytes;
+#endif
+}
+
+static __be16 mhi_netdev_ip_type_trans(u8 data)
+{
+ __be16 protocol = 0;
+
+ /* determine L3 protocol */
+ switch (data & 0xf0) {
+ case 0x40:
+ protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ protocol = htons(ETH_P_MAP);
+ break;
+ }
+
+ return protocol;
+}
+
+static int mhi_netdev_alloc_skb(struct mhi_netdev *mhi_netdev, gfp_t gfp_t)
+{
+ u32 cur_mru = mhi_netdev->mru;
+ struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+ struct mhi_skb_priv *skb_priv;
+ int ret;
+ struct sk_buff *skb;
+ int no_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
+ int i;
+
+ for (i = 0; i < no_tre; i++) {
+ skb = skb_dequeue(&mhi_netdev->rx_allocated);
+ if (!skb) {
+ skb = alloc_skb(/*32+*/cur_mru, gfp_t);
+ if (skb)
+ mhi_netdev->stats.rx_allocated++;
+ }
+ if (!skb)
+ return -ENOMEM;
+
+ read_lock_bh(&mhi_netdev->pm_lock);
+ if (unlikely(!mhi_netdev->enabled)) {
+ MSG_ERR("Interface not enabled\n");
+ ret = -EIO;
+ goto error_queue;
+ }
+
+ skb_priv = (struct mhi_skb_priv *)skb->cb;
+ skb_priv->buf = skb->data;
+ skb_priv->size = cur_mru;
+ skb_priv->bind_netdev = mhi_netdev;
+ skb->dev = mhi_netdev->ndev;
+ //skb_reserve(skb, 32); //for ethernet header
+
+ spin_lock_bh(&mhi_netdev->rx_lock);
+ ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, skb,
+ skb_priv->size, MHI_EOT);
+ spin_unlock_bh(&mhi_netdev->rx_lock);
+
+ if (ret) {
+ skb_priv->bind_netdev = NULL;
+ MSG_ERR("Failed to queue skb, ret:%d\n", ret);
+ ret = -EIO;
+ goto error_queue;
+ }
+
+ read_unlock_bh(&mhi_netdev->pm_lock);
+ }
+
+ return 0;
+
+error_queue:
+ skb->destructor = NULL;
+ read_unlock_bh(&mhi_netdev->pm_lock);
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+static void mhi_netdev_alloc_work(struct work_struct *work)
+{
+ struct mhi_netdev *mhi_netdev = container_of(work, struct mhi_netdev,
+ alloc_work.work);
+ /* on -ENOMEM, re-queue this work and retry after a short delay; that
+ * should give the system enough time to reclaim freed memory back.
+ */
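+ /* This worker runs in process context, so GFP_KERNEL is safe here; the
+ * NAPI poll path calls rx_queue() with GFP_ATOMIC and falls back to
+ * this worker only when the atomic allocation fails. */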
+ int ret;
+
+ MSG_LOG("Entered\n");
+
+ ret = mhi_netdev_alloc_skb(mhi_netdev, GFP_KERNEL);
+ if (ret == -ENOMEM) {
+ /* re-queue and try again later */
+ schedule_delayed_work(&mhi_netdev->alloc_work, msecs_to_jiffies(20));
+ return;
+ }
+
+ MSG_LOG("Exit with status:%d\n", ret);
+}
+
+static void mhi_netdev_dealloc(struct mhi_netdev *mhi_netdev)
+{
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(&mhi_netdev->rx_allocated);
+ while (skb) {
+ skb->destructor = NULL;
+ kfree_skb(skb);
+ skb = skb_dequeue(&mhi_netdev->rx_allocated);
+ }
+}
+
+static int mhi_netdev_poll(struct napi_struct *napi, int budget)
+{
+ struct net_device *dev = napi->dev;
+ struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev);
+ struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev;
+ struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+ struct sk_buff *skb;
+ int rx_work = 0;
+ int ret;
+
+ MSG_VERB("Entered\n");
+
+ rx_work = mhi_poll(mhi_dev, budget);
+
+ if (rx_work < 0) {
+ MSG_ERR("Error polling ret:%d\n", rx_work);
+ napi_complete(napi);
+ return 0;
+ }
+
+ if (mhi_netdev->net_type == MHI_NET_MBIM || mhi_netdev->net_type == MHI_NET_RMNET) {
+ while ((skb = skb_dequeue (&mhi_netdev->qmap_chain))) {
+#ifdef MHI_NETDEV_ONE_CARD_MODE
+ int recly_skb = 0;
+
+ mhi_netdev_upate_rx_stats(mhi_netdev, 1, skb->len);
+ if (mhi_netdev->net_type == MHI_NET_MBIM)
+ rmnet_mbim_rx_handler(mhi_netdev, skb);
+ else
+ rmnet_qmi_rx_handler(mhi_netdev, skb);
+
+ if (!skb_cloned(skb)) {
+ if (skb_queue_len(&mhi_netdev->rx_allocated) < 128) {
+ skb->data = skb->head;
+ skb_reset_tail_pointer(skb);
+ skb->len = 0;
+ skb_queue_tail(&mhi_netdev->rx_allocated, skb);
+ recly_skb = 1;
+ }
+ }
+ if (recly_skb == 0)
+ dev_kfree_skb(skb);
+#else
+ mhi_netdev_upate_rx_stats(mhi_netdev, 1, skb->len);
+ skb->dev = mhi_netdev->ndev;
+ skb->protocol = htons(ETH_P_MAP);
+ netif_receive_skb(skb);
+#endif
+ }
+ }
+ else if (mhi_netdev->net_type == MHI_NET_ETHER) {
+ while ((skb = skb_dequeue (&mhi_netdev->qmap_chain))) {
+ mhi_netdev_upate_rx_stats(mhi_netdev, 1, skb->len);
+ skb->dev = mhi_netdev->ndev;
+ skb->protocol = mhi_netdev_ip_type_trans(skb->data[0]);
+ netif_receive_skb(skb);
+ }
+ }
+
+ /* queue new buffers */
+ if (!delayed_work_pending(&mhi_netdev->alloc_work)) {
+ ret = mhi_netdev->rx_queue(mhi_netdev, GFP_ATOMIC);
+ if (ret == -ENOMEM) {
+ //MSG_LOG("out of tre, queuing bg worker\n"); //do not print in softirq
+ mhi_netdev->stats.alloc_failed++;
+ schedule_delayed_work(&mhi_netdev->alloc_work, msecs_to_jiffies(20));
+ }
+ }
+
+ /* complete work if the number of packets processed is less than the budget */
+ if (rx_work < budget)
+ napi_complete(napi);
+
+ MSG_VERB("polled %d pkts\n", rx_work);
+
+ return rx_work;
+}
+
+static int mhi_netdev_open(struct net_device *ndev)
+{
+ struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev);
+ struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+
+ MSG_LOG("Opened net dev interface\n");
+
+ /* tx queue may not necessarily be stopped already
+ * so stop the queue if tx path is not enabled
+ */
+ if (!mhi_dev->ul_chan)
+ netif_stop_queue(ndev);
+ else
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int mhi_netdev_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev);
+ struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+
+ if (new_mtu < 0 || mhi_dev->mtu < new_mtu)
+ return -EINVAL;
+
+ ndev->mtu = new_mtu;
+ return 0;
+} + +static netdev_tx_t mhi_netdev_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct mhi_netdev_priv *mhi_netdev_priv = netdev_priv(dev); + struct mhi_netdev *mhi_netdev = mhi_netdev_priv->mhi_netdev; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + int res = 0; + struct skb_data *entry = (struct skb_data *)(skb->cb); + + entry->packets = 1; + entry->length = skb->len; + entry->bind_netdev = mhi_netdev; + + MSG_VERB("Entered\n"); + + //printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + //qmap_hex_dump(__func__, skb->data, 32); + +#ifdef MHI_NETDEV_ONE_CARD_MODE + //printk("%s dev->type=%d\n", __func__, dev->type); + + if (dev->type == ARPHRD_ETHER) { + skb_reset_mac_header(skb); + +#ifdef QUECTEL_BRIDGE_MODE + if (mhi_netdev->bridge_mode && bridge_mode_tx_fixup(dev, skb, mhi_netdev->bridge_ipv4, mhi_netdev->bridge_mac) == NULL) { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } +#endif + + if ((mhi_netdev->net_type == MHI_NET_RMNET || mhi_netdev->net_type == MHI_NET_MBIM) + && (skb_pull(skb, ETH_HLEN) == NULL)) { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } + } + + if (mhi_netdev->net_type == MHI_NET_MBIM) { + if (add_mbim_hdr(skb, QUECTEL_QMAP_MUX_ID) == NULL) { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } + } + else if (mhi_netdev->net_type == MHI_NET_RMNET) { + if (mhi_netdev->qmap_version == 5) { + add_qhdr(skb, QUECTEL_QMAP_MUX_ID); + } + else if (mhi_netdev->qmap_version == 9) { + add_qhdr_v5(skb, QUECTEL_QMAP_MUX_ID); + } + else { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } + } +#else + if ((mhi_netdev->net_type == MHI_NET_RMNET || mhi_netdev->net_type == MHI_NET_MBIM) + && skb->protocol != htons(ETH_P_MAP)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } +#endif + + if (mhi_netdev->net_type == MHI_NET_MBIM) { + struct mhi_mbim_hdr *mhdr = (struct mhi_mbim_hdr *)skb->data; + mhdr->nth16.wSequence = cpu_to_le16(mhi_netdev->mbim_ctx.tx_seq++); + } + + if (unlikely(mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE) < 16)) { + u32 i = 0; + for (i = 0; i < mhi_netdev->qmap_mode; i++) { + struct net_device *qmap_net = mhi_netdev->mpQmapNetDev[i]; + if (qmap_net) { + netif_stop_queue(qmap_net); + } + } + + netif_stop_queue(dev); + } + + res = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, skb, skb->len, + MHI_EOT); + + //printk("%s transfer res=%d\n", __func__, res); + if (unlikely(res)) { + dev_kfree_skb_any(skb); + dev->stats.tx_errors++; + } + + MSG_VERB("Exited\n"); + + return NETDEV_TX_OK; +} + +#if defined(MHI_NETDEV_STATUS64) +static struct rtnl_link_stats64 * _mhi_netdev_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats) +{ + struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev); + + unsigned int start; + int cpu; + + netdev_stats_to_stats64(stats, &ndev->stats); + + for_each_possible_cpu(cpu) { + struct pcpu_sw_netstats *stats64; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0)) + u64 rx_packets, rx_bytes; + u64 tx_packets, tx_bytes; + + stats64 = per_cpu_ptr(mhi_netdev->stats64, cpu); + + do { + start = u64_stats_fetch_begin_irq(&stats64->syncp); + rx_packets = stats64->rx_packets; + rx_bytes = stats64->rx_bytes; + tx_packets = stats64->tx_packets; + tx_bytes = stats64->tx_bytes; + } while (u64_stats_fetch_retry_irq(&stats64->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; +#else + u64_stats_t rx_packets, rx_bytes; + u64_stats_t tx_packets, 
tx_bytes;
+
+ stats64 = per_cpu_ptr(mhi_netdev->stats64, cpu);
+
+ do {
+ start = u64_stats_fetch_begin(&stats64->syncp);
+ rx_packets = stats64->rx_packets;
+ rx_bytes = stats64->rx_bytes;
+ tx_packets = stats64->tx_packets;
+ tx_bytes = stats64->tx_bytes;
+ } while (u64_stats_fetch_retry(&stats64->syncp, start));
+
+ stats->rx_packets += u64_stats_read(&rx_packets);
+ stats->rx_bytes += u64_stats_read(&rx_bytes);
+ stats->tx_packets += u64_stats_read(&tx_packets);
+ stats->tx_bytes += u64_stats_read(&tx_bytes);
+#endif
+ }
+
+ return stats;
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,10,0 )) //bc1f44709cf27fb2a5766cadafe7e2ad5e9cb221
+static void mhi_netdev_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats) {
+ _mhi_netdev_get_stats64(ndev, stats);
+}
+#else
+static struct rtnl_link_stats64 * mhi_netdev_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats) {
+ _mhi_netdev_get_stats64(ndev, stats);
+ return stats;
+}
+#endif
+#endif
+
+static int qmap_setting_store(struct mhi_netdev *mhi_netdev, QMAP_SETTING *qmap_settings, size_t size) {
+ if (qmap_settings->size != size) {
+ netdev_err(mhi_netdev->ndev, "ERROR: qmap_settings.size does not match!\n");
+ return -EOPNOTSUPP;
+ }
+
+ mhi_netdev->dl_minimum_padding = qmap_settings->dl_minimum_padding;
+
+ return 0;
+}
+
+static int qmap_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) {
+ struct mhi_netdev *mhi_netdev = ndev_to_mhi(dev);
+ int rc = -EOPNOTSUPP;
+ uint link_state = 0;
+ QMAP_SETTING qmap_settings = {0};
+
+ switch (cmd) {
+ case 0x89F1: //SIOCDEVPRIVATE
+ rc = copy_from_user(&link_state, ifr->ifr_ifru.ifru_data, sizeof(link_state));
+ if (!rc) {
+ char buf[32];
+ snprintf(buf, sizeof(buf), "%u", link_state);
+ link_state_store(&dev->dev, NULL, buf, strlen(buf));
+ }
+ break;
+
+ case 0x89F2: //SIOCDEVPRIVATE
+ rc = copy_from_user(&qmap_settings, ifr->ifr_ifru.ifru_data, sizeof(qmap_settings));
+ if (!rc) {
+ rc = qmap_setting_store(mhi_netdev, &qmap_settings, sizeof(qmap_settings));
+ }
+ break;
+
+ case 0x89F3: //SIOCDEVPRIVATE
+ if (mhi_netdev->use_rmnet_usb) {
+ rc = copy_to_user(ifr->ifr_ifru.ifru_data, &mhi_netdev->rmnet_info, sizeof(RMNET_INFO));
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 5,14,0 )) //b9067f5dc4a07c8e24e01a1b277c6722d91be39e
+#define use_ndo_siocdevprivate
+#endif
+#ifdef use_ndo_siocdevprivate
+static int qmap_ndo_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd) {
+ return qmap_ndo_do_ioctl(dev, ifr, cmd);
+}
+#endif
+
+static const struct net_device_ops mhi_netdev_ops_ip = {
+ .ndo_open = mhi_netdev_open,
+ .ndo_start_xmit = mhi_netdev_xmit,
+ //.ndo_do_ioctl = mhi_netdev_ioctl,
+ .ndo_change_mtu = mhi_netdev_change_mtu,
+#if defined(MHI_NETDEV_STATUS64)
+ .ndo_get_stats64 = mhi_netdev_get_stats64,
+#endif
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = qmap_ndo_do_ioctl,
+#ifdef use_ndo_siocdevprivate
+ .ndo_siocdevprivate = qmap_ndo_siocdevprivate,
+#endif
+};
+
+static void mhi_netdev_get_drvinfo (struct net_device *ndev, struct ethtool_drvinfo *info)
+{
+ //struct mhi_netdev *mhi_netdev = ndev_to_mhi(ndev);
+
+ strlcpy (info->driver, "pcie_mhi", sizeof info->driver);
+ strlcpy (info->version, PCIE_MHI_DRIVER_VERSION, sizeof info->version);
+}
+
+static const struct ethtool_ops mhi_netdev_ethtool_ops = {
+ .get_drvinfo = mhi_netdev_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+static void 
mhi_netdev_setup(struct net_device *dev) +{ + dev->netdev_ops = &mhi_netdev_ops_ip; + ether_setup(dev); + + dev->ethtool_ops = &mhi_netdev_ethtool_ops; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) + __dev_addr_set (dev, node_id, sizeof node_id); +#else + memcpy (dev->dev_addr, node_id, sizeof node_id); +#endif + /* set this after calling ether_setup */ + dev->header_ops = 0; /* No header */ + dev->hard_header_len = 0; + dev->type = ARPHRD_NONE; + dev->addr_len = 0; + dev->flags |= IFF_NOARP; + dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); //POINTOPOINT will make SFE work wrong + dev->watchdog_timeo = WATCHDOG_TIMEOUT; + //on OpenWrt, if set rmnet_mhi0.1 as WAN, '/sbin/netifd' will auto create VLAN for rmnet_mhi0 + dev->features |= (NETIF_F_VLAN_CHALLENGED); + +#ifdef MHI_NETDEV_ONE_CARD_MODE + if (mhi_mbim_enabled) { + dev->needed_headroom = sizeof(struct mhi_mbim_hdr); + } +#endif +} + +/* enable mhi_netdev netdev, call only after grabbing mhi_netdev.mutex */ +static int mhi_netdev_enable_iface(struct mhi_netdev *mhi_netdev) +{ + int ret = 0; +#if 0 + char ifalias[IFALIASZ]; +#endif + char ifname[IFNAMSIZ]; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + int no_tre; + + MSG_LOG("Prepare the channels for transfer\n"); + + ret = mhi_prepare_for_transfer(mhi_dev); + if (ret) { + MSG_ERR("Failed to start TX chan ret %d\n", ret); + goto mhi_failed_to_start; + } + + /* first time enabling the node */ + if (!mhi_netdev->ndev) { + struct mhi_netdev_priv *mhi_netdev_priv; + +#if 0 + snprintf(ifalias, sizeof(ifalias), "%s_%04x_%02u.%02u.%02u_%u", + mhi_netdev->interface_name, mhi_dev->dev_id, + mhi_dev->domain, mhi_dev->bus, mhi_dev->slot, + mhi_netdev->alias); +#endif + + snprintf(ifname, sizeof(ifname), "%s%d", + mhi_netdev->interface_name, mhi_netdev->mhi_dev->mhi_cntrl->cntrl_idx); + + rtnl_lock(); +#ifdef NET_NAME_PREDICTABLE + mhi_netdev->ndev = alloc_netdev(sizeof(*mhi_netdev_priv), + ifname, NET_NAME_PREDICTABLE, + mhi_netdev_setup); +#else + mhi_netdev->ndev = alloc_netdev(sizeof(*mhi_netdev_priv), + ifname, + mhi_netdev_setup); +#endif + + if (!mhi_netdev->ndev) { + ret = -ENOMEM; + rtnl_unlock(); + goto net_dev_alloc_fail; + } + + //mhi_netdev->ndev->mtu = mhi_dev->mtu; + SET_NETDEV_DEV(mhi_netdev->ndev, &mhi_dev->dev); +#if 0 + dev_set_alias(mhi_netdev->ndev, ifalias, strlen(ifalias)); +#endif + mhi_netdev_priv = netdev_priv(mhi_netdev->ndev); + mhi_netdev_priv->mhi_netdev = mhi_netdev; + + if (mhi_netdev->net_type == MHI_NET_RMNET || mhi_netdev->net_type == MHI_NET_MBIM) { +#ifdef QUECTEL_BRIDGE_MODE + mhi_netdev->bridge_mode = bridge_mode; +#endif + mhi_netdev->ndev->sysfs_groups[0] = &pcie_mhi_sysfs_attr_group; + } + else if (mhi_netdev->net_type == MHI_NET_ETHER) { + mhi_netdev->ndev->mtu = mhi_netdev->mru; + } + rtnl_unlock(); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0) + mhi_netdev->ndev->max_mtu = mhi_netdev->max_mtu; //first net card +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) + netif_napi_add_weight(mhi_netdev->ndev, &mhi_netdev->napi, mhi_netdev_poll, poll_weight); +#else + netif_napi_add(mhi_netdev->ndev, &mhi_netdev->napi, mhi_netdev_poll, poll_weight); +#endif + ret = register_netdev(mhi_netdev->ndev); + if (ret) { + MSG_ERR("Network device registration failed\n"); + goto net_dev_reg_fail; + } + + netif_carrier_off(mhi_netdev->ndev); + } + + write_lock_irq(&mhi_netdev->pm_lock); + mhi_netdev->enabled = true; + write_unlock_irq(&mhi_netdev->pm_lock); + + /* queue buffer for rx path */ + no_tre = mhi_get_no_free_descriptors(mhi_dev, 
DMA_FROM_DEVICE); + ret = mhi_netdev_alloc_skb(mhi_netdev, GFP_KERNEL); + if (ret) + schedule_delayed_work(&mhi_netdev->alloc_work, msecs_to_jiffies(20)); + + napi_enable(&mhi_netdev->napi); + + MSG_LOG("Exited.\n"); + + return 0; + +net_dev_reg_fail: + netif_napi_del(&mhi_netdev->napi); + free_netdev(mhi_netdev->ndev); + mhi_netdev->ndev = NULL; + +net_dev_alloc_fail: + mhi_unprepare_from_transfer(mhi_dev); + +mhi_failed_to_start: + MSG_ERR("Exited ret %d.\n", ret); + + return ret; +} + +static void mhi_netdev_xfer_ul_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + struct sk_buff *skb = mhi_result->buf_addr; + struct net_device *ndev = mhi_netdev->ndev; + struct skb_data *entry = (struct skb_data *)(skb->cb); + + if (entry->bind_netdev != mhi_netdev) { + MSG_ERR("%s error!\n", __func__); + return; + } + + if (likely(mhi_result->transaction_status == 0)) { + mhi_netdev_upate_tx_stats(mhi_netdev, entry->packets, entry->length); + + if (netif_queue_stopped(ndev) && mhi_netdev->enabled + && mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE) > 32) { + int i = 0; + + netif_wake_queue(ndev); + for (i = 0; i < mhi_netdev->qmap_mode; i++) { + struct net_device *qmap_net = mhi_netdev->mpQmapNetDev[i]; + if (qmap_net) { + if (netif_queue_stopped(qmap_net)) + netif_wake_queue(qmap_net); + } + } + } + } + + entry->bind_netdev = NULL; + entry->packets = 1; + entry->length = 0; + dev_kfree_skb(skb); +} + +static void mhi_netdev_xfer_dl_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + struct sk_buff *skb = mhi_result->buf_addr; + struct mhi_skb_priv *skb_priv = (struct mhi_skb_priv *)(skb->cb); + + if (unlikely(skb_priv->bind_netdev != mhi_netdev)) { + MSG_ERR("%s error!\n", __func__); + return; + } + + if (unlikely(mhi_result->transaction_status)) { + if (mhi_result->transaction_status != -ENOTCONN) + MSG_ERR("%s transaction_status = %d!\n", __func__, mhi_result->transaction_status); + skb_priv->bind_netdev = NULL; + dev_kfree_skb(skb); + return; + } + +#if defined(CONFIG_IPQ5018_RATE_CONTROL) + if (likely(mhi_netdev->mhi_rate_control)) { + u32 time_interval = 0; + u32 time_difference = 0; + u32 cntfrq; + u64 second_jiffy; + u64 bytes_received_2; + struct net_device *ndev = mhi_netdev->ndev; + + if (mhi_netdev->first_jiffy) { + #if LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0) + second_jiffy = arch_counter_get_cntvct(); + #else + second_jiffy = __arch_counter_get_cntvct(); + #endif + bytes_received_2 = mhi_netdev->bytes_received_2; + if ((second_jiffy > mhi_netdev->first_jiffy) && + (bytes_received_2 > mhi_netdev->bytes_received_1)) { + + time_difference = (second_jiffy - mhi_netdev->first_jiffy); + time_interval = (time_difference / mhi_netdev->cntfrq_per_msec); + + /* 1.8Gbps is 225,000,000bytes per second */ + /* We wills sample at 100ms interval */ + /* For 1ms 225000 bytes */ + /* For 100ms 22,500,000 bytes */ + /* For 10ms 2,250,000 bytes */ + + /* 1.7Gbps is 212,500,000bytes per second */ + /* We wills sample at 100ms interval */ + /* For 1ms 212500 bytes */ + /* For 100ms 21,250,000 bytes */ + /* For 10ms 2,125,000 bytes */ + + /* 1.6Gbps is 200,000,000bytes per second */ + /* We wills sample at 100ms interval */ + /* For 1ms 200,000 bytes */ + /* For 100ms 20,000,000 bytes */ + /* For 10ms 2,000,000 bytes */ + + if (time_interval < 100) { + if ((bytes_received_2 - mhi_netdev->bytes_received_1) > 22500000) { + 
ndev->stats.rx_dropped ++; + dev_kfree_skb(skb); + return; + } + } else { + mhi_netdev->first_jiffy = second_jiffy; + mhi_netdev->bytes_received_1 = bytes_received_2; + } + } else { + mhi_netdev->first_jiffy = second_jiffy; + mhi_netdev->bytes_received_1 = bytes_received_2; + } + } else { + #if LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0) + mhi_netdev->first_jiffy = arch_counter_get_cntvct(); + #else + mhi_netdev->first_jiffy = __arch_counter_get_cntvct(); + #endif + + cntfrq = arch_timer_get_cntfrq(); + mhi_netdev->cntfrq_per_msec = cntfrq / 1000; + } + mhi_netdev->bytes_received_2 += mhi_result->bytes_xferd; + } +#endif + +#if 0 + { + static size_t bytes_xferd = 0; + if (mhi_result->bytes_xferd > bytes_xferd) { + bytes_xferd = mhi_result->bytes_xferd; + printk(KERN_DEBUG "bytes_xferd=%zd\n", bytes_xferd); + } + } +#endif + + skb_put(skb, mhi_result->bytes_xferd); + + qmap_hex_dump(__func__, skb->data, skb->len); + + skb_priv->bind_netdev = NULL; + skb_queue_tail(&mhi_netdev->qmap_chain, skb); +} + +static void mhi_netdev_status_cb(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + + if (mhi_cb != MHI_CB_PENDING_DATA) + return; + + if (napi_schedule_prep(&mhi_netdev->napi)) { + __napi_schedule(&mhi_netdev->napi); + mhi_netdev->stats.rx_int++; + return; + } +} + +#ifdef CONFIG_DEBUG_FS + +struct dentry *mhi_netdev_debugfs_dentry; + +static int mhi_netdev_init_debugfs_states_show(struct seq_file *m, void *d) +{ + struct mhi_netdev *mhi_netdev = m->private; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + +#ifdef TS_DEBUG + struct timespec now_ts, diff_ts; + getnstimeofday(&now_ts); + diff_ts = timespec_sub(now_ts, mhi_netdev->diff_ts); + mhi_netdev->diff_ts = now_ts; +#endif + + seq_printf(m, + "tx_tre:%d rx_tre:%d qmap_chain:%u skb_chain:%u tx_allocated:%u rx_allocated:%u\n", + mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE), + mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE), + mhi_netdev->qmap_chain.qlen, + mhi_netdev->skb_chain.qlen, + mhi_netdev->tx_allocated.qlen, + mhi_netdev->rx_allocated.qlen); + + seq_printf(m, + "netif_queue_stopped:%d, link_state:0x%x, flow_control:0x%x\n", + netif_queue_stopped(mhi_netdev->ndev), mhi_netdev->link_state, mhi_netdev->flow_control); + + seq_printf(m, + "rmnet_map_command_stats: %u, %u, %u, %u, %u, %u, %u, %u, %u, %u\n", + mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_NONE], + mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_FLOW_DISABLE], + mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_FLOW_ENABLE], + mhi_netdev->rmnet_map_command_stats[3], + mhi_netdev->rmnet_map_command_stats[4], + mhi_netdev->rmnet_map_command_stats[5], + mhi_netdev->rmnet_map_command_stats[6], + mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_FLOW_START], + mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_FLOW_END], + mhi_netdev->rmnet_map_command_stats[RMNET_MAP_COMMAND_UNKNOWN]); + +#ifdef TS_DEBUG + seq_printf(m, + "qmap_ts:%ld.%ld, skb_ts:%ld.%ld, diff_ts:%ld.%ld\n", + mhi_netdev->qmap_ts.tv_sec, mhi_netdev->qmap_ts.tv_nsec, + mhi_netdev->skb_ts.tv_sec, mhi_netdev->skb_ts.tv_nsec, + diff_ts.tv_sec, diff_ts.tv_nsec); + mhi_netdev->clear_ts = 1; +#endif + + return 0; +} + +static int mhi_netdev_init_debugfs_states_open(struct inode *inode, + struct file *fp) +{ + return single_open(fp, mhi_netdev_init_debugfs_states_show, inode->i_private); +} + +static const struct file_operations mhi_netdev_debugfs_state_ops = { + .open = mhi_netdev_init_debugfs_states_open, + 
.release = single_release, + .read = seq_read, +}; + +static int mhi_netdev_debugfs_trigger_reset(void *data, u64 val) +{ + struct mhi_netdev *mhi_netdev = data; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + int ret; + + MSG_LOG("Triggering channel reset\n"); + + /* disable the interface so no data processing */ + write_lock_irq(&mhi_netdev->pm_lock); + mhi_netdev->enabled = false; + write_unlock_irq(&mhi_netdev->pm_lock); + napi_disable(&mhi_netdev->napi); + + /* disable all hardware channels */ + mhi_unprepare_from_transfer(mhi_dev); + + /* clean up all alocated buffers */ + mhi_netdev_dealloc(mhi_netdev); + + MSG_LOG("Restarting iface\n"); + + ret = mhi_netdev_enable_iface(mhi_netdev); + if (ret) + return ret; + + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(mhi_netdev_debugfs_trigger_reset_fops, NULL, + mhi_netdev_debugfs_trigger_reset, "%llu\n"); + +static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev) +{ + char node_name[32]; + int i; + const umode_t mode = 0600; + struct mhi_device *mhi_dev = mhi_netdev->mhi_dev; + struct dentry *dentry = mhi_netdev_debugfs_dentry; + + const struct { + char *name; + u32 *ptr; + } debugfs_table[] = { + { + "rx_int", + &mhi_netdev->stats.rx_int + }, + { + "tx_full", + &mhi_netdev->stats.tx_full + }, + { + "tx_pkts", + &mhi_netdev->stats.tx_pkts + }, + { + "rx_budget_overflow", + &mhi_netdev->stats.rx_budget_overflow + }, + { + "rx_allocated", + &mhi_netdev->stats.rx_allocated + }, + { + "tx_allocated", + &mhi_netdev->stats.tx_allocated + }, + { + "alloc_failed", + &mhi_netdev->stats.alloc_failed + }, + { + NULL, NULL + }, + }; + + /* Both tx & rx client handle contain same device info */ + snprintf(node_name, sizeof(node_name), "%s_%04x_%02u.%02u.%02u_%u", + mhi_netdev->interface_name, mhi_dev->dev_id, mhi_dev->domain, + mhi_dev->bus, mhi_dev->slot, mhi_netdev->alias); + + if (IS_ERR_OR_NULL(dentry)) + return; + + mhi_netdev->dentry = debugfs_create_dir(node_name, dentry); + if (IS_ERR_OR_NULL(mhi_netdev->dentry)) + return; + + debugfs_create_u32("msg_lvl", mode, mhi_netdev->dentry, + (u32 *)&mhi_netdev->msg_lvl); + + /* Add debug stats table */ + for (i = 0; debugfs_table[i].name; i++) { + debugfs_create_u32(debugfs_table[i].name, mode, + mhi_netdev->dentry, + debugfs_table[i].ptr); + } + + debugfs_create_file("reset", mode, mhi_netdev->dentry, mhi_netdev, + &mhi_netdev_debugfs_trigger_reset_fops); + debugfs_create_file("states", 0444, mhi_netdev->dentry, mhi_netdev, + &mhi_netdev_debugfs_state_ops); +} + +static void mhi_netdev_create_debugfs_dir(struct dentry *parent) +{ + mhi_netdev_debugfs_dentry = debugfs_create_dir(MHI_NETDEV_DRIVER_NAME, parent); +} + +#else + +static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev) +{ +} + +static void mhi_netdev_create_debugfs_dir(struct dentry *parent) +{ +} + +#endif + +static void mhi_netdev_remove(struct mhi_device *mhi_dev) +{ + struct mhi_netdev *mhi_netdev = mhi_device_get_devdata(mhi_dev); + struct sk_buff *skb; + + MSG_LOG("Remove notification received\n"); + + write_lock_irq(&mhi_netdev->pm_lock); + mhi_netdev->enabled = false; + write_unlock_irq(&mhi_netdev->pm_lock); + + if (mhi_netdev->use_rmnet_usb) { +#ifndef MHI_NETDEV_ONE_CARD_MODE + unsigned i; + + for (i = 0; i < mhi_netdev->qmap_mode; i++) { + if (mhi_netdev->mpQmapNetDev[i]) { + rmnet_vnd_unregister_device(mhi_netdev->mpQmapNetDev[i]); + mhi_netdev->mpQmapNetDev[i] = NULL; + } + } + + rtnl_lock(); +#ifdef ANDROID_gki + if (mhi_netdev->ndev && rtnl_dereference(mhi_netdev->ndev->rx_handler)) +#else + if 
(netdev_is_rx_handler_busy(mhi_netdev->ndev)) +#endif + netdev_rx_handler_unregister(mhi_netdev->ndev); + rtnl_unlock(); +#endif + } + + while ((skb = skb_dequeue (&mhi_netdev->skb_chain))) + dev_kfree_skb_any(skb); + while ((skb = skb_dequeue (&mhi_netdev->qmap_chain))) + dev_kfree_skb_any(skb); + while ((skb = skb_dequeue (&mhi_netdev->rx_allocated))) + dev_kfree_skb_any(skb); + while ((skb = skb_dequeue (&mhi_netdev->tx_allocated))) + dev_kfree_skb_any(skb); + + napi_disable(&mhi_netdev->napi); + netif_napi_del(&mhi_netdev->napi); + mhi_netdev_dealloc(mhi_netdev); + unregister_netdev(mhi_netdev->ndev); +#if defined(MHI_NETDEV_STATUS64) + free_percpu(mhi_netdev->stats64); +#endif + free_netdev(mhi_netdev->ndev); + flush_delayed_work(&mhi_netdev->alloc_work); + + if (!IS_ERR_OR_NULL(mhi_netdev->dentry)) + debugfs_remove_recursive(mhi_netdev->dentry); +} + +static int mhi_netdev_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + int ret; + struct mhi_netdev *mhi_netdev; + + mhi_netdev = devm_kzalloc(&mhi_dev->dev, sizeof(*mhi_netdev), + GFP_KERNEL); + if (!mhi_netdev) + return -ENOMEM; + + if (!strcmp(id->chan, "IP_HW0")) { + if (mhi_mbim_enabled) + mhi_netdev->net_type = MHI_NET_MBIM; + else + mhi_netdev->net_type = MHI_NET_RMNET; + } + else if (!strcmp(id->chan, "IP_SW0")) { + mhi_netdev->net_type = MHI_NET_ETHER; + } + else { + return -EINVAL; + } + + mhi_netdev->alias = 0; + + mhi_netdev->mhi_dev = mhi_dev; + mhi_device_set_devdata(mhi_dev, mhi_netdev); + + mhi_netdev->mru = (15*1024); ///etc/data/qnicorn_config.xml dataformat_agg_dl_size 15*1024 + mhi_netdev->max_mtu = mhi_netdev->mru - (sizeof(struct rmnet_map_v5_csum_header) + sizeof(struct rmnet_map_header)); + if (mhi_netdev->net_type == MHI_NET_MBIM) { + mhi_netdev->mru = ncmNTBParams.dwNtbInMaxSize; + mhi_netdev->mbim_ctx.rx_max = mhi_netdev->mru; + mhi_netdev->max_mtu = mhi_netdev->mru - sizeof(struct mhi_mbim_hdr); + } + else if (mhi_netdev->net_type == MHI_NET_ETHER) { + mhi_netdev->mru = 8*1024; + mhi_netdev->max_mtu = mhi_netdev->mru; + } + mhi_netdev->qmap_size = mhi_netdev->mru; + +#if defined(MHI_NETDEV_STATUS64) + mhi_netdev->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!mhi_netdev->stats64) + return -ENOMEM; +#endif + + if (!strcmp(id->chan, "IP_HW0")) + mhi_netdev->interface_name = "rmnet_mhi"; + else if (!strcmp(id->chan, "IP_SW0")) + mhi_netdev->interface_name = "mhi_swip"; + else + mhi_netdev->interface_name = id->chan; + + mhi_netdev->qmap_mode = qmap_mode; + mhi_netdev->qmap_version = 5; + mhi_netdev->use_rmnet_usb = 1; + if ((mhi_dev->vendor == 0x17cb && mhi_dev->dev_id == 0x0306) + || (mhi_dev->vendor == 0x17cb && mhi_dev->dev_id == 0x0308) + || (mhi_dev->vendor == 0x1eac && mhi_dev->dev_id == 0x1004) + ) { + mhi_netdev->qmap_version = 9; + } + if (mhi_netdev->net_type == MHI_NET_ETHER) { + mhi_netdev->qmap_mode = 1; + mhi_netdev->qmap_version = 0; + mhi_netdev->use_rmnet_usb = 0; + } + rmnet_info_set(mhi_netdev, &mhi_netdev->rmnet_info); + + mhi_netdev->rx_queue = mhi_netdev_alloc_skb; + + spin_lock_init(&mhi_netdev->rx_lock); + rwlock_init(&mhi_netdev->pm_lock); + INIT_DELAYED_WORK(&mhi_netdev->alloc_work, mhi_netdev_alloc_work); + skb_queue_head_init(&mhi_netdev->qmap_chain); + skb_queue_head_init(&mhi_netdev->skb_chain); + skb_queue_head_init(&mhi_netdev->tx_allocated); + skb_queue_head_init(&mhi_netdev->rx_allocated); + + mhi_netdev->msg_lvl = MHI_MSG_LVL_INFO; + + /* setup network interface */ + ret = mhi_netdev_enable_iface(mhi_netdev); + if (ret) { + 
pr_err("Error mhi_netdev_enable_iface ret:%d\n", ret); + return ret; + } + + mhi_netdev_create_debugfs(mhi_netdev); + + if (mhi_netdev->net_type == MHI_NET_ETHER) { + mhi_netdev->mpQmapNetDev[0] = mhi_netdev->ndev; + netif_carrier_on(mhi_netdev->ndev); + } + else if (mhi_netdev->use_rmnet_usb) { +#ifdef MHI_NETDEV_ONE_CARD_MODE + mhi_netdev->mpQmapNetDev[0] = mhi_netdev->ndev; + strcpy(mhi_netdev->rmnet_info.ifname[0], mhi_netdev->mpQmapNetDev[0]->name); + mhi_netdev->rmnet_info.mux_id[0] = QUECTEL_QMAP_MUX_ID; +#else + unsigned i; + + for (i = 0; i < mhi_netdev->qmap_mode; i++) { + u8 mux_id = QUECTEL_QMAP_MUX_ID+i; + mhi_netdev->mpQmapNetDev[i] = rmnet_vnd_register_device(mhi_netdev, i, mux_id); + if (mhi_netdev->mpQmapNetDev[i]) { + strcpy(mhi_netdev->rmnet_info.ifname[i], mhi_netdev->mpQmapNetDev[i]->name); + mhi_netdev->rmnet_info.mux_id[i] = mux_id; + } + } + + rtnl_lock(); + /* when open hyfi function, run cm will make system crash */ + //netdev_rx_handler_register(mhi_netdev->ndev, rmnet_rx_handler, mhi_netdev); + netdev_rx_handler_register(mhi_netdev->ndev, rmnet_rx_handler, NULL); + rtnl_unlock(); +#endif + } + +#if defined(CONFIG_IPQ5018_RATE_CONTROL) + mhi_netdev->mhi_rate_control = 1; +#endif + + return 0; +} + +static const struct mhi_device_id mhi_netdev_match_table[] = { + { .chan = "IP_HW0" }, + { .chan = "IP_SW0" }, + // ADPL do not register as a netcard. xingduo.du 2023-02-20 + // { .chan = "IP_HW_ADPL" }, + { }, +}; + +static struct mhi_driver mhi_netdev_driver = { + .id_table = mhi_netdev_match_table, + .probe = mhi_netdev_probe, + .remove = mhi_netdev_remove, + .ul_xfer_cb = mhi_netdev_xfer_ul_cb, + .dl_xfer_cb = mhi_netdev_xfer_dl_cb, + .status_cb = mhi_netdev_status_cb, + .driver = { + .name = "mhi_netdev", + .owner = THIS_MODULE, + } +}; + +int __init mhi_device_netdev_init(struct dentry *parent) +{ +#ifdef CONFIG_QCA_NSS_DRV + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (!nss_cb) { + printk(KERN_ERR "mhi_device_netdev_init: driver load must after '/etc/modules.d/42-rmnet-nss'\n"); + } +#endif + + mhi_netdev_create_debugfs_dir(parent); + + return mhi_driver_register(&mhi_netdev_driver); +} + +void mhi_device_netdev_exit(void) +{ +#ifdef CONFIG_DEBUG_FS + debugfs_remove_recursive(mhi_netdev_debugfs_dentry); +#endif + mhi_driver_unregister(&mhi_netdev_driver); +} diff --git a/wwan/driver/quectel_MHI/src/devices/mhi_satellite.c b/wwan/driver/quectel_MHI/src/devices/mhi_satellite.c new file mode 100644 index 0000000..d1071ec --- /dev/null +++ b/wwan/driver/quectel_MHI/src/devices/mhi_satellite.c @@ -0,0 +1,1153 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2019, The Linux Foundation. All rights reserved.*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MHI_SAT_DRIVER_NAME "mhi_satellite" + +static bool mhi_sat_defer_init = true; /* set by default */ + +/* logging macros */ +#define IPC_LOG_PAGES (10) +#define IPC_LOG_LVL (MHI_MSG_LVL_INFO) +#define KLOG_LVL (MHI_MSG_LVL_ERROR) + +#define MHI_SUBSYS_LOG(fmt, ...) do { \ + if (!subsys) \ + break; \ + if (mhi_sat_driver.klog_lvl <= MHI_MSG_LVL_INFO) \ + pr_info("[I][%s][%s] " fmt, __func__, subsys->name, \ + ##__VA_ARGS__);\ + if (subsys->ipc_log && mhi_sat_driver.ipc_log_lvl <= \ + MHI_MSG_LVL_INFO) \ + ipc_log_string(subsys->ipc_log, "[I][%s] " fmt, __func__, \ + ##__VA_ARGS__); \ +} while (0) + +#define MHI_SAT_LOG(fmt, ...) 
do { \ + if (!subsys || !sat_cntrl) \ + break; \ + if (mhi_sat_driver.klog_lvl <= MHI_MSG_LVL_INFO) \ + pr_info("[I][%s][%s][%x] " fmt, __func__, subsys->name, \ + sat_cntrl->dev_id, ##__VA_ARGS__);\ + if (subsys->ipc_log && mhi_sat_driver.ipc_log_lvl <= \ + MHI_MSG_LVL_INFO) \ + ipc_log_string(subsys->ipc_log, "[I][%s][%x] " fmt, __func__, \ + sat_cntrl->dev_id, ##__VA_ARGS__); \ +} while (0) + +#define MHI_SAT_ERR(fmt, ...) do { \ + if (!subsys || !sat_cntrl) \ + break; \ + if (mhi_sat_driver.klog_lvl <= MHI_MSG_LVL_ERROR) \ + pr_err("[E][%s][%s][%x] " fmt, __func__, subsys->name, \ + sat_cntrl->dev_id, ##__VA_ARGS__); \ + if (subsys->ipc_log && mhi_sat_driver.ipc_log_lvl <= \ + MHI_MSG_LVL_ERROR) \ + ipc_log_string(subsys->ipc_log, "[E][%s][%x] " fmt, __func__, \ + sat_cntrl->dev_id, ##__VA_ARGS__); \ +} while (0) + +#define MHI_SAT_ASSERT(cond, msg) do { \ + if (cond) \ + panic(msg); \ +} while (0) + +/* mhi sys error command */ +#define MHI_TRE_CMD_SYS_ERR_PTR (0) +#define MHI_TRE_CMD_SYS_ERR_D0 (0) +#define MHI_TRE_CMD_SYS_ERR_D1 (MHI_PKT_TYPE_SYS_ERR_CMD << 16) + +/* mhi state change event */ +#define MHI_TRE_EVT_MHI_STATE_PTR (0) +#define MHI_TRE_EVT_MHI_STATE_D0(state) (state << 24) +#define MHI_TRE_EVT_MHI_STATE_D1 (MHI_PKT_TYPE_STATE_CHANGE_EVENT << 16) + +/* mhi exec env change event */ +#define MHI_TRE_EVT_EE_PTR (0) +#define MHI_TRE_EVT_EE_D0(ee) (ee << 24) +#define MHI_TRE_EVT_EE_D1 (MHI_PKT_TYPE_EE_EVENT << 16) + +/* mhi config event */ +#define MHI_TRE_EVT_CFG_PTR(base_addr) (base_addr) +#define MHI_TRE_EVT_CFG_D0(er_base, num) ((er_base << 16) | (num & 0xFFFF)) +#define MHI_TRE_EVT_CFG_D1 (MHI_PKT_TYPE_CFG_EVENT << 16) + +/* command completion event */ +#define MHI_TRE_EVT_CMD_COMPLETION_PTR(ptr) (ptr) +#define MHI_TRE_EVT_CMD_COMPLETION_D0(code) (code << 24) +#define MHI_TRE_EVT_CMD_COMPLETION_D1 (MHI_PKT_TYPE_CMD_COMPLETION_EVENT << 16) + +/* packet parser macros */ +#define MHI_TRE_GET_PTR(tre) ((tre)->ptr) +#define MHI_TRE_GET_SIZE(tre) ((tre)->dword[0]) +#define MHI_TRE_GET_CCS(tre) (((tre)->dword[0] >> 24) & 0xFF) +#define MHI_TRE_GET_ID(tre) (((tre)->dword[1] >> 24) & 0xFF) +#define MHI_TRE_GET_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF) +#define MHI_TRE_IS_ER_CTXT_TYPE(tre) (((tre)->dword[1]) & 0x1) + +/* creates unique device ID based on connection topology */ +#define MHI_SAT_CREATE_DEVICE_ID(dev, domain, bus, slot) \ + ((dev & 0xFFFF) << 16 | (domain & 0xF) << 12 | (bus & 0xFF) << 4 | \ + (slot & 0xF)) + +/* mhi core definitions */ +#define MHI_CTXT_TYPE_GENERIC (0xA) + +struct __packed mhi_generic_ctxt { + u32 reserved0; + u32 type; + u32 reserved1; + u64 ctxt_base; + u64 ctxt_size; + u64 reserved[2]; +}; + +enum mhi_pkt_type { + MHI_PKT_TYPE_INVALID = 0x0, + MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10, + MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11, + MHI_PKT_TYPE_START_CHAN_CMD = 0x12, + MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20, + MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21, + MHI_PKT_TYPE_EE_EVENT = 0x40, + MHI_PKT_TYPE_CTXT_UPDATE_CMD = 0x64, + MHI_PKT_TYPE_IOMMU_MAP_CMD = 0x65, + MHI_PKT_TYPE_CFG_EVENT = 0x6E, + MHI_PKT_TYPE_SYS_ERR_CMD = 0xFF, +}; + +enum mhi_cmd_type { + MHI_CMD_TYPE_RESET = 0x10, + MHI_CMD_TYPE_STOP = 0x11, + MHI_CMD_TYPE_START = 0x12, +}; + +/* mhi event completion codes */ +enum mhi_ev_ccs { + MHI_EV_CC_INVALID = 0x0, + MHI_EV_CC_SUCCESS = 0x1, + MHI_EV_CC_BAD_TRE = 0x11, +}; + +/* satellite subsystem definitions */ +enum subsys_id { + SUBSYS_ADSP, + SUBSYS_CDSP, + SUBSYS_SLPI, + SUBSYS_MODEM, + SUBSYS_MAX, +}; + +static const char * const 
subsys_names[SUBSYS_MAX] = { + [SUBSYS_ADSP] = "adsp", + [SUBSYS_CDSP] = "cdsp", + [SUBSYS_SLPI] = "slpi", + [SUBSYS_MODEM] = "modem", +}; + +struct mhi_sat_subsys { + const char *name; + + struct rpmsg_device *rpdev; /* rpmsg device */ + + /* + * acquire either mutex or spinlock to walk controller list + * acquire both when modifying list + */ + struct list_head cntrl_list; /* controllers list */ + struct mutex cntrl_mutex; /* mutex to walk/modify controllers list */ + spinlock_t cntrl_lock; /* lock to walk/modify controllers list */ + + void *ipc_log; +}; + +/* satellite IPC definitions */ +#define SAT_MAJOR_VERSION (1) +#define SAT_MINOR_VERSION (0) +#define SAT_RESERVED_SEQ_NUM (0xFFFF) +#define SAT_MSG_SIZE(n) (sizeof(struct sat_header) + \ + (n * sizeof(struct sat_tre))) +#define SAT_TRE_SIZE(msg_size) (msg_size - sizeof(struct sat_header)) +#define SAT_TRE_OFFSET(msg) (msg + sizeof(struct sat_header)) +#define SAT_TRE_NUM_PKTS(payload_size) ((payload_size) / sizeof(struct sat_tre)) + +/* satellite IPC msg type */ +enum sat_msg_id { + SAT_MSG_ID_ACK = 0xA, + SAT_MSG_ID_CMD = 0xC, + SAT_MSG_ID_EVT = 0xE, +}; + +/* satellite IPC context type */ +enum sat_ctxt_type { + SAT_CTXT_TYPE_CHAN = 0x0, + SAT_CTXT_TYPE_EVENT = 0x1, + SAT_CTXT_TYPE_MAX, +}; + +/* satellite IPC context string */ +#define TO_SAT_CTXT_TYPE_STR(type) (type >= SAT_CTXT_TYPE_MAX ? "INVALID" : \ + sat_ctxt_str[type]) + +const char * const sat_ctxt_str[SAT_CTXT_TYPE_MAX] = { + [SAT_CTXT_TYPE_CHAN] = "CCA", + [SAT_CTXT_TYPE_EVENT] = "ECA", +}; + +/* satellite IPC transfer ring element */ +struct __packed sat_tre { + u64 ptr; + u32 dword[2]; +}; + +/* satellite IPC header */ +struct __packed sat_header { + u16 major_ver; + u16 minor_ver; + u16 msg_id; + u16 seq; + u16 reply_seq; + u16 payload_size; + u32 dev_id; + u8 reserved[8]; +}; + +/* satellite driver definitions */ +struct mhi_sat_packet { + struct list_head node; + + struct mhi_sat_cntrl *cntrl; /* satellite controller reference */ + void *msg; /* incoming message */ +}; + +struct mhi_sat_cntrl { + struct list_head node; + + struct mhi_controller *mhi_cntrl; /* device MHI controller reference */ + struct mhi_sat_subsys *subsys; + + struct list_head dev_list; + struct list_head addr_map_list; /* IOMMU mapped addresses list */ + struct mutex list_mutex; /* mutex for devices and address map lists */ + + struct list_head packet_list; + spinlock_t pkt_lock; /* lock to walk/modify received packets list */ + + struct work_struct connect_work; /* subsystem connection worker */ + struct work_struct process_work; /* incoming packets processor */ + + /* mhi core/controller configurations */ + u32 dev_id; /* unique device ID with BDF as per connection topology */ + int er_base; /* event rings base index */ + int er_max; /* event rings max index */ + int num_er; /* total number of event rings */ + + /* satellite controller function counts */ + int num_devices; /* mhi devices current count */ + int max_devices; /* count of maximum devices for subsys/controller */ + u16 seq; /* internal sequence number for all outgoing packets */ + bool active; /* flag set if hello packet/MHI_CFG event was sent */ + + /* command completion variables */ + u16 last_cmd_seq; /* sequence number of last sent command packet */ + enum mhi_ev_ccs last_cmd_ccs; /* last command completion event code */ + struct completion completion; /* command completion event wait */ + struct mutex cmd_wait_mutex; /* command completion wait mutex */ +}; + +struct mhi_sat_device { + struct list_head node; + + struct 
mhi_device *mhi_dev; /* mhi device pointer */ + struct mhi_sat_cntrl *cntrl; /* parent controller */ + + bool chan_started; +}; + +struct mhi_sat_driver { + enum MHI_DEBUG_LEVEL ipc_log_lvl; /* IPC log level */ + enum MHI_DEBUG_LEVEL klog_lvl; /* klog/dmesg levels */ + + struct mhi_sat_subsys *subsys; /* pointer to subsystem array */ + unsigned int num_subsys; + + struct dentry *dentry; /* debugfs directory */ + bool deferred_init_done; /* flag for deferred init protection */ +}; + +static struct mhi_sat_driver mhi_sat_driver; + +static struct mhi_sat_subsys *find_subsys_by_name(const char *name) +{ + int i; + struct mhi_sat_subsys *subsys = mhi_sat_driver.subsys; + + for (i = 0; i < mhi_sat_driver.num_subsys; i++, subsys++) { + if (!strcmp(name, subsys->name)) + return subsys; + } + + return NULL; +} + +static struct mhi_sat_cntrl *find_sat_cntrl_by_id(struct mhi_sat_subsys *subsys, + u32 dev_id) +{ + struct mhi_sat_cntrl *sat_cntrl; + unsigned long flags; + + spin_lock_irqsave(&subsys->cntrl_lock, flags); + list_for_each_entry(sat_cntrl, &subsys->cntrl_list, node) { + if (sat_cntrl->dev_id == dev_id) { + spin_unlock_irqrestore(&subsys->cntrl_lock, flags); + return sat_cntrl; + } + } + spin_unlock_irqrestore(&subsys->cntrl_lock, flags); + + return NULL; +} + +static struct mhi_sat_device *find_sat_dev_by_id( + struct mhi_sat_cntrl *sat_cntrl, int id, + enum sat_ctxt_type evt) +{ + struct mhi_sat_device *sat_dev; + int compare_id; + + mutex_lock(&sat_cntrl->list_mutex); + list_for_each_entry(sat_dev, &sat_cntrl->dev_list, node) { + compare_id = (evt == SAT_CTXT_TYPE_EVENT) ? + sat_dev->mhi_dev->dl_event_id : + sat_dev->mhi_dev->dl_chan_id; + + if (compare_id == id) { + mutex_unlock(&sat_cntrl->list_mutex); + return sat_dev; + } + } + mutex_unlock(&sat_cntrl->list_mutex); + + return NULL; +} + +static bool mhi_sat_isvalid_header(struct sat_header *hdr, int len) +{ + /* validate payload size */ + if (len >= sizeof(*hdr) && (len != hdr->payload_size + sizeof(*hdr))) + return false; + + /* validate SAT IPC version */ + if (hdr->major_ver != SAT_MAJOR_VERSION && + hdr->minor_ver != SAT_MINOR_VERSION) + return false; + + /* validate msg ID */ + if (hdr->msg_id != SAT_MSG_ID_CMD && hdr->msg_id != SAT_MSG_ID_EVT) + return false; + + return true; +} + +static int mhi_sat_wait_cmd_completion(struct mhi_sat_cntrl *sat_cntrl) +{ + struct mhi_sat_subsys *subsys = sat_cntrl->subsys; + int ret; + + reinit_completion(&sat_cntrl->completion); + + MHI_SAT_LOG("Wait for command completion\n"); + ret = wait_for_completion_timeout(&sat_cntrl->completion, + msecs_to_jiffies(sat_cntrl->mhi_cntrl->timeout_ms)); + if (!ret || sat_cntrl->last_cmd_ccs != MHI_EV_CC_SUCCESS) { + MHI_SAT_ERR("Command completion failure:seq:%u:ret:%d:ccs:%d\n", + sat_cntrl->last_cmd_seq, ret, sat_cntrl->last_cmd_ccs); + return -EIO; + } + + MHI_SAT_LOG("Command completion successful for seq:%u\n", + sat_cntrl->last_cmd_seq); + + return 0; +} + +static int mhi_sat_send_msg(struct mhi_sat_cntrl *sat_cntrl, + enum sat_msg_id type, u16 reply_seq, + void *msg, u16 msg_size) +{ + struct mhi_sat_subsys *subsys = sat_cntrl->subsys; + struct sat_header *hdr = msg; + + /* create sequence number for controller */ + sat_cntrl->seq++; + if (sat_cntrl->seq == SAT_RESERVED_SEQ_NUM) + sat_cntrl->seq = 0; + + /* populate header */ + hdr->major_ver = SAT_MAJOR_VERSION; + hdr->minor_ver = SAT_MINOR_VERSION; + hdr->msg_id = type; + hdr->seq = sat_cntrl->seq; + hdr->reply_seq = reply_seq; + hdr->payload_size = SAT_TRE_SIZE(msg_size); + hdr->dev_id = 
sat_cntrl->dev_id; + + /* save last sent command sequence number for completion event */ + if (type == SAT_MSG_ID_CMD) + sat_cntrl->last_cmd_seq = sat_cntrl->seq; + + return rpmsg_send(subsys->rpdev->ept, msg, msg_size); +} + +static void mhi_sat_process_cmds(struct mhi_sat_cntrl *sat_cntrl, + struct sat_header *hdr, struct sat_tre *pkt) +{ + struct mhi_sat_subsys *subsys = sat_cntrl->subsys; + int num_pkts = SAT_TRE_NUM_PKTS(hdr->payload_size), i; + + for (i = 0; i < num_pkts; i++, pkt++) { + enum mhi_ev_ccs code = MHI_EV_CC_INVALID; + + switch (MHI_TRE_GET_TYPE(pkt)) { + case MHI_PKT_TYPE_IOMMU_MAP_CMD: + { + struct mhi_buf *buf; + struct mhi_controller *mhi_cntrl = sat_cntrl->mhi_cntrl; + dma_addr_t iova = DMA_ERROR_CODE; + + buf = kmalloc(sizeof(*buf), GFP_ATOMIC); + if (!buf) + goto iommu_map_cmd_completion; + + buf->phys_addr = MHI_TRE_GET_PTR(pkt); + buf->len = MHI_TRE_GET_SIZE(pkt); + + iova = dma_map_resource(mhi_cntrl->dev, buf->phys_addr, + buf->len, DMA_BIDIRECTIONAL, 0); + if (dma_mapping_error(mhi_cntrl->dev, iova)) { + kfree(buf); + goto iommu_map_cmd_completion; + } + + buf->dma_addr = iova; + + mutex_lock(&sat_cntrl->list_mutex); + list_add_tail(&buf->node, + &sat_cntrl->addr_map_list); + mutex_unlock(&sat_cntrl->list_mutex); + + code = MHI_EV_CC_SUCCESS; + +iommu_map_cmd_completion: + MHI_SAT_LOG("IOMMU MAP 0x%llx CMD processing %s\n", + MHI_TRE_GET_PTR(pkt), + (code == MHI_EV_CC_SUCCESS) ? "successful" : + "failed"); + + pkt->ptr = MHI_TRE_EVT_CMD_COMPLETION_PTR(iova); + pkt->dword[0] = MHI_TRE_EVT_CMD_COMPLETION_D0(code); + pkt->dword[1] = MHI_TRE_EVT_CMD_COMPLETION_D1; + break; + } + case MHI_PKT_TYPE_CTXT_UPDATE_CMD: + { + u64 ctxt_ptr = MHI_TRE_GET_PTR(pkt); + u64 ctxt_size = MHI_TRE_GET_SIZE(pkt); + int id = MHI_TRE_GET_ID(pkt); + enum sat_ctxt_type evt = MHI_TRE_IS_ER_CTXT_TYPE(pkt); + struct mhi_generic_ctxt gen_ctxt; + struct mhi_buf buf; + struct mhi_sat_device *sat_dev = find_sat_dev_by_id( + sat_cntrl, id, evt); + int ret; + + MHI_SAT_ASSERT(!sat_dev, + "No device with given chan/evt ID"); + + memset(&gen_ctxt, 0, sizeof(gen_ctxt)); + memset(&buf, 0, sizeof(buf)); + + gen_ctxt.type = MHI_CTXT_TYPE_GENERIC; + gen_ctxt.ctxt_base = ctxt_ptr; + gen_ctxt.ctxt_size = ctxt_size; + + buf.buf = &gen_ctxt; + buf.len = sizeof(gen_ctxt); + buf.name = TO_SAT_CTXT_TYPE_STR(evt); + + ret = mhi_device_configure(sat_dev->mhi_dev, + DMA_BIDIRECTIONAL, &buf, 1); + if (!ret) + code = MHI_EV_CC_SUCCESS; + + MHI_SAT_LOG("CTXT UPDATE CMD %s:%d processing %s\n", + buf.name, id, (code == MHI_EV_CC_SUCCESS) ? + "successful" : "failed"); + + pkt->ptr = MHI_TRE_EVT_CMD_COMPLETION_PTR(0); + pkt->dword[0] = MHI_TRE_EVT_CMD_COMPLETION_D0(code); + pkt->dword[1] = MHI_TRE_EVT_CMD_COMPLETION_D1; + break; + } + case MHI_PKT_TYPE_START_CHAN_CMD: + { + int id = MHI_TRE_GET_ID(pkt); + struct mhi_sat_device *sat_dev = find_sat_dev_by_id( + sat_cntrl, id, + SAT_CTXT_TYPE_CHAN); + int ret; + + MHI_SAT_ASSERT(!sat_dev, + "No device with given channel ID\n"); + + MHI_SAT_ASSERT(sat_dev->chan_started, + "Channel already started!"); + + ret = mhi_prepare_for_transfer(sat_dev->mhi_dev); + if (!ret) { + sat_dev->chan_started = true; + code = MHI_EV_CC_SUCCESS; + } + + MHI_SAT_LOG("START CHANNEL %d CMD processing %s\n", + id, (code == MHI_EV_CC_SUCCESS) ? 
"successful" : + "failure"); + + pkt->ptr = MHI_TRE_EVT_CMD_COMPLETION_PTR(0); + pkt->dword[0] = MHI_TRE_EVT_CMD_COMPLETION_D0(code); + pkt->dword[1] = MHI_TRE_EVT_CMD_COMPLETION_D1; + break; + } + case MHI_PKT_TYPE_RESET_CHAN_CMD: + { + int id = MHI_TRE_GET_ID(pkt); + struct mhi_sat_device *sat_dev = + find_sat_dev_by_id(sat_cntrl, id, + SAT_CTXT_TYPE_CHAN); + + MHI_SAT_ASSERT(!sat_dev, + "No device with given channel ID\n"); + + MHI_SAT_ASSERT(!sat_dev->chan_started, + "Resetting unstarted channel!"); + + mhi_unprepare_from_transfer(sat_dev->mhi_dev); + sat_dev->chan_started = false; + + MHI_SAT_LOG( + "RESET CHANNEL %d CMD processing successful\n", + id); + + pkt->ptr = MHI_TRE_EVT_CMD_COMPLETION_PTR(0); + pkt->dword[0] = MHI_TRE_EVT_CMD_COMPLETION_D0( + MHI_EV_CC_SUCCESS); + pkt->dword[1] = MHI_TRE_EVT_CMD_COMPLETION_D1; + break; + } + default: + MHI_SAT_ASSERT(1, "Unhandled command!"); + break; + } + } +} + +static void mhi_sat_process_worker(struct work_struct *work) +{ + struct mhi_sat_cntrl *sat_cntrl = container_of(work, + struct mhi_sat_cntrl, process_work); + struct mhi_sat_subsys *subsys = sat_cntrl->subsys; + struct mhi_sat_packet *packet, *tmp; + struct sat_header *hdr; + struct sat_tre *pkt; + LIST_HEAD(head); + + MHI_SAT_LOG("Entered\n"); + + spin_lock_irq(&sat_cntrl->pkt_lock); + list_splice_tail_init(&sat_cntrl->packet_list, &head); + spin_unlock_irq(&sat_cntrl->pkt_lock); + + list_for_each_entry_safe(packet, tmp, &head, node) { + hdr = packet->msg; + pkt = SAT_TRE_OFFSET(packet->msg); + + list_del(&packet->node); + + mhi_sat_process_cmds(sat_cntrl, hdr, pkt); + + /* send response event(s) */ + mhi_sat_send_msg(sat_cntrl, SAT_MSG_ID_EVT, hdr->seq, + packet->msg, + SAT_MSG_SIZE(SAT_TRE_NUM_PKTS( + hdr->payload_size))); + + kfree(packet); + } + + MHI_SAT_LOG("Exited\n"); +} + +static void mhi_sat_connect_worker(struct work_struct *work) +{ + struct mhi_sat_cntrl *sat_cntrl = container_of(work, + struct mhi_sat_cntrl, connect_work); + struct mhi_sat_subsys *subsys = sat_cntrl->subsys; + struct sat_tre *pkt; + void *msg; + int ret; + + if (!subsys->rpdev || sat_cntrl->max_devices != sat_cntrl->num_devices + || sat_cntrl->active) + return; + + MHI_SAT_LOG("Entered\n"); + + msg = kmalloc(SAT_MSG_SIZE(3), GFP_ATOMIC); + if (!msg) + return; + + sat_cntrl->active = true; + + pkt = SAT_TRE_OFFSET(msg); + + /* prepare #1 MHI_CFG HELLO event */ + pkt->ptr = MHI_TRE_EVT_CFG_PTR(sat_cntrl->mhi_cntrl->base_addr); + pkt->dword[0] = MHI_TRE_EVT_CFG_D0(sat_cntrl->er_base, + sat_cntrl->num_er); + pkt->dword[1] = MHI_TRE_EVT_CFG_D1; + pkt++; + + /* prepare M0 event */ + pkt->ptr = MHI_TRE_EVT_MHI_STATE_PTR; + pkt->dword[0] = MHI_TRE_EVT_MHI_STATE_D0(MHI_STATE_M0); + pkt->dword[1] = MHI_TRE_EVT_MHI_STATE_D1; + pkt++; + + /* prepare AMSS event */ + pkt->ptr = MHI_TRE_EVT_EE_PTR; + pkt->dword[0] = MHI_TRE_EVT_EE_D0(MHI_EE_AMSS); + pkt->dword[1] = MHI_TRE_EVT_EE_D1; + + ret = mhi_sat_send_msg(sat_cntrl, SAT_MSG_ID_EVT, SAT_RESERVED_SEQ_NUM, + msg, SAT_MSG_SIZE(3)); + kfree(msg); + if (ret) { + MHI_SAT_ERR("Failed to send hello packet:%d\n", ret); + sat_cntrl->active = false; + return; + } + + MHI_SAT_LOG("Device 0x%x sent hello packet\n", sat_cntrl->dev_id); +} + +static void mhi_sat_process_events(struct mhi_sat_cntrl *sat_cntrl, + struct sat_header *hdr, struct sat_tre *pkt) +{ + int num_pkts = SAT_TRE_NUM_PKTS(hdr->payload_size); + int i; + + for (i = 0; i < num_pkts; i++, pkt++) { + if (MHI_TRE_GET_TYPE(pkt) == + MHI_PKT_TYPE_CMD_COMPLETION_EVENT) { + if (hdr->reply_seq != 
sat_cntrl->last_cmd_seq) + continue; + + sat_cntrl->last_cmd_ccs = MHI_TRE_GET_CCS(pkt); + complete(&sat_cntrl->completion); + } + } +} + +static int mhi_sat_rpmsg_cb(struct rpmsg_device *rpdev, void *data, int len, + void *priv, u32 src) +{ + struct mhi_sat_subsys *subsys = dev_get_drvdata(&rpdev->dev); + struct sat_header *hdr = data; + struct sat_tre *pkt = SAT_TRE_OFFSET(data); + struct mhi_sat_cntrl *sat_cntrl; + struct mhi_sat_packet *packet; + + MHI_SAT_ASSERT(!mhi_sat_isvalid_header(hdr, len), "Invalid header!\n"); + + /* find controller packet was sent for */ + sat_cntrl = find_sat_cntrl_by_id(subsys, hdr->dev_id); + + MHI_SAT_ASSERT(!sat_cntrl, "Packet for unknown device!\n"); + + /* handle events directly regardless of controller active state */ + if (hdr->msg_id == SAT_MSG_ID_EVT) { + mhi_sat_process_events(sat_cntrl, hdr, pkt); + return 0; + } + + /* Inactive controller cannot process incoming commands */ + if (unlikely(!sat_cntrl->active)) { + MHI_SAT_ERR("Message for inactive controller!\n"); + return 0; + } + + /* offload commands to process worker */ + packet = kmalloc(sizeof(*packet) + len, GFP_ATOMIC); + if (!packet) + return 0; + + packet->cntrl = sat_cntrl; + packet->msg = packet + 1; + memcpy(packet->msg, data, len); + + spin_lock_irq(&sat_cntrl->pkt_lock); + list_add_tail(&packet->node, &sat_cntrl->packet_list); + spin_unlock_irq(&sat_cntrl->pkt_lock); + + schedule_work(&sat_cntrl->process_work); + + return 0; +} + +static void mhi_sat_rpmsg_remove(struct rpmsg_device *rpdev) +{ + struct mhi_sat_subsys *subsys = dev_get_drvdata(&rpdev->dev); + struct mhi_sat_cntrl *sat_cntrl; + struct mhi_sat_device *sat_dev; + struct mhi_buf *buf, *tmp; + + MHI_SUBSYS_LOG("Enter\n"); + + /* unprepare each controller/device from transfer */ + mutex_lock(&subsys->cntrl_mutex); + list_for_each_entry(sat_cntrl, &subsys->cntrl_list, node) { + sat_cntrl->active = false; + + flush_work(&sat_cntrl->connect_work); + flush_work(&sat_cntrl->process_work); + + mutex_lock(&sat_cntrl->list_mutex); + list_for_each_entry(sat_dev, &sat_cntrl->dev_list, node) { + if (sat_dev->chan_started) { + mhi_unprepare_from_transfer(sat_dev->mhi_dev); + sat_dev->chan_started = false; + } + } + + list_for_each_entry_safe(buf, tmp, &sat_cntrl->addr_map_list, + node) { + dma_unmap_resource(sat_cntrl->mhi_cntrl->dev, + buf->dma_addr, buf->len, + DMA_BIDIRECTIONAL, 0); + list_del(&buf->node); + kfree(buf); + } + mutex_unlock(&sat_cntrl->list_mutex); + + MHI_SAT_LOG("Removed RPMSG link\n"); + } + mutex_unlock(&subsys->cntrl_mutex); + + subsys->rpdev = NULL; +} + +static int mhi_sat_rpmsg_probe(struct rpmsg_device *rpdev) +{ + struct mhi_sat_subsys *subsys; + struct mhi_sat_cntrl *sat_cntrl; + const char *subsys_name; + int ret; + + ret = of_property_read_string(rpdev->dev.parent->of_node, "label", + &subsys_name); + if (ret) + return ret; + + /* find which subsystem has probed */ + subsys = find_subsys_by_name(subsys_name); + if (!subsys) + return -EINVAL; + + MHI_SUBSYS_LOG("Received RPMSG probe\n"); + + dev_set_drvdata(&rpdev->dev, subsys); + + subsys->rpdev = rpdev; + + /* schedule work for each controller as GLINK has connected */ + spin_lock_irq(&subsys->cntrl_lock); + list_for_each_entry(sat_cntrl, &subsys->cntrl_list, node) + schedule_work(&sat_cntrl->connect_work); + spin_unlock_irq(&subsys->cntrl_lock); + + return 0; +} + +static struct rpmsg_device_id mhi_sat_rpmsg_match_table[] = { + { .name = "mhi_sat" }, + { }, +}; + +static struct rpmsg_driver mhi_sat_rpmsg_driver = { + .id_table = 
mhi_sat_rpmsg_match_table, + .probe = mhi_sat_rpmsg_probe, + .remove = mhi_sat_rpmsg_remove, + .callback = mhi_sat_rpmsg_cb, + .drv = { + .name = "mhi,sat_rpmsg", + }, +}; + +static void mhi_sat_dev_status_cb(struct mhi_device *mhi_dev, + enum MHI_CB mhi_cb) +{ +} + +static void mhi_sat_dev_remove(struct mhi_device *mhi_dev) +{ + struct mhi_sat_device *sat_dev = mhi_device_get_devdata(mhi_dev); + struct mhi_sat_cntrl *sat_cntrl = sat_dev->cntrl; + struct mhi_sat_subsys *subsys = sat_cntrl->subsys; + struct mhi_buf *buf, *tmp; + struct sat_tre *pkt; + void *msg; + int ret; + + /* remove device node from probed list */ + mutex_lock(&sat_cntrl->list_mutex); + list_del(&sat_dev->node); + mutex_unlock(&sat_cntrl->list_mutex); + + sat_cntrl->num_devices--; + + /* prepare SYS_ERR command if first device is being removed */ + if (sat_cntrl->active) { + sat_cntrl->active = false; + + /* flush all pending work */ + flush_work(&sat_cntrl->connect_work); + flush_work(&sat_cntrl->process_work); + + msg = kmalloc(SAT_MSG_SIZE(1), GFP_KERNEL); + + MHI_SAT_ASSERT(!msg, "Unable to malloc for SYS_ERR message!\n"); + + pkt = SAT_TRE_OFFSET(msg); + pkt->ptr = MHI_TRE_CMD_SYS_ERR_PTR; + pkt->dword[0] = MHI_TRE_CMD_SYS_ERR_D0; + pkt->dword[1] = MHI_TRE_CMD_SYS_ERR_D1; + + /* acquire cmd_wait_mutex before sending command */ + mutex_lock(&sat_cntrl->cmd_wait_mutex); + + ret = mhi_sat_send_msg(sat_cntrl, SAT_MSG_ID_CMD, + SAT_RESERVED_SEQ_NUM, msg, + SAT_MSG_SIZE(1)); + kfree(msg); + if (ret) { + MHI_SAT_ERR("Failed to notify SYS_ERR\n"); + mutex_unlock(&sat_cntrl->cmd_wait_mutex); + goto exit_sys_err_send; + } + + MHI_SAT_LOG("SYS_ERR command sent\n"); + + /* blocking call to wait for command completion event */ + mhi_sat_wait_cmd_completion(sat_cntrl); + + mutex_unlock(&sat_cntrl->cmd_wait_mutex); + } + +exit_sys_err_send: + /* exit if some devices are still present */ + if (sat_cntrl->num_devices) + return; + + /* remove address mappings */ + mutex_lock(&sat_cntrl->list_mutex); + list_for_each_entry_safe(buf, tmp, &sat_cntrl->addr_map_list, node) { + dma_unmap_resource(sat_cntrl->mhi_cntrl->dev, buf->dma_addr, + buf->len, DMA_BIDIRECTIONAL, 0); + list_del(&buf->node); + kfree(buf); + } + mutex_unlock(&sat_cntrl->list_mutex); + + /* remove controller */ + mutex_lock(&subsys->cntrl_mutex); + spin_lock_irq(&subsys->cntrl_lock); + list_del(&sat_cntrl->node); + spin_unlock_irq(&subsys->cntrl_lock); + mutex_unlock(&subsys->cntrl_mutex); + + mutex_destroy(&sat_cntrl->cmd_wait_mutex); + mutex_destroy(&sat_cntrl->list_mutex); + MHI_SAT_LOG("Satellite controller node removed\n"); + kfree(sat_cntrl); +} + +static int mhi_sat_dev_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + struct mhi_sat_device *sat_dev; + struct mhi_sat_cntrl *sat_cntrl; + struct device_node *of_node = mhi_dev->dev.of_node; + struct mhi_sat_subsys *subsys = &mhi_sat_driver.subsys[id->driver_data]; + u32 dev_id = MHI_SAT_CREATE_DEVICE_ID(mhi_dev->dev_id, mhi_dev->domain, + mhi_dev->bus, mhi_dev->slot); + int ret; + + /* find controller with unique device ID based on topology */ + sat_cntrl = find_sat_cntrl_by_id(subsys, dev_id); + if (!sat_cntrl) { + sat_cntrl = kzalloc(sizeof(*sat_cntrl), GFP_KERNEL); + if (!sat_cntrl) + return -ENOMEM; + + /* + * max_devices will be read from device tree node. Set it to + * -1 before it is populated to avoid false positive when + * RPMSG probe schedules connect worker but no device has + * probed in which case num_devices and max_devices are both + * zero. 
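+ * For example: if the RPMSG link comes up before any satellite device has
+ * probed, connect_work would otherwise see num_devices == max_devices == 0
+ * and send a premature hello packet; priming max_devices with -1 keeps that
+ * comparison false until the real limit is read from the
+ * "mhi,max-devices" DT property below.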
+ */ + sat_cntrl->max_devices = -1; + sat_cntrl->dev_id = dev_id; + sat_cntrl->er_base = mhi_dev->dl_event_id; + sat_cntrl->mhi_cntrl = mhi_dev->mhi_cntrl; + sat_cntrl->last_cmd_seq = SAT_RESERVED_SEQ_NUM; + sat_cntrl->subsys = subsys; + init_completion(&sat_cntrl->completion); + mutex_init(&sat_cntrl->list_mutex); + mutex_init(&sat_cntrl->cmd_wait_mutex); + spin_lock_init(&sat_cntrl->pkt_lock); + INIT_WORK(&sat_cntrl->connect_work, mhi_sat_connect_worker); + INIT_WORK(&sat_cntrl->process_work, mhi_sat_process_worker); + INIT_LIST_HEAD(&sat_cntrl->dev_list); + INIT_LIST_HEAD(&sat_cntrl->addr_map_list); + INIT_LIST_HEAD(&sat_cntrl->packet_list); + + mutex_lock(&subsys->cntrl_mutex); + spin_lock_irq(&subsys->cntrl_lock); + list_add(&sat_cntrl->node, &subsys->cntrl_list); + spin_unlock_irq(&subsys->cntrl_lock); + mutex_unlock(&subsys->cntrl_mutex); + + MHI_SAT_LOG("Controller allocated for 0x%x\n", dev_id); + } + + /* set maximum devices for subsystem from device tree */ + if (of_node) { + ret = of_property_read_u32(of_node, "mhi,max-devices", + &sat_cntrl->max_devices); + if (ret) { + MHI_SAT_ERR("Could not find max-devices in DT node\n"); + return -EINVAL; + } + } + + /* get event ring base and max indexes */ + sat_cntrl->er_base = min(sat_cntrl->er_base, mhi_dev->dl_event_id); + sat_cntrl->er_max = max(sat_cntrl->er_base, mhi_dev->dl_event_id); + + sat_dev = devm_kzalloc(&mhi_dev->dev, sizeof(*sat_dev), GFP_KERNEL); + if (!sat_dev) + return -ENOMEM; + + sat_dev->mhi_dev = mhi_dev; + sat_dev->cntrl = sat_cntrl; + + mutex_lock(&sat_cntrl->list_mutex); + list_add(&sat_dev->node, &sat_cntrl->dev_list); + mutex_unlock(&sat_cntrl->list_mutex); + + mhi_device_set_devdata(mhi_dev, sat_dev); + + sat_cntrl->num_devices++; + + /* schedule connect worker if all devices for controller have probed */ + if (sat_cntrl->num_devices == sat_cntrl->max_devices) { + /* number of event rings is 1 more than difference in IDs */ + sat_cntrl->num_er = (sat_cntrl->er_max - sat_cntrl->er_base) + + 1; + MHI_SAT_LOG("All satellite channels probed!\n"); + schedule_work(&sat_cntrl->connect_work); + } + + return 0; +} + +/* .driver_data stores subsys id */ +static const struct mhi_device_id mhi_sat_dev_match_table[] = { + /* ADSP */ + { .chan = "ADSP_0", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_1", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_2", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_3", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_4", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_5", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_6", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_7", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_8", .driver_data = SUBSYS_ADSP }, + { .chan = "ADSP_9", .driver_data = SUBSYS_ADSP }, + /* CDSP */ + { .chan = "CDSP_0", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_1", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_2", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_3", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_4", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_5", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_6", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_7", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_8", .driver_data = SUBSYS_CDSP }, + { .chan = "CDSP_9", .driver_data = SUBSYS_CDSP }, + /* SLPI */ + { .chan = "SLPI_0", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_1", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_2", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_3", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_4", .driver_data = 
SUBSYS_SLPI }, + { .chan = "SLPI_5", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_6", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_7", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_8", .driver_data = SUBSYS_SLPI }, + { .chan = "SLPI_9", .driver_data = SUBSYS_SLPI }, + /* MODEM */ + { .chan = "MODEM_0", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_1", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_2", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_3", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_4", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_5", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_6", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_7", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_8", .driver_data = SUBSYS_MODEM }, + { .chan = "MODEM_9", .driver_data = SUBSYS_MODEM }, + {}, +}; + +static struct mhi_driver mhi_sat_dev_driver = { + .id_table = mhi_sat_dev_match_table, + .probe = mhi_sat_dev_probe, + .remove = mhi_sat_dev_remove, + .status_cb = mhi_sat_dev_status_cb, + .driver = { + .name = MHI_SAT_DRIVER_NAME, + .owner = THIS_MODULE, + }, +}; + +int mhi_sat_trigger_init(void *data, u64 val) +{ + struct mhi_sat_subsys *subsys; + int i, ret; + + if (mhi_sat_driver.deferred_init_done) + return -EIO; + + ret = register_rpmsg_driver(&mhi_sat_rpmsg_driver); + if (ret) + goto error_sat_trigger_init; + + ret = mhi_driver_register(&mhi_sat_dev_driver); + if (ret) + goto error_sat_trigger_register; + + mhi_sat_driver.deferred_init_done = true; + + return 0; + +error_sat_trigger_register: + unregister_rpmsg_driver(&mhi_sat_rpmsg_driver); + +error_sat_trigger_init: + subsys = mhi_sat_driver.subsys; + for (i = 0; i < mhi_sat_driver.num_subsys; i++, subsys++) { + ipc_log_context_destroy(subsys->ipc_log); + mutex_destroy(&subsys->cntrl_mutex); + } + kfree(mhi_sat_driver.subsys); + mhi_sat_driver.subsys = NULL; + + return ret; +} + +DEFINE_SIMPLE_ATTRIBUTE(mhi_sat_debugfs_fops, NULL, + mhi_sat_trigger_init, "%llu\n"); + +static int mhi_sat_init(void) +{ + struct mhi_sat_subsys *subsys; + int i, ret; + + subsys = kcalloc(SUBSYS_MAX, sizeof(*subsys), GFP_KERNEL); + if (!subsys) + return -ENOMEM; + + mhi_sat_driver.subsys = subsys; + mhi_sat_driver.num_subsys = SUBSYS_MAX; + mhi_sat_driver.klog_lvl = KLOG_LVL; + mhi_sat_driver.ipc_log_lvl = IPC_LOG_LVL; + + for (i = 0; i < mhi_sat_driver.num_subsys; i++, subsys++) { + char log[32]; + + subsys->name = subsys_names[i]; + mutex_init(&subsys->cntrl_mutex); + spin_lock_init(&subsys->cntrl_lock); + INIT_LIST_HEAD(&subsys->cntrl_list); + scnprintf(log, sizeof(log), "mhi_sat_%s", subsys->name); + subsys->ipc_log = ipc_log_context_create(IPC_LOG_PAGES, log, 0); + } + + /* create debugfs entry if defer_init is enabled */ + if (mhi_sat_defer_init) { + mhi_sat_driver.dentry = debugfs_create_dir("mhi_sat", NULL); + if (IS_ERR_OR_NULL(mhi_sat_driver.dentry)) { + ret = -ENODEV; + goto error_sat_init; + } + + debugfs_create_file("debug", 0444, mhi_sat_driver.dentry, NULL, + &mhi_sat_debugfs_fops); + + return 0; + } + + ret = register_rpmsg_driver(&mhi_sat_rpmsg_driver); + if (ret) + goto error_sat_init; + + ret = mhi_driver_register(&mhi_sat_dev_driver); + if (ret) + goto error_sat_register; + + return 0; + +error_sat_register: + unregister_rpmsg_driver(&mhi_sat_rpmsg_driver); + +error_sat_init: + subsys = mhi_sat_driver.subsys; + for (i = 0; i < mhi_sat_driver.num_subsys; i++, subsys++) { + ipc_log_context_destroy(subsys->ipc_log); + mutex_destroy(&subsys->cntrl_mutex); + } + kfree(mhi_sat_driver.subsys); + 
mhi_sat_driver.subsys = NULL; + + return ret; +} + +module_init(mhi_sat_init); + diff --git a/wwan/driver/quectel_MHI/src/devices/mhi_uci.c b/wwan/driver/quectel_MHI/src/devices/mhi_uci.c new file mode 100644 index 0000000..474b022 --- /dev/null +++ b/wwan/driver/quectel_MHI/src/devices/mhi_uci.c @@ -0,0 +1,981 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#if 1 +static inline void *ipc_log_context_create(int max_num_pages, + const char *modname, uint16_t user_version) +{ return NULL; } +static inline int ipc_log_string(void *ilctxt, const char *fmt, ...) +{ return -EINVAL; } +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../core/mhi.h" + +#define DEVICE_NAME "mhi" +#define MHI_UCI_DRIVER_NAME "mhi_uci_q" + +struct uci_chan { + wait_queue_head_t wq; + spinlock_t lock; + struct list_head pending; /* user space waiting to read */ + struct uci_buf *cur_buf; /* current buffer user space reading */ + size_t rx_size; +}; + +struct uci_buf { + struct page *page; + void *data; + size_t len; + unsigned nr_trb; + struct list_head node; +}; + +struct uci_dev { + struct list_head node; + dev_t devt; + struct device *dev; + struct mhi_device *mhi_dev; + const char *chan; + struct mutex mutex; /* sync open and close */ + struct mutex r_mutex; + struct mutex w_mutex; + struct uci_chan ul_chan; + struct uci_chan dl_chan; + size_t mtu; + int ref_count; + bool enabled; + unsigned rx_error; + unsigned nr_trb; + unsigned nr_trbs; + struct uci_buf *uci_buf; + struct ktermios termios; + size_t bytes_xferd; +}; + +struct mhi_uci_drv { + struct list_head head; + struct mutex lock; + struct class *class; + int major; + dev_t dev_t; +}; + +static int uci_msg_lvl = MHI_MSG_LVL_ERROR; +module_param( uci_msg_lvl, uint, S_IRUGO | S_IWUSR); + +#define MSG_VERB(fmt, ...) do { \ + if (uci_msg_lvl <= MHI_MSG_LVL_VERBOSE) \ + pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__); \ + } while (0) + +#define MSG_LOG(fmt, ...) do { \ + if (uci_msg_lvl <= MHI_MSG_LVL_INFO) \ + pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__); \ + } while (0) + +#define MSG_ERR(fmt, ...) 
do { \ + if (uci_msg_lvl <= MHI_MSG_LVL_ERROR) \ + pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \ + } while (0) + +#define MAX_UCI_DEVICES (64) +#define QUEC_MHI_UCI_ALWAYS_OPEN // for now, sdx20 cannot handle a "start-reset-start" operation, so the simple solution is to keep the started state + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) +#ifdef TCGETS2 +__weak int user_termios_to_kernel_termios(struct ktermios *k, + struct termios2 __user *u) +{ + return copy_from_user(k, u, sizeof(struct termios2)); +} +__weak int kernel_termios_to_user_termios(struct termios2 __user *u, + struct ktermios *k) +{ + return copy_to_user(u, k, sizeof(struct termios2)); +} +__weak int user_termios_to_kernel_termios_1(struct ktermios *k, + struct termios __user *u) +{ + return copy_from_user(k, u, sizeof(struct termios)); +} +__weak int kernel_termios_to_user_termios_1(struct termios __user *u, + struct ktermios *k) +{ + return copy_to_user(u, k, sizeof(struct termios)); +} + +#else + +__weak int user_termios_to_kernel_termios(struct ktermios *k, + struct termios __user *u) +{ + return copy_from_user(k, u, sizeof(struct termios)); +} +__weak int kernel_termios_to_user_termios(struct termios __user *u, + struct ktermios *k) +{ + return copy_to_user(u, k, sizeof(struct termios)); +} +#endif /* TCGETS2 */ +#endif + +static DECLARE_BITMAP(uci_minors, MAX_UCI_DEVICES); +static struct mhi_uci_drv mhi_uci_drv; + +static int mhi_queue_inbound(struct uci_dev *uci_dev) +{ + struct mhi_device *mhi_dev = uci_dev->mhi_dev; + int nr_trbs = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE); + size_t mtu = uci_dev->mtu; + void *buf; + struct uci_buf *uci_buf; + int ret = -EIO, i; + + if (uci_dev->uci_buf == NULL) { + uci_dev->nr_trb = 0; + uci_dev->nr_trbs = (nr_trbs + 1); + uci_dev->uci_buf = kmalloc_array(uci_dev->nr_trbs, sizeof(*uci_buf), GFP_KERNEL); + if (!uci_dev->uci_buf) + return -ENOMEM; + + uci_buf = uci_dev->uci_buf; + for (i = 0; i < uci_dev->nr_trbs; i++, uci_buf++) { + uci_buf->page = alloc_pages(GFP_KERNEL, get_order(mtu)); + if (!uci_buf->page) + return -ENOMEM; + uci_buf->data = page_address(uci_buf->page); + uci_buf->len = 0; + uci_buf->nr_trb = i; + if (mhi_dev->dl_chan_id == MHI_CLIENT_DUN_IN) { + //MSG_ERR("[%d] = %p\n", i, uci_buf->data); + } + } + } + + for (i = 0; i < nr_trbs; i++) { + #if 0 + buf = kmalloc(mtu + sizeof(*uci_buf), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + uci_buf = buf + mtu; + uci_buf->data = buf; + #else + uci_buf = &uci_dev->uci_buf[i]; + buf = uci_buf->data; + #endif + + MSG_VERB("Allocated buf %d of %d size %zu\n", i, nr_trbs, mtu); + + ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf, mtu, + MHI_EOT); + if (ret) { + #if 0 + kfree(buf); + #endif + MSG_ERR("Failed to queue buffer %d\n", i); + return ret; + } + } + + return ret; +} + +static long mhi_uci_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg) +{ + struct uci_dev *uci_dev = file->private_data; + struct mhi_device *mhi_dev = uci_dev->mhi_dev; + long ret = -ERESTARTSYS; + + mutex_lock(&uci_dev->mutex); + if (uci_dev->enabled) + ret = mhi_ioctl(mhi_dev, cmd, arg); + + if (uci_dev->enabled) { + switch (cmd) { + case TCGETS: +#ifndef TCGETS2 + ret = kernel_termios_to_user_termios((struct termios __user *)arg, &uci_dev->termios); +#else + ret = kernel_termios_to_user_termios_1((struct termios __user *)arg, &uci_dev->termios); +#endif + break; + + case TCSETSF: + case TCSETS: +#ifndef TCGETS2 + ret = user_termios_to_kernel_termios(&uci_dev->termios, (struct termios __user *)arg); +#else + ret = 
user_termios_to_kernel_termios_1(&uci_dev->termios, (struct termios __user *)arg); +#endif + break; + + case TCFLSH: + ret = 0; + break; + + default: + break; + } + } + mutex_unlock(&uci_dev->mutex); + + return ret; +} + +static int mhi_uci_release(struct inode *inode, struct file *file) +{ + struct uci_dev *uci_dev = file->private_data; + + mutex_lock(&uci_dev->mutex); + uci_dev->ref_count--; + if (!uci_dev->ref_count) { + struct uci_chan *uci_chan; + + MSG_LOG("Last client left, closing node\n"); + + if (uci_dev->enabled) + mhi_unprepare_from_transfer(uci_dev->mhi_dev); + + /* clean inbound channel */ + uci_chan = &uci_dev->dl_chan; + if (uci_dev->uci_buf) { + unsigned nr_trb = 0; + + for (nr_trb = 0; nr_trb < uci_dev->nr_trbs; nr_trb++) { + if (uci_dev->uci_buf[nr_trb].page) + __free_pages(uci_dev->uci_buf[nr_trb].page, get_order(uci_dev->mtu)); + } + kfree(uci_dev->uci_buf); + } + + uci_chan->cur_buf = NULL; + + if (!uci_dev->enabled) { + MSG_LOG("Node is deleted, freeing dev node\n"); + mutex_unlock(&uci_dev->mutex); + mutex_destroy(&uci_dev->mutex); + clear_bit(MINOR(uci_dev->devt), uci_minors); + kfree(uci_dev); + return 0; + } + } + + MSG_LOG("exit: ref_count:%d\n", uci_dev->ref_count); + + mutex_unlock(&uci_dev->mutex); + + return 0; +} + +static unsigned int mhi_uci_poll(struct file *file, poll_table *wait) +{ + struct uci_dev *uci_dev = file->private_data; + struct mhi_device *mhi_dev = uci_dev->mhi_dev; + struct uci_chan *uci_chan; + unsigned int mask = 0; + + poll_wait(file, &uci_dev->dl_chan.wq, wait); + // ADPL and QDSS do not need poll write. xingduo.du 2023-02-16 + // poll_wait(file, &uci_dev->ul_chan.wq, wait); + + uci_chan = &uci_dev->dl_chan; + spin_lock_bh(&uci_chan->lock); + if (!uci_dev->enabled) { + mask = POLLERR; + } else if (!list_empty(&uci_chan->pending) || uci_chan->cur_buf) { + MSG_VERB("Client can read from node\n"); + mask |= POLLIN | POLLRDNORM; + } + spin_unlock_bh(&uci_chan->lock); + + // ADPL and QDSS are single channel, so ul_chan is not initialized. 
xingduo.du 2023-02-27 + if (mhi_dev->ul_chan) { + poll_wait(file, &uci_dev->ul_chan.wq, wait); + uci_chan = &uci_dev->ul_chan; + spin_lock_bh(&uci_chan->lock); + if (!uci_dev->enabled) { + mask |= POLLERR; + } else if (mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE) > 0) { + MSG_VERB("Client can write to node\n"); + mask |= POLLOUT | POLLWRNORM; + } + + if (!uci_dev->enabled) + mask |= POLLHUP; + if (uci_dev->rx_error) + mask |= POLLERR; + + spin_unlock_bh(&uci_chan->lock); + } + + MSG_LOG("Client attempted to poll, returning mask 0x%x\n", mask); + + return mask; +} + +static ssize_t mhi_uci_write(struct file *file, + const char __user *buf, + size_t count, + loff_t *offp) +{ + struct uci_dev *uci_dev = file->private_data; + struct mhi_device *mhi_dev = uci_dev->mhi_dev; + struct uci_chan *uci_chan = &uci_dev->ul_chan; + size_t bytes_xfered = 0; + int ret, nr_avail; + + if (!buf || !count || uci_dev->rx_error) + return -EINVAL; + + /* confirm channel is active */ + spin_lock_bh(&uci_chan->lock); + if (!uci_dev->enabled) { + spin_unlock_bh(&uci_chan->lock); + return -ERESTARTSYS; + } + + MSG_VERB("Enter: to xfer:%zu bytes\n", count); + + while (count) { + size_t xfer_size; + void *kbuf; + enum MHI_FLAGS flags; + + spin_unlock_bh(&uci_chan->lock); + + nr_avail = mhi_get_no_free_descriptors(mhi_dev, DMA_TO_DEVICE); + if ((nr_avail == 0) && (file->f_flags & O_NONBLOCK)) + return -EAGAIN; + + /* wait for free descriptors */ + ret = wait_event_interruptible(uci_chan->wq, + (!uci_dev->enabled) || + (nr_avail = mhi_get_no_free_descriptors(mhi_dev, + DMA_TO_DEVICE)) > 0); + + if (ret == -ERESTARTSYS || !uci_dev->enabled) { + MSG_LOG("Exit signal caught for node or not enabled\n"); + return -ERESTARTSYS; + } + + xfer_size = min_t(size_t, count, uci_dev->mtu); + kbuf = kmalloc(xfer_size, GFP_KERNEL); + if (!kbuf) { + MSG_ERR("Failed to allocate memory %zu\n", xfer_size); + return -ENOMEM; + } + + ret = copy_from_user(kbuf, buf, xfer_size); + if (unlikely(ret)) { + kfree(kbuf); + return ret; + } + + spin_lock_bh(&uci_chan->lock); + + /* if ring is full after this force EOT */ + if (nr_avail > 1 && (count - xfer_size)) + flags = MHI_CHAIN; + else + flags = MHI_EOT; + + if (uci_dev->enabled) + ret = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, kbuf, + xfer_size, flags); + else + ret = -ERESTARTSYS; + + if (ret) { + kfree(kbuf); + goto sys_interrupt; + } + + bytes_xfered += xfer_size; + count -= xfer_size; + buf += xfer_size; + } + + spin_unlock_bh(&uci_chan->lock); + MSG_VERB("Exit: Number of bytes xferred:%zu\n", bytes_xfered); + + return bytes_xfered; + +sys_interrupt: + spin_unlock_bh(&uci_chan->lock); + + return ret; +} + +static ssize_t mhi_uci_read(struct file *file, + char __user *buf, + size_t count, + loff_t *ppos) +{ + struct uci_dev *uci_dev = file->private_data; + struct mhi_device *mhi_dev = uci_dev->mhi_dev; + struct uci_chan *uci_chan = &uci_dev->dl_chan; + struct uci_buf *uci_buf; + char *ptr; + size_t to_copy; + int ret = 0; + + if (!buf || uci_dev->rx_error) + return -EINVAL; + + MSG_VERB("Client provided buf len:%zu\n", count); + + /* confirm channel is active */ + spin_lock_bh(&uci_chan->lock); + if (!uci_dev->enabled) { + spin_unlock_bh(&uci_chan->lock); + return -ERESTARTSYS; + } + + /* No data available to read, wait */ + if (!uci_chan->cur_buf && list_empty(&uci_chan->pending)) { + MSG_VERB("No data available to read waiting\n"); + + spin_unlock_bh(&uci_chan->lock); + + if (file->f_flags & O_NONBLOCK) + return -EAGAIN; + + ret = wait_event_interruptible(uci_chan->wq, + 
(!uci_dev->enabled || + !list_empty(&uci_chan->pending))); + if (ret == -ERESTARTSYS) { + MSG_LOG("Exit signal caught for node\n"); + return -ERESTARTSYS; + } + + spin_lock_bh(&uci_chan->lock); + if (!uci_dev->enabled) { + MSG_LOG("node is disabled\n"); + ret = -ERESTARTSYS; + goto read_error; + } + } + + /* new read, get the next descriptor from the list */ + if (!uci_chan->cur_buf) { + uci_buf = list_first_entry_or_null(&uci_chan->pending, + struct uci_buf, node); + if (unlikely(!uci_buf)) { + ret = -EIO; + goto read_error; + } + + if (uci_buf->node.next == LIST_POISON1 || uci_buf->node.prev == LIST_POISON1) { + dump_stack(); + ret = -EIO; + MSG_ERR("chan[%d] data=%p, len=%zd, nr_trb=%d\n", + mhi_dev->dl_chan_id, uci_buf->data, uci_buf->len, uci_buf->nr_trb); + goto read_error; + } + + list_del(&uci_buf->node); + uci_chan->cur_buf = uci_buf; + uci_chan->rx_size = uci_buf->len; + MSG_VERB("Got pkt of size:%zu\n", uci_chan->rx_size); + } + + uci_buf = uci_chan->cur_buf; + spin_unlock_bh(&uci_chan->lock); + + /* Copy the buffer to user space */ + to_copy = min_t(size_t, count, uci_chan->rx_size); + ptr = uci_buf->data + (uci_buf->len - uci_chan->rx_size); + ret = copy_to_user(buf, ptr, to_copy); + if (ret) + return ret; + + MSG_VERB("Copied %zu of %zu bytes\n", to_copy, uci_chan->rx_size); + uci_chan->rx_size -= to_copy; + + /* we finished with this buffer, queue it back to hardware */ + if (!uci_chan->rx_size) { + spin_lock_bh(&uci_chan->lock); + uci_chan->cur_buf = NULL; + + if (uci_dev->enabled) +#if 1 // this keeps the buffer addresses in the ring unchanged + { + if (uci_buf->page) { + unsigned nr_trb = uci_buf->nr_trb ? (uci_buf->nr_trb - 1) : (uci_dev->nr_trbs - 1); + + uci_buf = &uci_dev->uci_buf[nr_trb]; + ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, + uci_buf->data, uci_dev->mtu, + MHI_EOT); + } else { + kfree(uci_buf); + ret = 0; + } + } +#endif + else + ret = -ERESTARTSYS; + + if (ret) { + MSG_ERR("Failed to recycle element for chan:%d , ret=%d\n", mhi_dev->ul_chan_id, ret); +#if 0 + kfree(uci_buf->data); +#endif + goto read_error; + } + + spin_unlock_bh(&uci_chan->lock); + } + + MSG_VERB("Returning %zu bytes\n", to_copy); + + return to_copy; + +read_error: + spin_unlock_bh(&uci_chan->lock); + + return ret; +} + +static ssize_t mhi_uci_write_mutex(struct file *file, + const char __user *buf, + size_t count, + loff_t *offp) +{ + struct uci_dev *uci_dev = file->private_data; + int ret; + + ret = mutex_lock_interruptible(&uci_dev->w_mutex); /* concurrent writes */ + if (ret < 0) + return -ERESTARTSYS; + + ret = mhi_uci_write(file, buf, count, offp); + mutex_unlock(&uci_dev->w_mutex); + + return ret; +} + +static ssize_t mhi_uci_read_mutex(struct file *file, + char __user *buf, + size_t count, + loff_t *ppos) +{ + struct uci_dev *uci_dev = file->private_data; + int ret; + + ret = mutex_lock_interruptible(&uci_dev->r_mutex); /* concurrent reads */ + if (ret < 0) + return -ERESTARTSYS; + + ret = mhi_uci_read(file, buf, count, ppos); + mutex_unlock(&uci_dev->r_mutex); + + return ret; +} + +static int mhi_uci_open(struct inode *inode, struct file *filp) +{ + struct uci_dev *uci_dev = NULL, *tmp_dev; + int ret = -EIO; + struct uci_chan *dl_chan; + + mutex_lock(&mhi_uci_drv.lock); + list_for_each_entry(tmp_dev, &mhi_uci_drv.head, node) { + if (tmp_dev->devt == inode->i_rdev) { + uci_dev = tmp_dev; + break; + } + } + + /* could not find a minor node */ + if (!uci_dev) + goto error_exit; + + mutex_lock(&uci_dev->mutex); + if (!uci_dev->enabled) { + MSG_ERR("Node exists, but not in active 
state!\n"); + goto error_open_chan; + } + + uci_dev->ref_count++; + + MSG_LOG("Node open, ref counts %u\n", uci_dev->ref_count); + + if (uci_dev->ref_count == 1) { + MSG_LOG("Starting channel\n"); + ret = mhi_prepare_for_transfer(uci_dev->mhi_dev); + if (ret) { + MSG_ERR("Error starting transfer channels\n"); + uci_dev->ref_count--; + goto error_open_chan; + } + + ret = mhi_queue_inbound(uci_dev); + if (ret) + goto error_rx_queue; + +#ifdef QUEC_MHI_UCI_ALWAYS_OPEN + uci_dev->ref_count++; +#endif + } + + filp->private_data = uci_dev; + mutex_unlock(&uci_dev->mutex); + mutex_unlock(&mhi_uci_drv.lock); + + return 0; + + error_rx_queue: + dl_chan = &uci_dev->dl_chan; + mhi_unprepare_from_transfer(uci_dev->mhi_dev); + if (uci_dev->uci_buf) { + unsigned nr_trb = 0; + + for (nr_trb = 0; nr_trb < uci_dev->nr_trbs; nr_trb++) { + if (uci_dev->uci_buf[nr_trb].page) + __free_pages(uci_dev->uci_buf[nr_trb].page, get_order(uci_dev->mtu)); + } + kfree(uci_dev->uci_buf); + } + + error_open_chan: + mutex_unlock(&uci_dev->mutex); + +error_exit: + mutex_unlock(&mhi_uci_drv.lock); + + return ret; +} + +static const struct file_operations mhidev_fops = { + .open = mhi_uci_open, + .release = mhi_uci_release, + .read = mhi_uci_read_mutex, + .write = mhi_uci_write_mutex, + .poll = mhi_uci_poll, + .unlocked_ioctl = mhi_uci_ioctl, +}; + +static void mhi_uci_remove(struct mhi_device *mhi_dev) +{ + struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev); + + MSG_LOG("Enter\n"); + + + mutex_lock(&mhi_uci_drv.lock); + mutex_lock(&uci_dev->mutex); + + /* disable the node */ + spin_lock_irq(&uci_dev->dl_chan.lock); + spin_lock_irq(&uci_dev->ul_chan.lock); + uci_dev->enabled = false; + spin_unlock_irq(&uci_dev->ul_chan.lock); + spin_unlock_irq(&uci_dev->dl_chan.lock); + wake_up(&uci_dev->dl_chan.wq); + wake_up(&uci_dev->ul_chan.wq); + + /* delete the node to prevent new opens */ + device_destroy(mhi_uci_drv.class, uci_dev->devt); + uci_dev->dev = NULL; + list_del(&uci_dev->node); + +#ifdef QUEC_MHI_UCI_ALWAYS_OPEN + if (uci_dev->ref_count > 0) + uci_dev->ref_count--; +#endif + + /* safe to free memory only if all file nodes are closed */ + if (!uci_dev->ref_count) { + mutex_unlock(&uci_dev->mutex); + mutex_destroy(&uci_dev->mutex); + clear_bit(MINOR(uci_dev->devt), uci_minors); + kfree(uci_dev); + mutex_unlock(&mhi_uci_drv.lock); + return; + } + + MSG_LOG("Exit\n"); + mutex_unlock(&uci_dev->mutex); + mutex_unlock(&mhi_uci_drv.lock); + +} + +static int mhi_uci_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) +{ + struct uci_dev *uci_dev; + int minor; + char node_name[32]; + int dir; + + uci_dev = kzalloc(sizeof(*uci_dev), GFP_KERNEL); + if (!uci_dev) + return -ENOMEM; + + mutex_init(&uci_dev->mutex); + mutex_init(&uci_dev->r_mutex); + mutex_init(&uci_dev->w_mutex); + uci_dev->mhi_dev = mhi_dev; + + minor = find_first_zero_bit(uci_minors, MAX_UCI_DEVICES); + if (minor >= MAX_UCI_DEVICES) { + kfree(uci_dev); + return -ENOSPC; + } + + mutex_lock(&uci_dev->mutex); + mutex_lock(&mhi_uci_drv.lock); + + uci_dev->devt = MKDEV(mhi_uci_drv.major, minor); +#if 1 + if (mhi_dev->mhi_cntrl->cntrl_idx) + uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev, + uci_dev->devt, uci_dev, + DEVICE_NAME "_%s%d", + mhi_dev->chan_name, mhi_dev->mhi_cntrl->cntrl_idx); + else + uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev, + uci_dev->devt, uci_dev, + DEVICE_NAME "_%s", + mhi_dev->chan_name); +#else + uci_dev->dev = device_create(mhi_uci_drv.class, &mhi_dev->dev, + uci_dev->devt, uci_dev, + DEVICE_NAME 
"_%04x_%02u.%02u.%02u%s%d", + mhi_dev->dev_id, mhi_dev->domain, + mhi_dev->bus, mhi_dev->slot, "_pipe_", + mhi_dev->ul_chan_id); +#endif + + set_bit(minor, uci_minors); + + /* create debugging buffer */ + snprintf(node_name, sizeof(node_name), "mhi_uci_%04x_%02u.%02u.%02u_%d", + mhi_dev->dev_id, mhi_dev->domain, mhi_dev->bus, mhi_dev->slot, + mhi_dev->ul_chan_id); + + for (dir = 0; dir < 2; dir++) { + struct uci_chan *uci_chan = (dir) ? + &uci_dev->ul_chan : &uci_dev->dl_chan; + spin_lock_init(&uci_chan->lock); + init_waitqueue_head(&uci_chan->wq); + INIT_LIST_HEAD(&uci_chan->pending); + } + + uci_dev->termios = tty_std_termios; + + uci_dev->mtu = min_t(size_t, id->driver_data, mhi_dev->mtu); + mhi_device_set_devdata(mhi_dev, uci_dev); + uci_dev->enabled = true; + + list_add(&uci_dev->node, &mhi_uci_drv.head); + mutex_unlock(&mhi_uci_drv.lock); + mutex_unlock(&uci_dev->mutex); + + MSG_LOG("channel:%s successfully probed\n", mhi_dev->chan_name); + + return 0; +}; + +static void mhi_ul_xfer_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev); + struct uci_chan *uci_chan = &uci_dev->ul_chan; + + MSG_VERB("status:%d xfer_len:%zu\n", mhi_result->transaction_status, + mhi_result->bytes_xferd); + + kfree(mhi_result->buf_addr); + if (!mhi_result->transaction_status) + wake_up(&uci_chan->wq); +} + +static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev, + struct mhi_result *mhi_result) +{ + struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev); + struct uci_chan *uci_chan = &uci_dev->dl_chan; + unsigned long flags; + struct uci_buf *buf; + unsigned nr_trb = uci_dev->nr_trb; + + buf = &uci_dev->uci_buf[nr_trb]; + if (buf == NULL) { + MSG_ERR("buf = NULL"); + return; + } + if (buf->nr_trb != nr_trb || buf->data != mhi_result->buf_addr) + { + uci_dev->rx_error++; + MSG_ERR("chan[%d]: uci_buf[%u] = %p , mhi_result[%u] = %p\n", + mhi_dev->dl_chan_id, buf->nr_trb, buf->data, nr_trb, mhi_result->buf_addr); + return; + } + + uci_dev->nr_trb++; + if (uci_dev->nr_trb == uci_dev->nr_trbs) + uci_dev->nr_trb = 0; + + if (mhi_result->transaction_status == -ENOTCONN) { + return; + } + + if (mhi_result->bytes_xferd > uci_dev->mtu || mhi_result->bytes_xferd <= 0) + { + MSG_ERR("chan[%d]: bytes_xferd = %zd , mtu = %zd\n", + mhi_dev->dl_chan_id, mhi_result->bytes_xferd, uci_dev->mtu); + return; + } + if (mhi_result->bytes_xferd > uci_dev->bytes_xferd) + { + uci_dev->bytes_xferd = mhi_result->bytes_xferd; + //MSG_ERR("chan[%d]: bytes_xferd = %zd , mtu = %zd\n", + // mhi_dev->dl_chan_id, mhi_result->bytes_xferd, uci_dev->mtu); + } + + MSG_VERB("status:%d receive_len:%zu\n", mhi_result->transaction_status, + mhi_result->bytes_xferd); + + spin_lock_irqsave(&uci_chan->lock, flags); +#if 0 + buf = mhi_result->buf_addr + uci_dev->mtu; + buf->data = mhi_result->buf_addr; +#endif + buf->len = mhi_result->bytes_xferd; + if (mhi_dev->dl_chan_id == MHI_CLIENT_DUN_IN + || mhi_dev->dl_chan_id == MHI_CLIENT_QMI_IN + || mhi_dev->dl_chan_id == MHI_CLIENT_MBIM_IN) + { + struct uci_buf *tmp_buf = NULL; + int skip_buf = 0; + +#ifdef QUEC_MHI_UCI_ALWAYS_OPEN + if (uci_dev->ref_count == 1) + skip_buf++; +#endif + if (!skip_buf) + tmp_buf = (struct uci_buf *)kmalloc(buf->len + sizeof(struct uci_buf), GFP_ATOMIC);; + + if (tmp_buf) { + tmp_buf->page = NULL; + tmp_buf->data = ((void *)tmp_buf) + sizeof(struct uci_buf); + tmp_buf->len = buf->len; + memcpy(tmp_buf->data, buf->data, buf->len); + } + + if (buf) { + struct uci_buf *uci_buf = buf; + unsigned nr_trb = 
uci_buf->nr_trb ? (uci_buf->nr_trb - 1) : (uci_dev->nr_trbs - 1); + + uci_buf = &uci_dev->uci_buf[nr_trb]; + mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, uci_buf->data, uci_dev->mtu, MHI_EOT); + } + + buf = tmp_buf; + } + + if (buf) + list_add_tail(&buf->node, &uci_chan->pending); + spin_unlock_irqrestore(&uci_chan->lock, flags); + +#ifdef CONFIG_PM_SLEEP + if (mhi_dev->dev.power.wakeup) + __pm_wakeup_event(mhi_dev->dev.power.wakeup, 0); +#endif + + wake_up(&uci_chan->wq); +} + +// fix: sdx6x module cannot read qdb file. xingduo.du 2023-01-18 +#define DIAG_MAX_PCIE_PKT_SZ 8192 // defined by the module + +/* .driver_data stores max mtu */ +static const struct mhi_device_id mhi_uci_match_table[] = { + { .chan = "LOOPBACK", .driver_data = 0x1000 }, + { .chan = "SAHARA", .driver_data = 0x4000 }, + { .chan = "EDL", .driver_data = 0x4000 }, + { .chan = "DIAG", .driver_data = DIAG_MAX_PCIE_PKT_SZ }, + { .chan = "MBIM", .driver_data = 0x1000 }, + { .chan = "QMI0", .driver_data = 0x1000 }, + { .chan = "QMI1", .driver_data = 0x1000 }, + { .chan = "DUN", .driver_data = 0x1000 }, +#ifdef ENABLE_ADPL + { .chan = "ADPL", .driver_data = 0x1000 }, +#endif +#ifdef ENABLE_QDSS + { .chan = "QDSS", .driver_data = 0x1000 }, +#endif + {}, +}; + +static struct mhi_driver mhi_uci_driver = { + .id_table = mhi_uci_match_table, + .remove = mhi_uci_remove, + .probe = mhi_uci_probe, + .ul_xfer_cb = mhi_ul_xfer_cb, + .dl_xfer_cb = mhi_dl_xfer_cb, + .driver = { + .name = MHI_UCI_DRIVER_NAME, + .owner = THIS_MODULE, + }, +}; + +int mhi_device_uci_init(void) +{ + int ret; + + ret = register_chrdev(0, MHI_UCI_DRIVER_NAME, &mhidev_fops); + if (ret < 0) + return ret; + + mhi_uci_drv.major = ret; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0)) + mhi_uci_drv.class = class_create(MHI_UCI_DRIVER_NAME); +#else + mhi_uci_drv.class = class_create(THIS_MODULE, MHI_UCI_DRIVER_NAME); +#endif + if (IS_ERR(mhi_uci_drv.class)) { + unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME); + return -ENODEV; + } + + mutex_init(&mhi_uci_drv.lock); + INIT_LIST_HEAD(&mhi_uci_drv.head); + + ret = mhi_driver_register(&mhi_uci_driver); + if (ret) { + class_destroy(mhi_uci_drv.class); + unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME); + } + + return ret; +} + +void mhi_device_uci_exit(void) +{ + mhi_driver_unregister(&mhi_uci_driver); + class_destroy(mhi_uci_drv.class); + unregister_chrdev(mhi_uci_drv.major, MHI_UCI_DRIVER_NAME); +} diff --git a/wwan/driver/quectel_MHI/src/devices/rmnet/Kconfig b/wwan/driver/quectel_MHI/src/devices/rmnet/Kconfig new file mode 100644 index 0000000..9bb06d2 --- /dev/null +++ b/wwan/driver/quectel_MHI/src/devices/rmnet/Kconfig @@ -0,0 +1,13 @@ +# +# RMNET MAP driver +# + +menuconfig RMNET + tristate "RmNet MAP driver" + default n + select GRO_CELLS + ---help--- + If you select this, you will enable the RMNET module which is used + for handling data in the multiplexing and aggregation protocol (MAP) + format in the embedded data path. RMNET devices can be attached to + any IP mode physical device. 
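As context for the UCI pieces above: a hypothetical userspace client for the character nodes created by mhi_uci_probe() could look like the sketch below (not part of this patch). It assumes a probed "DIAG" channel on a controller with cntrl_idx == 0, so the node is /dev/mhi_DIAG; a non-zero controller index appends the index, e.g. /dev/mhi_DIAG1.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[8192];		/* sized to DIAG_MAX_PCIE_PKT_SZ above */
	struct pollfd pfd;
	ssize_t n;

	/* the first open starts the channel and queues the inbound buffers */
	pfd.fd = open("/dev/mhi_DIAG", O_RDWR | O_NONBLOCK);
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLIN;

	/* mhi_uci_poll() raises POLLIN while dl_chan.pending is non-empty,
	 * and POLLERR once the device is disabled */
	while (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLIN)) {
		n = read(pfd.fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		printf("read %zd bytes\n", n);
	}

	close(pfd.fd);
	return 0;
}

With QUEC_MHI_UCI_ALWAYS_OPEN defined, the extra reference taken on first open keeps the channel started even after the last close, matching the sdx20 limitation noted above.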
diff --git a/wwan/driver/quectel_MHI/src/devices/rmnet/Makefile b/wwan/driver/quectel_MHI/src/devices/rmnet/Makefile new file mode 100644 index 0000000..b175fbb --- /dev/null +++ b/wwan/driver/quectel_MHI/src/devices/rmnet/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the RMNET module +# + +rmnet-y := rmnet_config.o +rmnet-y += rmnet_vnd.o +rmnet-y += rmnet_handlers.o +rmnet-y += rmnet_map_data.o +rmnet-y += rmnet_map_command.o +rmnet-y += rmnet_descriptor.o +obj-$(CONFIG_RMNET) += rmnet.o diff --git a/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_config.c b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_config.c new file mode 100644 index 0000000..c5ec0c8 --- /dev/null +++ b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_config.c @@ -0,0 +1,141 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * RMNET configuration engine + * + */ + +#include +#include +#include +#include +#include +#include "rmnet_config.h" +#include "rmnet_handlers.h" +#include "rmnet_vnd.h" +#include "rmnet_private.h" +#include "rmnet_map.h" +#include "rmnet_descriptor.h" + +/* Locking scheme - + * The shared resource which needs to be protected is realdev->rx_handler_data. + * For the writer path, this is using rtnl_lock(). The writer paths are + * rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These + * paths are already called with rtnl_lock() acquired in. There is also an + * ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For + * dereference here, we will need to use rtnl_dereference(). Dev list writing + * needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link(). + * For the reader path, the real_dev->rx_handler_data is called in the TX / RX + * path. We only need rcu_read_lock() for these scenarios. In these cases, + * the rcu_read_lock() is held in __dev_queue_xmit() and + * netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl() + * to get the relevant information. For dev list reading, we again acquire + * rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu(). + * We also use unregister_netdevice_many() to free all rmnet devices in + * rmnet_force_unassociate_device() so we dont lose the rtnl_lock() and free in + * same context. 
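+ *
+ * For example, a data-path reader follows this pattern (sketch):
+ *	rcu_read_lock();
+ *	port = rcu_dereference_rtnl(real_dev->rx_handler_data);
+ *	if (port)
+ *		...
+ *	rcu_read_unlock();
+ * while writer paths such as rmnet_newlink() already hold rtnl_lock() and
+ * use rtnl_dereference() instead.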
+ */ + +/* Local Definitions and Declarations */ + +static int rmnet_is_real_dev_registered(const struct net_device *real_dev) +{ + return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler; +} + +/* Needs rtnl lock */ +static struct rmnet_port* +rmnet_get_port_rtnl(const struct net_device *real_dev) +{ + return rtnl_dereference(real_dev->rx_handler_data); +} + +static int rmnet_unregister_real_device(struct net_device *real_dev, + struct rmnet_port *port) +{ + if (port->nr_rmnet_devs) + return -EINVAL; + + rmnet_map_cmd_exit(port); + rmnet_map_tx_aggregate_exit(port); + + rmnet_descriptor_deinit(port); + + kfree(port); + + netdev_rx_handler_unregister(real_dev); + + /* release reference on real_dev */ + dev_put(real_dev); + + netdev_dbg(real_dev, "Removed from rmnet\n"); + return 0; +} + +static int rmnet_register_real_device(struct net_device *real_dev) +{ + struct rmnet_port *port; + int rc, entry; + + ASSERT_RTNL(); + + if (rmnet_is_real_dev_registered(real_dev)) + return 0; + + port = kzalloc(sizeof(*port), GFP_ATOMIC); + if (!port) + return -ENOMEM; + + port->dev = real_dev; + rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port); + if (rc) { + kfree(port); + return -EBUSY; + } + /* hold on to real dev for MAP data */ + dev_hold(real_dev); + + for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++) + INIT_HLIST_HEAD(&port->muxed_ep[entry]); + + rc = rmnet_descriptor_init(port); + if (rc) { + rmnet_descriptor_deinit(port); + return rc; + } + + rmnet_map_tx_aggregate_init(port); + rmnet_map_cmd_init(port); + + netdev_dbg(real_dev, "registered with rmnet\n"); + return 0; +} + +/* Needs either rcu_read_lock() or rtnl lock */ +static struct rmnet_port *rmnet_get_port(struct net_device *real_dev) +{ + if (rmnet_is_real_dev_registered(real_dev)) + return rcu_dereference_rtnl(real_dev->rx_handler_data); + else + return NULL; +} + +static struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id) +{ + struct rmnet_endpoint *ep; + + hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode) { + if (ep->mux_id == mux_id) + return ep; + } + + return NULL; +} diff --git a/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_config.h b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_config.h new file mode 100644 index 0000000..c74fcdf --- /dev/null +++ b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_config.h @@ -0,0 +1,174 @@ +/* Copyright (c) 2013-2017, 2019 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * RMNET Data configuration engine + * + */ + +#include +#include + +#ifndef _RMNET_CONFIG_H_ +#define _RMNET_CONFIG_H_ + +#define RMNET_MAX_LOGICAL_EP 255 +#define RMNET_MAX_VEID 4 + +struct rmnet_endpoint { + u8 mux_id; + struct net_device *egress_dev; + struct hlist_node hlnode; +}; + +struct rmnet_port_priv_stats { + u64 dl_hdr_last_qmap_vers; + u64 dl_hdr_last_ep_id; + u64 dl_hdr_last_trans_id; + u64 dl_hdr_last_seq; + u64 dl_hdr_last_bytes; + u64 dl_hdr_last_pkts; + u64 dl_hdr_last_flows; + u64 dl_hdr_count; + u64 dl_hdr_total_bytes; + u64 dl_hdr_total_pkts; + u64 dl_trl_last_seq; + u64 dl_trl_count; +}; + +struct rmnet_egress_agg_params { + u16 agg_size; + u16 agg_count; + u32 agg_time; +}; + +/* One instance of this structure is instantiated for each real_dev associated + * with rmnet. + */ +struct rmnet_port { + struct net_device *dev; + u32 data_format; + u8 nr_rmnet_devs; + u8 rmnet_mode; + struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP]; + struct net_device *bridge_ep; + void *rmnet_perf; + + struct rmnet_egress_agg_params egress_agg_params; + + /* Protect aggregation related elements */ + spinlock_t agg_lock; + + struct sk_buff *agg_skb; + int agg_state; + u8 agg_count; + struct timespec agg_time; + struct timespec agg_last; + struct hrtimer hrtimer; + struct work_struct agg_wq; + + /* dl marker elements */ + struct list_head dl_list; + struct rmnet_port_priv_stats stats; + int dl_marker_flush; + + /* Descriptor pool */ + spinlock_t desc_pool_lock; + struct rmnet_frag_descriptor_pool *frag_desc_pool; + struct sk_buff *chain_head; + struct sk_buff *chain_tail; +}; + +extern struct rtnl_link_ops rmnet_link_ops; + +struct rmnet_vnd_stats { + u64 rx_pkts; + u64 rx_bytes; + u64 tx_pkts; + u64 tx_bytes; + u32 tx_drops; +}; + +struct rmnet_pcpu_stats { + struct rmnet_vnd_stats stats; + struct u64_stats_sync syncp; +}; + +struct rmnet_coal_close_stats { + u64 non_coal; + u64 ip_miss; + u64 trans_miss; + u64 hw_nl; + u64 hw_pkt; + u64 hw_byte; + u64 hw_time; + u64 hw_evict; + u64 coal; +}; + +struct rmnet_coal_stats { + u64 coal_rx; + u64 coal_pkts; + u64 coal_hdr_nlo_err; + u64 coal_hdr_pkt_err; + u64 coal_csum_err; + u64 coal_reconstruct; + u64 coal_ip_invalid; + u64 coal_trans_invalid; + struct rmnet_coal_close_stats close; + u64 coal_veid[RMNET_MAX_VEID]; +}; + +struct rmnet_priv_stats { + u64 csum_ok; + u64 csum_valid_unset; + u64 csum_validation_failed; + u64 csum_err_bad_buffer; + u64 csum_err_invalid_ip_version; + u64 csum_err_invalid_transport; + u64 csum_fragmented_pkt; + u64 csum_skipped; + u64 csum_sw; + u64 csum_hw; + struct rmnet_coal_stats coal; +}; + +struct rmnet_priv { + u8 mux_id; + struct net_device *real_dev; + struct rmnet_pcpu_stats __percpu *pcpu_stats; + struct gro_cells gro_cells; + struct rmnet_priv_stats stats; +}; + +enum rmnet_dl_marker_prio { + RMNET_PERF, + RMNET_SHS, +}; + +enum rmnet_trace_func { + RMNET_MODULE, + NW_STACK_MODULE, +}; + +enum rmnet_trace_evt { + RMNET_DLVR_SKB, + RMNET_RCV_FROM_PND, + RMNET_TX_UL_PKT, + NW_STACK_DEV_Q_XMIT, + NW_STACK_NAPI_GRO_FLUSH, + NW_STACK_RX, + NW_STACK_TX, +}; + +static int rmnet_is_real_dev_registered(const struct net_device *real_dev); +static struct rmnet_port *rmnet_get_port(struct net_device *real_dev); +static struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id); +#endif /* _RMNET_CONFIG_H_ */ diff --git a/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_data.c b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_data.c new file mode 100644 index 0000000..ad8953c --- /dev/null 
+++ b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_data.c @@ -0,0 +1,1150 @@ +#if 0 + +#define RMNET_MAX_PACKET_SIZE 16384 +#define RMNET_DFLT_PACKET_SIZE 1500 +#define RMNET_NEEDED_HEADROOM 16 +#define RMNET_TX_QUEUE_LEN 1000 + +#define RMNET_MAX_LOGICAL_EP 255 +#define RMNET_MAP_DESC_HEADROOM 128 +#define RMNET_FRAG_DESCRIPTOR_POOL_SIZE 64 + +/* Pass the frame up the stack with no modifications to skb->dev */ +#define RMNET_EPMODE_NONE (0) +/* Replace skb->dev to a virtual rmnet device and pass up the stack */ +#define RMNET_EPMODE_VND (1) +/* Pass the frame directly to another device with dev_queue_xmit() */ +#define RMNET_EPMODE_BRIDGE (2) + +/* rmnet section */ + +#define RMNET_FLAGS_INGRESS_DEAGGREGATION (1U << 0) +#define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1) +#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2) +#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3) +#define RMNET_FLAGS_INGRESS_COALESCE (1U << 4) +#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 5) +#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 (1U << 6) + +enum rmnet_map_v5_header_type { + RMNET_MAP_HEADER_TYPE_UNKNOWN, + RMNET_MAP_HEADER_TYPE_COALESCING = 0x1, + RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2, + RMNET_MAP_HEADER_TYPE_ENUM_LENGTH +}; + +/* Main QMAP header */ +struct rmnet_map_header { + u8 pad_len:6; + u8 next_hdr:1; + u8 cd_bit:1; + u8 mux_id; + __be16 pkt_len; +} __aligned(1); + +/* QMAP v5 headers */ +struct rmnet_map_v5_csum_header { + u8 next_hdr:1; + u8 header_type:7; + u8 hw_reserved:7; + u8 csum_valid_required:1; + __be16 reserved; +} __aligned(1); + +struct rmnet_map_v5_nl_pair { + __be16 pkt_len; + u8 csum_error_bitmap; + u8 num_packets; +} __aligned(1); + +/* NLO: Number-length object */ +#define RMNET_MAP_V5_MAX_NLOS (6) +#define RMNET_MAP_V5_MAX_PACKETS (48) + +struct rmnet_map_v5_coal_header { + u8 next_hdr:1; + u8 header_type:7; + u8 reserved1:4; + u8 num_nlos:3; + u8 csum_valid:1; + u8 close_type:4; + u8 close_value:4; + u8 reserved2:4; + u8 virtual_channel_id:4; + + struct rmnet_map_v5_nl_pair nl_pairs[RMNET_MAP_V5_MAX_NLOS]; +} __aligned(1); + +/* QMAP v4 headers */ +struct rmnet_map_dl_csum_trailer { + u8 reserved1; + u8 valid:1; + u8 reserved2:7; + u16 csum_start_offset; + u16 csum_length; + __be16 csum_value; +} __aligned(1); + +struct rmnet_frag_descriptor_pool { + struct list_head free_list; + u32 pool_size; +}; + +struct rmnet_frag_descriptor { + struct list_head list; + struct list_head sub_frags; + skb_frag_t frag; + u8 *hdr_ptr; + struct net_device *dev; + u32 hash; + __be32 tcp_seq; + __be16 ip_id; + u16 data_offset; + u16 gso_size; + u16 gso_segs; + u16 ip_len; + u16 trans_len; + u8 ip_proto; + u8 trans_proto; + u8 pkt_id; + u8 csum_valid:1, + hdrs_valid:1, + ip_id_set:1, + tcp_seq_set:1, + flush_shs:1, + reserved:3; +}; + +struct rmnet_endpoint { + u8 rmnet_mode; + u8 mux_id; + struct net_device *rmnet_dev; +}; + +/* One instance of this structure is instantiated for each real_dev associated + * with rmnet. 
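+ * (This copy lives under the #if 0 above and is compiled out: it keeps a
+ * flat muxed_ep[16] array, while the live build uses the hlist-based
+ * struct rmnet_port from rmnet_config.h instead.)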
+ */ +struct rmnet_port { + struct net_device *dev; + u8 rmnet_mode; + u32 data_format; + u32 nr_rmnet_devs; + struct rmnet_endpoint muxed_ep[16]; + + /* Descriptor pool */ + spinlock_t desc_pool_lock; + struct rmnet_frag_descriptor_pool *frag_desc_pool; + struct sk_buff *chain_head; + struct sk_buff *chain_tail; +}; + +static struct sk_buff * add_qhdr_v5(struct sk_buff *skb, u8 mux_id) +{ + struct rmnet_map_header *map_header; + struct rmnet_map_v5_csum_header *ul_header; + u32 padding, map_datalen; + + map_datalen = skb->len; + padding = map_datalen%4; + if (padding) { + padding = 4 - padding; + if (skb_tailroom(skb) < padding) { + printk("skb_tailroom small!\n"); + padding = 0; + } + if (padding) + __skb_put(skb, padding); + } + + map_header = (struct rmnet_map_header *)skb_push(skb, (sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_v5_csum_header))); + + BUILD_BUG_ON((sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_v5_csum_header)) != 8); + + map_header->cd_bit = 0; + map_header->next_hdr = 1; + map_header->pad_len = padding; + map_header->mux_id = mux_id; + map_header->pkt_len = htons(map_datalen + padding); + + ul_header = (struct rmnet_map_v5_csum_header *)(map_header + 1); + memset(ul_header, 0, sizeof(*ul_header)); + ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD; + + return skb; +} + +struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id) +{ + return &port->muxed_ep[0]; +} + +static void +rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port) +{ + struct rmnet_nss_cb *nss_cb; + + //rmnet_vnd_rx_fixup(skb->dev, skb->len); + + /* Pass off the packet to NSS driver if we can */ + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (nss_cb) { + if (!port->chain_head) + port->chain_head = skb; + else + skb_shinfo(port->chain_tail)->frag_list = skb; + + port->chain_tail = skb; + return; + } + + skb_reset_transport_header(skb); + skb_reset_network_header(skb); + + skb->pkt_type = PACKET_HOST; + skb_set_mac_header(skb, 0); + + //if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) { + //} else { + //if (!rmnet_check_skb_can_gro(skb)) + // gro_cells_receive(&priv->gro_cells, skb); + //else + netif_receive_skb(skb); + //} +} + +static inline unsigned char *rmnet_map_data_ptr(struct sk_buff *skb) +{ + /* Nonlinear packets we receive are entirely within frag 0 */ + if (skb_is_nonlinear(skb) && skb->len == skb->data_len) + return skb_frag_address(skb_shinfo(skb)->frags); + + return skb->data; +} + +static inline void *rmnet_frag_data_ptr(struct rmnet_frag_descriptor *frag_desc) +{ + return skb_frag_address(&frag_desc->frag); +} + +static struct rmnet_frag_descriptor * +rmnet_get_frag_descriptor(struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool; + struct rmnet_frag_descriptor *frag_desc; + + spin_lock(&port->desc_pool_lock); + if (!list_empty(&pool->free_list)) { + frag_desc = list_first_entry(&pool->free_list, + struct rmnet_frag_descriptor, + list); + list_del_init(&frag_desc->list); + } else { + frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC); + if (!frag_desc) + goto out; + + INIT_LIST_HEAD(&frag_desc->list); + INIT_LIST_HEAD(&frag_desc->sub_frags); + pool->pool_size++; + } + +out: + spin_unlock(&port->desc_pool_lock); + return frag_desc; +} + +static void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool; + struct page *page = skb_frag_page(&frag_desc->frag); + + 
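+	/* Recycle rather than free: drop the page reference taken by
+	 * rmnet_frag_fill(), wipe the descriptor so no stale metadata
+	 * survives reuse, and return it to the pool's free list under
+	 * desc_pool_lock. The pool only shrinks in rmnet_descriptor_deinit().
+	 */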
list_del(&frag_desc->list); + if (page) + put_page(page); + + memset(frag_desc, 0, sizeof(*frag_desc)); + INIT_LIST_HEAD(&frag_desc->list); + INIT_LIST_HEAD(&frag_desc->sub_frags); + spin_lock(&port->desc_pool_lock); + list_add_tail(&frag_desc->list, &pool->free_list); + spin_unlock(&port->desc_pool_lock); +} + +static inline void rmnet_frag_fill(struct rmnet_frag_descriptor *frag_desc, + struct page *p, u32 page_offset, u32 len) +{ + get_page(p); + __skb_frag_set_page(&frag_desc->frag, p); + skb_frag_size_set(&frag_desc->frag, len); + frag_desc->frag.page_offset = page_offset; +} + +static inline void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port, + unsigned int size) +{ + if (size >= skb_frag_size(&frag_desc->frag)) { + pr_info("%s(): Pulling %u bytes from %u byte pkt. Dropping\n", + __func__, size, skb_frag_size(&frag_desc->frag)); + rmnet_recycle_frag_descriptor(frag_desc, port); + return NULL; + } + + frag_desc->frag.page_offset += size; + skb_frag_size_sub(&frag_desc->frag, size); + + return rmnet_frag_data_ptr(frag_desc); +} + +static inline void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port, + unsigned int size) +{ + if (!size) { + pr_info("%s(): Trimming %u byte pkt to 0. Dropping\n", + __func__, skb_frag_size(&frag_desc->frag)); + rmnet_recycle_frag_descriptor(frag_desc, port); + return NULL; + } + + if (size < skb_frag_size(&frag_desc->frag)) + skb_frag_size_set(&frag_desc->frag, size); + + return rmnet_frag_data_ptr(frag_desc); +} + +static inline u8 +rmnet_frag_get_next_hdr_type(struct rmnet_frag_descriptor *frag_desc) +{ + unsigned char *data = rmnet_frag_data_ptr(frag_desc); + + data += sizeof(struct rmnet_map_header); + return ((struct rmnet_map_v5_coal_header *)data)->header_type; +} + +static inline bool +rmnet_frag_get_csum_valid(struct rmnet_frag_descriptor *frag_desc) +{ + unsigned char *data = rmnet_frag_data_ptr(frag_desc); + + data += sizeof(struct rmnet_map_header); + return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required; +} + +static void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list, + struct page *p, u32 page_offset, u32 len) +{ + struct rmnet_frag_descriptor *frag_desc; + + frag_desc = rmnet_get_frag_descriptor(port); + if (!frag_desc) + return; + + rmnet_frag_fill(frag_desc, p, page_offset, len); + list_add_tail(&frag_desc->list, list); +} + +static void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port, + struct list_head *list) +{ + struct rmnet_map_header *maph; + u8 *data = skb_frag_address(frag); + u32 offset = 0; + u32 packet_len; + + while (offset < skb_frag_size(frag)) { + maph = (struct rmnet_map_header *)data; + packet_len = ntohs(maph->pkt_len); + + /* Some hardware can send us empty frames. 
Catch them */ + if (packet_len == 0) + return; + + packet_len += sizeof(*maph); + + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) { + packet_len += sizeof(struct rmnet_map_dl_csum_trailer); + WARN_ON(1); + } else if (port->data_format & + (RMNET_FLAGS_INGRESS_MAP_CKSUMV5 | + RMNET_FLAGS_INGRESS_COALESCE) && !maph->cd_bit) { + u32 hsize = 0; + u8 type; + + type = ((struct rmnet_map_v5_coal_header *) + (data + sizeof(*maph)))->header_type; + switch (type) { + case RMNET_MAP_HEADER_TYPE_COALESCING: + hsize = sizeof(struct rmnet_map_v5_coal_header); + break; + case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD: + hsize = sizeof(struct rmnet_map_v5_csum_header); + break; + } + + packet_len += hsize; + } + else { + qmap_hex_dump(__func__, data, 64); + WARN_ON(1); + } + + if ((int)skb_frag_size(frag) - (int)packet_len < 0) + return; + + rmnet_descriptor_add_frag(port, list, skb_frag_page(frag), + frag->page_offset + offset, + packet_len); + + offset += packet_len; + data += packet_len; + } +} + + +#define RMNET_IP_VERSION_4 0x40 +#define RMNET_IP_VERSION_6 0x60 + +/* Helper Functions */ + +static void rmnet_set_skb_proto(struct sk_buff *skb) +{ + switch (rmnet_map_data_ptr(skb)[0] & 0xF0) { + case RMNET_IP_VERSION_4: + skb->protocol = htons(ETH_P_IP); + break; + case RMNET_IP_VERSION_6: + skb->protocol = htons(ETH_P_IPV6); + break; + default: + skb->protocol = htons(ETH_P_MAP); + WARN_ON(1); + break; + } +} + +/* Allocate and populate an skb to contain the packet represented by the + * frag descriptor. + */ +static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct sk_buff *head_skb, *current_skb, *skb; + struct skb_shared_info *shinfo; + struct rmnet_frag_descriptor *sub_frag, *tmp; + + /* Use the exact sizes if we know them (i.e. 
RSB/RSC, rmnet_perf) */ + if (frag_desc->hdrs_valid) { + u16 hdr_len = frag_desc->ip_len + frag_desc->trans_len; + + head_skb = alloc_skb(hdr_len + RMNET_MAP_DESC_HEADROOM, + GFP_ATOMIC); + if (!head_skb) + return NULL; + + skb_reserve(head_skb, RMNET_MAP_DESC_HEADROOM); + skb_put_data(head_skb, frag_desc->hdr_ptr, hdr_len); + skb_reset_network_header(head_skb); + + if (frag_desc->trans_len) + skb_set_transport_header(head_skb, frag_desc->ip_len); + + /* Packets that have no data portion don't need any frags */ + if (hdr_len == skb_frag_size(&frag_desc->frag)) + goto skip_frags; + + /* If the headers we added are the start of the page, + * we don't want to add them twice + */ + if (frag_desc->hdr_ptr == rmnet_frag_data_ptr(frag_desc)) { + if (!rmnet_frag_pull(frag_desc, port, hdr_len)) { + kfree_skb(head_skb); + return NULL; + } + } + } else { + /* Allocate enough space to avoid penalties in the stack + * from __pskb_pull_tail() + */ + head_skb = alloc_skb(256 + RMNET_MAP_DESC_HEADROOM, + GFP_ATOMIC); + if (!head_skb) + return NULL; + + skb_reserve(head_skb, RMNET_MAP_DESC_HEADROOM); + } + + /* Add main fragment */ + get_page(skb_frag_page(&frag_desc->frag)); + skb_add_rx_frag(head_skb, 0, skb_frag_page(&frag_desc->frag), + frag_desc->frag.page_offset, + skb_frag_size(&frag_desc->frag), + skb_frag_size(&frag_desc->frag)); + + shinfo = skb_shinfo(head_skb); + current_skb = head_skb; + + /* Add in any frags from rmnet_perf */ + list_for_each_entry_safe(sub_frag, tmp, &frag_desc->sub_frags, list) { + skb_frag_t *frag; + u32 frag_size; + + frag = &sub_frag->frag; + frag_size = skb_frag_size(frag); + +add_frag: + if (shinfo->nr_frags < MAX_SKB_FRAGS) { + get_page(skb_frag_page(frag)); + skb_add_rx_frag(current_skb, shinfo->nr_frags, + skb_frag_page(frag), frag->page_offset, + frag_size, frag_size); + if (current_skb != head_skb) { + head_skb->len += frag_size; + head_skb->data_len += frag_size; + } + } else { + /* Alloc a new skb and try again */ + skb = alloc_skb(0, GFP_ATOMIC); + if (!skb) + break; + + if (current_skb == head_skb) + shinfo->frag_list = skb; + else + current_skb->next = skb; + + current_skb = skb; + shinfo = skb_shinfo(current_skb); + goto add_frag; + } + + rmnet_recycle_frag_descriptor(sub_frag, port); + } + +skip_frags: + head_skb->dev = frag_desc->dev; + rmnet_set_skb_proto(head_skb); + + /* Handle any header metadata that needs to be updated after RSB/RSC + * segmentation + */ + if (frag_desc->ip_id_set) { + struct iphdr *iph; + + iph = (struct iphdr *)rmnet_map_data_ptr(head_skb); + csum_replace2(&iph->check, iph->id, frag_desc->ip_id); + iph->id = frag_desc->ip_id; + } + + if (frag_desc->tcp_seq_set) { + struct tcphdr *th; + + th = (struct tcphdr *) + (rmnet_map_data_ptr(head_skb) + frag_desc->ip_len); + th->seq = frag_desc->tcp_seq; + } + + /* Handle csum offloading */ + if (frag_desc->csum_valid && frag_desc->hdrs_valid) { + /* Set the partial checksum information */ + //rmnet_frag_partial_csum(head_skb, frag_desc); + WARN_ON(1); + } else if (frag_desc->csum_valid) { + /* Non-RSB/RSC/perf packet. The current checksum is fine */ + head_skb->ip_summed = CHECKSUM_UNNECESSARY; + } else if (frag_desc->hdrs_valid && + (frag_desc->trans_proto == IPPROTO_TCP || + frag_desc->trans_proto == IPPROTO_UDP)) { + /* Unfortunately, we have to fake a bad checksum here, since + * the original bad value is lost by the hardware. The only + * reliable way to do it is to calculate the actual checksum + * and corrupt it. 
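+		 * With the checksum field rewritten, ip_summed is left as
+		 * CHECKSUM_NONE below, so the stack recomputes the sum itself
+		 * and sees the intended validation failure.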
+ */ + __sum16 *check; + __wsum csum; + unsigned int offset = skb_transport_offset(head_skb); + __sum16 pseudo; + + WARN_ON(1); + /* Calculate pseudo header and update header fields */ + if (frag_desc->ip_proto == 4) { + struct iphdr *iph = ip_hdr(head_skb); + __be16 tot_len = htons(head_skb->len); + + csum_replace2(&iph->check, iph->tot_len, tot_len); + iph->tot_len = tot_len; + pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + head_skb->len - + frag_desc->ip_len, + frag_desc->trans_proto, 0); + } else { + struct ipv6hdr *ip6h = ipv6_hdr(head_skb); + + ip6h->payload_len = htons(head_skb->len - + sizeof(*ip6h)); + pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, + head_skb->len - + frag_desc->ip_len, + frag_desc->trans_proto, 0); + } + + if (frag_desc->trans_proto == IPPROTO_TCP) { + check = &tcp_hdr(head_skb)->check; + } else { + udp_hdr(head_skb)->len = htons(head_skb->len - + frag_desc->ip_len); + check = &udp_hdr(head_skb)->check; + } + + *check = pseudo; + csum = skb_checksum(head_skb, offset, head_skb->len - offset, + 0); + /* Add 1 to corrupt. This cannot produce a final value of 0 + * since csum_fold() can't return a value of 0xFFFF + */ + *check = csum16_add(csum_fold(csum), htons(1)); + head_skb->ip_summed = CHECKSUM_NONE; + } + + /* Handle any rmnet_perf metadata */ + if (frag_desc->hash) { + head_skb->hash = frag_desc->hash; + head_skb->sw_hash = 1; + } + + if (frag_desc->flush_shs) + head_skb->cb[0] = 1; + + /* Handle coalesced packets */ + //if (frag_desc->gso_segs > 1) + // rmnet_frag_gso_stamp(head_skb, frag_desc); + + return head_skb; +} + +/* Deliver the packets contained within a frag descriptor */ +static void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct sk_buff *skb; + + skb = rmnet_alloc_skb(frag_desc, port); + if (skb) + rmnet_deliver_skb(skb, port); + rmnet_recycle_frag_descriptor(frag_desc, port); +} + +/* Process a QMAPv5 packet header */ +static int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port, + struct list_head *list, + u16 len) +{ + int rc = 0; + + switch (rmnet_frag_get_next_hdr_type(frag_desc)) { + case RMNET_MAP_HEADER_TYPE_COALESCING: + rc = -1; + WARN_ON(1); + break; + case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD: + if (rmnet_frag_get_csum_valid(frag_desc)) { + frag_desc->csum_valid = true; + } else { + } + + if (!rmnet_frag_pull(frag_desc, port, + sizeof(struct rmnet_map_header) + + sizeof(struct rmnet_map_v5_csum_header))) { + rc = -EINVAL; + break; + } + + frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc); + + /* Remove padding only for csum offload packets. + * Coalesced packets should never have padding. 
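+		 * len here is the MAP payload length minus pad_len, so the
+		 * trim below also strips any trailing padding bytes.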
+ */ + if (!rmnet_frag_trim(frag_desc, port, len)) { + rc = -EINVAL; + break; + } + + list_del_init(&frag_desc->list); + list_add_tail(&frag_desc->list, list); + break; + default: + qmap_hex_dump(__func__, rmnet_frag_data_ptr(frag_desc), 64); + rc = -EINVAL; + break; + } + + return rc; +} + +static void +__rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct rmnet_map_header *qmap; + struct rmnet_endpoint *ep; + struct rmnet_frag_descriptor *frag, *tmp; + LIST_HEAD(segs); + u16 len, pad; + u8 mux_id; + + qmap = (struct rmnet_map_header *)skb_frag_address(&frag_desc->frag); + mux_id = qmap->mux_id; + pad = qmap->pad_len; + len = ntohs(qmap->pkt_len) - pad; + + if (qmap->cd_bit) { + goto recycle; + } + + if (mux_id >= RMNET_MAX_LOGICAL_EP) + goto recycle; + + ep = rmnet_get_endpoint(port, mux_id); + if (!ep) + goto recycle; + + frag_desc->dev = ep->rmnet_dev; + + /* Handle QMAPv5 packet */ + if (qmap->next_hdr && + (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE | + RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) { + if (rmnet_frag_process_next_hdr_packet(frag_desc, port, &segs, + len)) + goto recycle; + } else { + /* We only have the main QMAP header to worry about */ + if (!rmnet_frag_pull(frag_desc, port, sizeof(*qmap))) + return; + + frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc); + + if (!rmnet_frag_trim(frag_desc, port, len)) + return; + + list_add_tail(&frag_desc->list, &segs); + } + + list_for_each_entry_safe(frag, tmp, &segs, list) { + list_del_init(&frag->list); + rmnet_frag_deliver(frag, port); + } + return; + +recycle: + rmnet_recycle_frag_descriptor(frag_desc, port); +} + +static void rmnet_frag_ingress_handler(struct sk_buff *skb, + struct rmnet_port *port) +{ + LIST_HEAD(desc_list); + int i = 0; + struct rmnet_nss_cb *nss_cb; + + /* Deaggregation and freeing of HW originating + * buffers is done within here + */ + while (skb) { + struct sk_buff *skb_frag; + + port->chain_head = NULL; + port->chain_tail = NULL; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + rmnet_frag_deaggregate(&skb_shinfo(skb)->frags[i], port, + &desc_list); + if (!list_empty(&desc_list)) { + struct rmnet_frag_descriptor *frag_desc, *tmp; + + list_for_each_entry_safe(frag_desc, tmp, + &desc_list, list) { + list_del_init(&frag_desc->list); + __rmnet_frag_ingress_handler(frag_desc, + port); + } + } + } + + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (nss_cb && port->chain_head) { + port->chain_head->cb[0] = 0; + netif_receive_skb(port->chain_head); + } + + skb_frag = skb_shinfo(skb)->frag_list; + skb_shinfo(skb)->frag_list = NULL; + consume_skb(skb); + skb = skb_frag; + } +} + +static void +rmnet_map_ingress_handler(struct sk_buff *skb, + struct rmnet_port *port) +{ + if (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE | + RMNET_FLAGS_INGRESS_MAP_CKSUMV5)) { + if (skb_is_nonlinear(skb)) { + rmnet_frag_ingress_handler(skb, port); + return; + } + } + + WARN_ON(1); +} + +static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb); +static int rmnet_is_real_dev_registered(const struct net_device *real_dev) +{ + return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler; +} + + +/* Needs either rcu_read_lock() or rtnl lock */ +struct rmnet_port *rmnet_get_port(struct net_device *real_dev) +{ + if (rmnet_is_real_dev_registered(real_dev)) + return rcu_dereference_rtnl(real_dev->rx_handler_data); + else + return NULL; +} + +static rx_handler_result_t rmnet_rx_priv_handler(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + 
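+	/* This handler is attached to the rmnet_dataX virtual devices
+	 * themselves: frames NSS loops back up (cb[0] == 1) are passed to the
+	 * stack, while everything else is handed to nss_cb->nss_tx(), one
+	 * frag_list entry at a time.
+	 */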
struct rmnet_nss_cb *nss_cb; + + if (!skb) + return RX_HANDLER_CONSUMED; + if (nss_debug) printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + if (skb->pkt_type == PACKET_LOOPBACK) + return RX_HANDLER_PASS; + + /* Check this so that we dont loop around netif_receive_skb */ + if (skb->cb[0] == 1) { + skb->cb[0] = 0; + + skb->dev->stats.rx_packets++; + return RX_HANDLER_PASS; + } + + while (skb) { + struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list; + + skb_shinfo(skb)->frag_list = NULL; + + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (nss_cb) + nss_cb->nss_tx(skb); + + skb = skb_frag; + } + + return RX_HANDLER_CONSUMED; +} + +/* Ingress / Egress Entry Points */ + +/* Processes packet as per ingress data format for receiving device. Logical + * endpoint is determined from packet inspection. Packet is then sent to the + * egress device listed in the logical endpoint configuration. + */ +static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct rmnet_port *port; + struct net_device *dev; + + if (!skb) + goto done; + + if (nss_debug) printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + if (skb->pkt_type == PACKET_LOOPBACK) + return RX_HANDLER_PASS; + + if (skb->protocol != htons(ETH_P_MAP)) { + WARN_ON(1); + return RX_HANDLER_PASS; + } + + dev = skb->dev; + port = rmnet_get_port(dev); + + if (port == NULL) + return RX_HANDLER_PASS; + + port->chain_head = NULL; + port->chain_tail = NULL; + + switch (port->rmnet_mode) { + case RMNET_EPMODE_VND: + rmnet_map_ingress_handler(skb, port); + break; + case RMNET_EPMODE_BRIDGE: + //rmnet_bridge_handler(skb, port->bridge_ep); + break; + } + +done: + return RX_HANDLER_CONSUMED; +} + +static void rmnet_descriptor_deinit(struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool; + struct rmnet_frag_descriptor *frag_desc, *tmp; + + pool = port->frag_desc_pool; + + list_for_each_entry_safe(frag_desc, tmp, &pool->free_list, list) { + kfree(frag_desc); + pool->pool_size--; + } + + kfree(pool); +} + +static int rmnet_descriptor_init(struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool; + int i; + + spin_lock_init(&port->desc_pool_lock); + pool = kzalloc(sizeof(*pool), GFP_ATOMIC); + if (!pool) + return -ENOMEM; + + INIT_LIST_HEAD(&pool->free_list); + port->frag_desc_pool = pool; + + for (i = 0; i < RMNET_FRAG_DESCRIPTOR_POOL_SIZE; i++) { + struct rmnet_frag_descriptor *frag_desc; + + frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC); + if (!frag_desc) + return -ENOMEM; + + INIT_LIST_HEAD(&frag_desc->list); + INIT_LIST_HEAD(&frag_desc->sub_frags); + list_add_tail(&frag_desc->list, &pool->free_list); + pool->pool_size++; + } + + return 0; +} + +struct rmnet_priv { + //struct rmnet_endpoint local_ep; + struct net_device *real_dev; + u8 mux_id; +}; + +static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct rmnet_priv *priv; + + if (nss_debug) printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + priv = netdev_priv(dev); + if (priv->real_dev) { + add_qhdr_v5(skb, priv->mux_id); + skb->protocol = htons(ETH_P_MAP); + skb->dev = priv->real_dev; + dev_queue_xmit(skb); + dev->stats.tx_packets++; + //rmnet_egress_handler(skb); + } else { + //this_cpu_inc(priv->pcpu_stats->stats.tx_drops); + kfree_skb(skb); + } + return NETDEV_TX_OK; +} + +static int 
rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
+{
+	if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
+		return -EINVAL;
+
+	rmnet_dev->mtu = new_mtu;
+	return 0;
+}
+
+static const struct net_device_ops rmnet_vnd_ops = {
+	.ndo_start_xmit = rmnet_vnd_start_xmit,
+	.ndo_change_mtu = rmnet_vnd_change_mtu,
+	//.ndo_get_iflink = rmnet_vnd_get_iflink,
+	//.ndo_add_slave = rmnet_add_bridge,
+	//.ndo_del_slave = rmnet_del_bridge,
+	//.ndo_init = rmnet_vnd_init,
+	//.ndo_uninit = rmnet_vnd_uninit,
+	//.ndo_get_stats64 = rmnet_get_stats64,
+};
+
+static void rmnet_vnd_setup(struct net_device *rmnet_dev)
+{
+	rmnet_dev->netdev_ops = &rmnet_vnd_ops;
+	rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
+	rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
+	random_ether_addr(rmnet_dev->dev_addr);
+	rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;
+
+	/* Raw IP mode */
+	rmnet_dev->header_ops = NULL;  /* No header */
+	rmnet_dev->type = ARPHRD_RAWIP;
+	rmnet_dev->hard_header_len = 0;
+	rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+
+	//rmnet_dev->needs_free_netdev = true;
+
+	rmnet_dev->hw_features = NETIF_F_RXCSUM;
+	rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	//rmnet_dev->hw_features |= NETIF_F_SG;
+	//rmnet_dev->hw_features |= NETIF_F_GRO_HW;
+}
+#else
+/* Header targets below were lost in transit; reconstructed from the symbols
+ * this file uses.
+ */
+#include <linux/module.h>	/* module_param() */
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>	/* random_ether_addr() */
+#include <linux/if_arp.h>	/* ARPHRD_RAWIP */
+#include <linux/rtnetlink.h>	/* rtnl_lock()/rtnl_unlock() */
+#include <linux/hashtable.h>	/* hash_for_each_safe() */
+#include <linux/ip.h>
+
+static uint nss_debug = 0;
+module_param( nss_debug, uint, S_IRUGO | S_IWUSR);
+
+/* rmnet section */
+
+#define RMNET_FLAGS_INGRESS_DEAGGREGATION (1U << 0)
+#define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1)
+#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2)
+#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3)
+#define RMNET_FLAGS_INGRESS_COALESCE (1U << 4)
+#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 5)
+#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 (1U << 6)
+
+#ifdef CONFIG_ARCH_IPQ807x
+#define CONFIG_QCA_NSS_DRV
+#endif
+#ifdef CONFIG_QCA_NSS_DRV
+#include "rmnet_nss.c"
+#else
+#include "rmnet_nss.h"
+#endif
+
+#include "rmnet_vnd.c"
+#include "rmnet_map_command.c"
+#include "rmnet_map_data.c"
+#include "rmnet_descriptor.c"
+#include "rmnet_config.c"
+#include "rmnet_handlers.c"
+
+struct rmnet_nss_cb *rmnet_nss_callbacks __rcu __read_mostly;
+
+void rmnet_data_init(struct net_device *real_dev, u32 nr_rmnet_devs)
+{
+	struct rmnet_port *port;
+	struct rmnet_endpoint *ep;
+	struct net_device *rmnet_dev = NULL;
+	u32 nr = 0;
+	struct rmnet_nss_cb *nss_cb;
+	int rc = 0;
+
+	nss_cb = rcu_dereference(rmnet_nss_callbacks);
+	if (!nss_cb)
+	{
+#ifdef CONFIG_QCA_NSS_DRV
+		pr_err("%s(): initializing rmnet_nss\n", __func__);
+		RCU_INIT_POINTER(rmnet_nss_callbacks, &rmnet_nss);
+#endif
+	}
+
+	rtnl_lock();
+	rc = rmnet_register_real_device(real_dev);
+	rtnl_unlock();
+
+	if (rc) {
+		pr_err("%s rmnet_register_real_device = %d\n", __func__, rc);
+		return;
+	}
+
+	port = rmnet_get_port_rtnl(real_dev);
+	port->data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION
+		| RMNET_FLAGS_INGRESS_MAP_CKSUMV5 | RMNET_FLAGS_EGRESS_MAP_CKSUMV5;
+	port->rmnet_mode = RMNET_EPMODE_VND;
+
+	for (nr = 0; nr < nr_rmnet_devs; nr++) {
+		u8 mux_id = 0x81+nr;
+
+		ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
+
+		rtnl_lock();
+		rmnet_dev = alloc_netdev(sizeof(struct rmnet_priv),
+					 "rmnet_data%d", NET_NAME_PREDICTABLE,
+					 rmnet_vnd_setup);
+
+		rmnet_vnd_newlink(mux_id, rmnet_dev, port, real_dev, ep);
+		netdev_rx_handler_register(rmnet_dev, rmnet_rx_priv_handler, NULL);
+		rtnl_unlock();
+
+		hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
+	}
+
+	port->nr_rmnet_devs = 
nr_rmnet_devs; +} + +void rmnet_data_deinit(struct net_device *real_dev, u32 nr_rmnet_devs) +{ + struct rmnet_port *port; + u32 nr = 0; + struct rmnet_nss_cb *nss_cb; + + if (!real_dev || !rmnet_is_real_dev_registered(real_dev)) + return; + + port = rmnet_get_port_rtnl(real_dev); + + for (nr = 0; nr < nr_rmnet_devs; nr++) { + struct rmnet_endpoint *ep; + u8 mux_id = 0x81+nr; + + ep = rmnet_get_endpoint(port, mux_id); + if (ep) { + hlist_del_init_rcu(&ep->hlnode); + rmnet_vnd_dellink(mux_id, port, ep); + synchronize_rcu(); + kfree(ep); + } + } + + rmnet_unregister_real_device(real_dev, port); + + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (nss_cb) { +#ifdef CONFIG_QCA_NSS_DRV + struct hlist_node *tmp; + struct rmnet_nss_ctx *ctx; + int bkt; + + pr_err("%s(): exiting rmnet_nss\n", __func__); + RCU_INIT_POINTER(rmnet_nss_callbacks, NULL); + + /* Tear down all NSS contexts */ + hash_for_each_safe(rmnet_nss_ctx_hashtable, bkt, tmp, ctx, hnode) + rmnet_nss_free_ctx(ctx); +#endif + } +} +#endif diff --git a/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_descriptor.c b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_descriptor.c new file mode 100644 index 0000000..75006d1 --- /dev/null +++ b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_descriptor.c @@ -0,0 +1,661 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * RMNET Packet Descriptor Framework
+ *
+ */
+
+#include <linux/ip.h>		/* header targets lost in transit; inferred from use below */
+#include <linux/ipv6.h>
+#include <net/checksum.h>	/* csum_replace2(), csum_fold() */
+#include <net/ip6_checksum.h>	/* csum_ipv6_magic() */
+#include "rmnet_config.h"
+#include "rmnet_descriptor.h"
+#include "rmnet_handlers.h"
+#include "rmnet_private.h"
+#include "rmnet_vnd.h"
+
+#define RMNET_FRAG_DESCRIPTOR_POOL_SIZE 64
+#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \
+			       sizeof(struct rmnet_map_header) + \
+			       sizeof(struct rmnet_map_control_command_header))
+#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \
+			       sizeof(struct rmnet_map_header) + \
+			       sizeof(struct rmnet_map_control_command_header))
+
+typedef void (*rmnet_perf_desc_hook_t)(struct rmnet_frag_descriptor *frag_desc,
+				       struct rmnet_port *port);
+typedef void (*rmnet_perf_chain_hook_t)(void);
+
+static struct rmnet_frag_descriptor *
+rmnet_get_frag_descriptor(struct rmnet_port *port)
+{
+	struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
+	struct rmnet_frag_descriptor *frag_desc;
+
+	spin_lock(&port->desc_pool_lock);
+	if (!list_empty(&pool->free_list)) {
+		frag_desc = list_first_entry(&pool->free_list,
+					     struct rmnet_frag_descriptor,
+					     list);
+		list_del_init(&frag_desc->list);
+	} else {
+		frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
+		if (!frag_desc)
+			goto out;
+
+		INIT_LIST_HEAD(&frag_desc->list);
+		INIT_LIST_HEAD(&frag_desc->sub_frags);
+		pool->pool_size++;
+	}
+
+out:
+	spin_unlock(&port->desc_pool_lock);
+	return frag_desc;
+}
+
+static void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
+					  struct rmnet_port *port)
+{
+	struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
+	struct page *page = skb_frag_page(&frag_desc->frag);
+
+	list_del(&frag_desc->list);
+	if (page)
+		put_page(page);
+
+	memset(frag_desc, 0, sizeof(*frag_desc));
+	INIT_LIST_HEAD(&frag_desc->list);
+	INIT_LIST_HEAD(&frag_desc->sub_frags);
+	spin_lock(&port->desc_pool_lock);
+	list_add_tail(&frag_desc->list, &pool->free_list);
+	spin_unlock(&port->desc_pool_lock);
+}
+
+static void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list,
+				      struct page *p, u32 page_offset, u32 len)
+{
+	struct rmnet_frag_descriptor *frag_desc;
+
+	frag_desc = rmnet_get_frag_descriptor(port);
+	if (!frag_desc)
+		return;
+
+	rmnet_frag_fill(frag_desc, p, page_offset, len);
+	list_add_tail(&frag_desc->list, list);
+}
+
+static u8 rmnet_frag_do_flow_control(struct rmnet_map_header *qmap,
+				     struct rmnet_port *port,
+				     int enable)
+{
+	struct rmnet_map_control_command *cmd;
+	struct rmnet_endpoint *ep;
+	struct net_device *vnd;
+	u16 ip_family;
+	u16 fc_seq;
+	u32 qos_id;
+	u8 mux_id;
+	int r;
+
+	mux_id = qmap->mux_id;
+	cmd = (struct rmnet_map_control_command *)
+	      ((char *)qmap + sizeof(*qmap));
+
+	if (mux_id >= RMNET_MAX_LOGICAL_EP)
+		return RX_HANDLER_CONSUMED;
+
+	ep = rmnet_get_endpoint(port, mux_id);
+	if (!ep)
+		return RX_HANDLER_CONSUMED;
+
+	vnd = ep->egress_dev;
+
+	ip_family = cmd->flow_control.ip_family;
+	fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
+	qos_id = ntohl(cmd->flow_control.qos_id);
+
+	/* Ignore the ip family and pass the sequence number for both v4 and v6
+	 * sequence.
User space does not support creating dedicated flows for + * the 2 protocols + */ + r = rmnet_vnd_do_flow_control(vnd, enable); + if (r) + return RMNET_MAP_COMMAND_UNSUPPORTED; + else + return RMNET_MAP_COMMAND_ACK; +} + +static void rmnet_frag_send_ack(struct rmnet_map_header *qmap, + unsigned char type, + struct rmnet_port *port) +{ + struct rmnet_map_control_command *cmd; + struct net_device *dev = port->dev; + struct sk_buff *skb; + u16 alloc_len = ntohs(qmap->pkt_len) + sizeof(*qmap); + + skb = alloc_skb(alloc_len, GFP_ATOMIC); + if (!skb) + return; + + skb->protocol = htons(ETH_P_MAP); + skb->dev = dev; + + cmd = rmnet_map_get_cmd_start(skb); + cmd->cmd_type = type & 0x03; + + netif_tx_lock(dev); + dev->netdev_ops->ndo_start_xmit(skb, dev); + netif_tx_unlock(dev); +} + + +/* Process MAP command frame and send N/ACK message as appropriate. Message cmd + * name is decoded here and appropriate handler is called. + */ +static void rmnet_frag_command(struct rmnet_map_header *qmap, struct rmnet_port *port) +{ + struct rmnet_map_control_command *cmd; + unsigned char command_name; + unsigned char rc = 0; + + cmd = (struct rmnet_map_control_command *) + ((char *)qmap + sizeof(*qmap)); + command_name = cmd->command_name; + + switch (command_name) { + case RMNET_MAP_COMMAND_FLOW_ENABLE: + rc = rmnet_frag_do_flow_control(qmap, port, 1); + break; + + case RMNET_MAP_COMMAND_FLOW_DISABLE: + rc = rmnet_frag_do_flow_control(qmap, port, 0); + break; + + default: + rc = RMNET_MAP_COMMAND_UNSUPPORTED; + break; + } + if (rc == RMNET_MAP_COMMAND_ACK) + rmnet_frag_send_ack(qmap, rc, port); +} + +static void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port, + struct list_head *list) +{ + struct rmnet_map_header *maph; + u8 *data = skb_frag_address(frag); + u32 offset = 0; + u32 packet_len; + + while (offset < skb_frag_size(frag)) { + maph = (struct rmnet_map_header *)data; + packet_len = ntohs(maph->pkt_len); + + /* Some hardware can send us empty frames. Catch them */ + if (packet_len == 0) + return; + + packet_len += sizeof(*maph); + + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) { + packet_len += sizeof(struct rmnet_map_dl_csum_trailer); + WARN_ON(1); + } else if (port->data_format & + (RMNET_FLAGS_INGRESS_MAP_CKSUMV5 | + RMNET_FLAGS_INGRESS_COALESCE) && !maph->cd_bit) { + u32 hsize = 0; + u8 type; + + type = ((struct rmnet_map_v5_coal_header *) + (data + sizeof(*maph)))->header_type; + switch (type) { + case RMNET_MAP_HEADER_TYPE_COALESCING: + hsize = sizeof(struct rmnet_map_v5_coal_header); + break; + case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD: + hsize = sizeof(struct rmnet_map_v5_csum_header); + break; + } + + packet_len += hsize; + } + else { + //qmap_hex_dump(__func__, data, 64); + WARN_ON(1); + } + + if ((int)skb_frag_size(frag) - (int)packet_len < 0) + return; + + rmnet_descriptor_add_frag(port, list, skb_frag_page(frag), + frag->page_offset + offset, + packet_len); + + offset += packet_len; + data += packet_len; + } +} + +/* Allocate and populate an skb to contain the packet represented by the + * frag descriptor. + */ +static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct sk_buff *head_skb, *current_skb, *skb; + struct skb_shared_info *shinfo; + struct rmnet_frag_descriptor *sub_frag, *tmp; + + /* Use the exact sizes if we know them (i.e. 
RSB/RSC, rmnet_perf) */ + if (frag_desc->hdrs_valid) { + u16 hdr_len = frag_desc->ip_len + frag_desc->trans_len; + + head_skb = alloc_skb(hdr_len + RMNET_MAP_DESC_HEADROOM, + GFP_ATOMIC); + if (!head_skb) + return NULL; + + skb_reserve(head_skb, RMNET_MAP_DESC_HEADROOM); + skb_put_data(head_skb, frag_desc->hdr_ptr, hdr_len); + skb_reset_network_header(head_skb); + + if (frag_desc->trans_len) + skb_set_transport_header(head_skb, frag_desc->ip_len); + + /* Packets that have no data portion don't need any frags */ + if (hdr_len == skb_frag_size(&frag_desc->frag)) + goto skip_frags; + + /* If the headers we added are the start of the page, + * we don't want to add them twice + */ + if (frag_desc->hdr_ptr == rmnet_frag_data_ptr(frag_desc)) { + if (!rmnet_frag_pull(frag_desc, port, hdr_len)) { + kfree_skb(head_skb); + return NULL; + } + } + } else { + /* Allocate enough space to avoid penalties in the stack + * from __pskb_pull_tail() + */ + head_skb = alloc_skb(256 + RMNET_MAP_DESC_HEADROOM, + GFP_ATOMIC); + if (!head_skb) + return NULL; + + skb_reserve(head_skb, RMNET_MAP_DESC_HEADROOM); + } + + /* Add main fragment */ + get_page(skb_frag_page(&frag_desc->frag)); + skb_add_rx_frag(head_skb, 0, skb_frag_page(&frag_desc->frag), + frag_desc->frag.page_offset, + skb_frag_size(&frag_desc->frag), + skb_frag_size(&frag_desc->frag)); + + shinfo = skb_shinfo(head_skb); + current_skb = head_skb; + + /* Add in any frags from rmnet_perf */ + list_for_each_entry_safe(sub_frag, tmp, &frag_desc->sub_frags, list) { + skb_frag_t *frag; + u32 frag_size; + + frag = &sub_frag->frag; + frag_size = skb_frag_size(frag); + +add_frag: + if (shinfo->nr_frags < MAX_SKB_FRAGS) { + get_page(skb_frag_page(frag)); + skb_add_rx_frag(current_skb, shinfo->nr_frags, + skb_frag_page(frag), frag->page_offset, + frag_size, frag_size); + if (current_skb != head_skb) { + head_skb->len += frag_size; + head_skb->data_len += frag_size; + } + } else { + /* Alloc a new skb and try again */ + skb = alloc_skb(0, GFP_ATOMIC); + if (!skb) + break; + + if (current_skb == head_skb) + shinfo->frag_list = skb; + else + current_skb->next = skb; + + current_skb = skb; + shinfo = skb_shinfo(current_skb); + goto add_frag; + } + + rmnet_recycle_frag_descriptor(sub_frag, port); + } + +skip_frags: + head_skb->dev = frag_desc->dev; + rmnet_set_skb_proto(head_skb); + + /* Handle any header metadata that needs to be updated after RSB/RSC + * segmentation + */ + if (frag_desc->ip_id_set) { + struct iphdr *iph; + + iph = (struct iphdr *)rmnet_map_data_ptr(head_skb); + csum_replace2(&iph->check, iph->id, frag_desc->ip_id); + iph->id = frag_desc->ip_id; + } + + if (frag_desc->tcp_seq_set) { + struct tcphdr *th; + + th = (struct tcphdr *) + (rmnet_map_data_ptr(head_skb) + frag_desc->ip_len); + th->seq = frag_desc->tcp_seq; + } + + /* Handle csum offloading */ + if (frag_desc->csum_valid && frag_desc->hdrs_valid) { + /* Set the partial checksum information */ + //rmnet_frag_partial_csum(head_skb, frag_desc); + WARN_ON(1); + } else if (frag_desc->csum_valid) { + /* Non-RSB/RSC/perf packet. The current checksum is fine */ + head_skb->ip_summed = CHECKSUM_UNNECESSARY; + } else if (frag_desc->hdrs_valid && + (frag_desc->trans_proto == IPPROTO_TCP || + frag_desc->trans_proto == IPPROTO_UDP)) { + /* Unfortunately, we have to fake a bad checksum here, since + * the original bad value is lost by the hardware. The only + * reliable way to do it is to calculate the actual checksum + * and corrupt it. 
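+		 * Seeding *check with the pseudo-header sum first lets the
+		 * skb_checksum() call below produce the complete transport
+		 * checksum, which is then corrupted by adding one.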
+ */ + __sum16 *check; + __wsum csum; + unsigned int offset = skb_transport_offset(head_skb); + __sum16 pseudo; + + WARN_ON(1); + /* Calculate pseudo header and update header fields */ + if (frag_desc->ip_proto == 4) { + struct iphdr *iph = ip_hdr(head_skb); + __be16 tot_len = htons(head_skb->len); + + csum_replace2(&iph->check, iph->tot_len, tot_len); + iph->tot_len = tot_len; + pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + head_skb->len - + frag_desc->ip_len, + frag_desc->trans_proto, 0); + } else { + struct ipv6hdr *ip6h = ipv6_hdr(head_skb); + + ip6h->payload_len = htons(head_skb->len - + sizeof(*ip6h)); + pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, + head_skb->len - + frag_desc->ip_len, + frag_desc->trans_proto, 0); + } + + if (frag_desc->trans_proto == IPPROTO_TCP) { + check = &tcp_hdr(head_skb)->check; + } else { + udp_hdr(head_skb)->len = htons(head_skb->len - + frag_desc->ip_len); + check = &udp_hdr(head_skb)->check; + } + + *check = pseudo; + csum = skb_checksum(head_skb, offset, head_skb->len - offset, + 0); + /* Add 1 to corrupt. This cannot produce a final value of 0 + * since csum_fold() can't return a value of 0xFFFF + */ + *check = csum16_add(csum_fold(csum), htons(1)); + head_skb->ip_summed = CHECKSUM_NONE; + } + + /* Handle any rmnet_perf metadata */ + if (frag_desc->hash) { + head_skb->hash = frag_desc->hash; + head_skb->sw_hash = 1; + } + + if (frag_desc->flush_shs) + head_skb->cb[0] = 1; + + /* Handle coalesced packets */ + //if (frag_desc->gso_segs > 1) + // rmnet_frag_gso_stamp(head_skb, frag_desc); + + return head_skb; +} + +/* Deliver the packets contained within a frag descriptor */ +static void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct sk_buff *skb; + + skb = rmnet_alloc_skb(frag_desc, port); + if (skb) + rmnet_deliver_skb(skb, port); + rmnet_recycle_frag_descriptor(frag_desc, port); +} + +/* Process a QMAPv5 packet header */ +static int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port, + struct list_head *list, + u16 len) +{ + int rc = 0; + + switch (rmnet_frag_get_next_hdr_type(frag_desc)) { + case RMNET_MAP_HEADER_TYPE_COALESCING: + rc = -1; + WARN_ON(1); + break; + case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD: + if (rmnet_frag_get_csum_valid(frag_desc)) { + frag_desc->csum_valid = true; + } else { + } + + if (!rmnet_frag_pull(frag_desc, port, + sizeof(struct rmnet_map_header) + + sizeof(struct rmnet_map_v5_csum_header))) { + rc = -EINVAL; + break; + } + + frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc); + + /* Remove padding only for csum offload packets. + * Coalesced packets should never have padding. 
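+		 * (len excludes pad_len, as computed by the caller from the
+		 * MAP header, so no padding survives the trim.)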
+ */ + if (!rmnet_frag_trim(frag_desc, port, len)) { + rc = -EINVAL; + break; + } + + list_del_init(&frag_desc->list); + list_add_tail(&frag_desc->list, list); + break; + default: + //qmap_hex_dump(__func__, rmnet_frag_data_ptr(frag_desc), 64); + rc = -EINVAL; + break; + } + + return rc; +} + +static void +__rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct rmnet_map_header *qmap; + struct rmnet_endpoint *ep; + struct rmnet_frag_descriptor *frag, *tmp; + LIST_HEAD(segs); + u16 len, pad; + u8 mux_id; + + qmap = (struct rmnet_map_header *)skb_frag_address(&frag_desc->frag); + mux_id = qmap->mux_id; + pad = qmap->pad_len; + len = ntohs(qmap->pkt_len) - pad; + + if (qmap->cd_bit) { + if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) { + //rmnet_frag_flow_command(qmap, port, len); + goto recycle; + } + + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS) + rmnet_frag_command(qmap, port); + + goto recycle; + } + + if (mux_id >= RMNET_MAX_LOGICAL_EP) + goto recycle; + + ep = rmnet_get_endpoint(port, mux_id); + if (!ep) + goto recycle; + + frag_desc->dev = ep->egress_dev; + + /* Handle QMAPv5 packet */ + if (qmap->next_hdr && + (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE | + RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) { + if (rmnet_frag_process_next_hdr_packet(frag_desc, port, &segs, + len)) + goto recycle; + } else { + /* We only have the main QMAP header to worry about */ + if (!rmnet_frag_pull(frag_desc, port, sizeof(*qmap))) + return; + + frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc); + + if (!rmnet_frag_trim(frag_desc, port, len)) + return; + + list_add_tail(&frag_desc->list, &segs); + } + + list_for_each_entry_safe(frag, tmp, &segs, list) { + list_del_init(&frag->list); + rmnet_frag_deliver(frag, port); + } + return; + +recycle: + rmnet_recycle_frag_descriptor(frag_desc, port); +} + +static void rmnet_frag_ingress_handler(struct sk_buff *skb, + struct rmnet_port *port) +{ + LIST_HEAD(desc_list); + int i = 0; + struct rmnet_nss_cb *nss_cb; + + /* Deaggregation and freeing of HW originating + * buffers is done within here + */ + while (skb) { + struct sk_buff *skb_frag; + + port->chain_head = NULL; + port->chain_tail = NULL; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + rmnet_frag_deaggregate(&skb_shinfo(skb)->frags[i], port, + &desc_list); + if (!list_empty(&desc_list)) { + struct rmnet_frag_descriptor *frag_desc, *tmp; + + list_for_each_entry_safe(frag_desc, tmp, + &desc_list, list) { + list_del_init(&frag_desc->list); + __rmnet_frag_ingress_handler(frag_desc, + port); + } + } + } + + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (nss_cb && port->chain_head) { + port->chain_head->cb[0] = 0; + netif_receive_skb(port->chain_head); + } + + skb_frag = skb_shinfo(skb)->frag_list; + skb_shinfo(skb)->frag_list = NULL; + consume_skb(skb); + skb = skb_frag; + } +} + +void rmnet_descriptor_deinit(struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool; + struct rmnet_frag_descriptor *frag_desc, *tmp; + + pool = port->frag_desc_pool; + + list_for_each_entry_safe(frag_desc, tmp, &pool->free_list, list) { + kfree(frag_desc); + pool->pool_size--; + } + + kfree(pool); +} + +int rmnet_descriptor_init(struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool; + int i; + + spin_lock_init(&port->desc_pool_lock); + pool = kzalloc(sizeof(*pool), GFP_ATOMIC); + if (!pool) + return -ENOMEM; + + INIT_LIST_HEAD(&pool->free_list); + port->frag_desc_pool = pool; + + for (i = 0; i < 
RMNET_FRAG_DESCRIPTOR_POOL_SIZE; i++) {
+		struct rmnet_frag_descriptor *frag_desc;
+
+		frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
+		if (!frag_desc)
+			return -ENOMEM;
+
+		INIT_LIST_HEAD(&frag_desc->list);
+		INIT_LIST_HEAD(&frag_desc->sub_frags);
+		list_add_tail(&frag_desc->list, &pool->free_list);
+		pool->pool_size++;
+	}
+
+	return 0;
+}
diff --git a/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_descriptor.h b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_descriptor.h
new file mode 100644
index 0000000..962c663
--- /dev/null
+++ b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_descriptor.h
@@ -0,0 +1,146 @@
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Packet Descriptor Framework
+ *
+ */
+
+#ifndef _RMNET_DESCRIPTOR_H_
+#define _RMNET_DESCRIPTOR_H_
+
+#include <linux/netdevice.h>	/* header targets lost in transit; inferred from use below */
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include "rmnet_config.h"
+#include "rmnet_map.h"
+
+struct rmnet_frag_descriptor_pool {
+	struct list_head free_list;
+	u32 pool_size;
+};
+
+struct rmnet_frag_descriptor {
+	struct list_head list;
+	struct list_head sub_frags;
+	skb_frag_t frag;
+	u8 *hdr_ptr;
+	struct net_device *dev;
+	u32 hash;
+	__be32 tcp_seq;
+	__be16 ip_id;
+	u16 data_offset;
+	u16 gso_size;
+	u16 gso_segs;
+	u16 ip_len;
+	u16 trans_len;
+	u8 ip_proto;
+	u8 trans_proto;
+	u8 pkt_id;
+	u8 csum_valid:1,
+	   hdrs_valid:1,
+	   ip_id_set:1,
+	   tcp_seq_set:1,
+	   flush_shs:1,
+	   reserved:3;
+};
+
+/* Descriptor management */
+static struct rmnet_frag_descriptor *
+rmnet_get_frag_descriptor(struct rmnet_port *port);
+static void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
+					  struct rmnet_port *port);
+static void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list,
+				      struct page *p, u32 page_offset, u32 len);
+
+/* QMAP command packets */
+
+/* Ingress data handlers */
+static void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port,
+				   struct list_head *list);
+static void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
+			       struct rmnet_port *port);
+static int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
+					      struct rmnet_port *port,
+					      struct list_head *list,
+					      u16 len);
+static void rmnet_frag_ingress_handler(struct sk_buff *skb,
+				       struct rmnet_port *port);
+
+static int rmnet_descriptor_init(struct rmnet_port *port);
+static void rmnet_descriptor_deinit(struct rmnet_port *port);
+
+static inline void *rmnet_frag_data_ptr(struct rmnet_frag_descriptor *frag_desc)
+{
+	return skb_frag_address(&frag_desc->frag);
+}
+
+static inline void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc,
+				    struct rmnet_port *port,
+				    unsigned int size)
+{
+	if (size >= skb_frag_size(&frag_desc->frag)) {
+		pr_info("%s(): Pulling %u bytes from %u byte pkt. 
Dropping\n", + __func__, size, skb_frag_size(&frag_desc->frag)); + rmnet_recycle_frag_descriptor(frag_desc, port); + return NULL; + } + + frag_desc->frag.page_offset += size; + skb_frag_size_sub(&frag_desc->frag, size); + + return rmnet_frag_data_ptr(frag_desc); +} + +static inline void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port, + unsigned int size) +{ + if (!size) { + pr_info("%s(): Trimming %u byte pkt to 0. Dropping\n", + __func__, skb_frag_size(&frag_desc->frag)); + rmnet_recycle_frag_descriptor(frag_desc, port); + return NULL; + } + + if (size < skb_frag_size(&frag_desc->frag)) + skb_frag_size_set(&frag_desc->frag, size); + + return rmnet_frag_data_ptr(frag_desc); +} + +static inline void rmnet_frag_fill(struct rmnet_frag_descriptor *frag_desc, + struct page *p, u32 page_offset, u32 len) +{ + get_page(p); + __skb_frag_set_page(&frag_desc->frag, p); + skb_frag_size_set(&frag_desc->frag, len); + frag_desc->frag.page_offset = page_offset; +} + +static inline u8 +rmnet_frag_get_next_hdr_type(struct rmnet_frag_descriptor *frag_desc) +{ + unsigned char *data = rmnet_frag_data_ptr(frag_desc); + + data += sizeof(struct rmnet_map_header); + return ((struct rmnet_map_v5_coal_header *)data)->header_type; +} + +static inline bool +rmnet_frag_get_csum_valid(struct rmnet_frag_descriptor *frag_desc) +{ + unsigned char *data = rmnet_frag_data_ptr(frag_desc); + + data += sizeof(struct rmnet_map_header); + return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required; +} + +#endif /* _RMNET_DESCRIPTOR_H_ */ diff --git a/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_handlers.c b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_handlers.c new file mode 100644 index 0000000..6f1ce9d --- /dev/null +++ b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_handlers.c @@ -0,0 +1,374 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * RMNET Data ingress/egress handler
+ *
+ */
+
+#include <linux/netdevice.h>	/* header targets lost in transit; inferred from use below */
+#include <linux/netdev_features.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/sock.h>
+#include <linux/tracepoint.h>
+#include "rmnet_private.h"
+#include "rmnet_config.h"
+#include "rmnet_vnd.h"
+#include "rmnet_map.h"
+#include "rmnet_handlers.h"
+#include "rmnet_descriptor.h"
+
+#define RMNET_IP_VERSION_4 0x40
+#define RMNET_IP_VERSION_6 0x60
+
+/* Helper Functions */
+
+static void rmnet_set_skb_proto(struct sk_buff *skb)
+{
+	switch (rmnet_map_data_ptr(skb)[0] & 0xF0) {
+	case RMNET_IP_VERSION_4:
+		skb->protocol = htons(ETH_P_IP);
+		break;
+	case RMNET_IP_VERSION_6:
+		skb->protocol = htons(ETH_P_IPV6);
+		break;
+	default:
+		skb->protocol = htons(ETH_P_MAP);
+		break;
+	}
+}
+
+/* Generic handler */
+
+static void
+rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port)
+{
+	struct rmnet_nss_cb *nss_cb;
+
+	rmnet_vnd_rx_fixup(skb->dev, skb->len);
+
+	/* Pass off the packet to NSS driver if we can */
+	nss_cb = rcu_dereference(rmnet_nss_callbacks);
+	if (nss_cb) {
+		if (!port->chain_head)
+			port->chain_head = skb;
+		else
+			skb_shinfo(port->chain_tail)->frag_list = skb;
+
+		port->chain_tail = skb;
+		return;
+	}
+
+	skb_reset_transport_header(skb);
+	skb_reset_network_header(skb);
+
+	skb->pkt_type = PACKET_HOST;
+	skb_set_mac_header(skb, 0);
+
+	//if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
+	//} else {
+	//if (!rmnet_check_skb_can_gro(skb))
+	//	gro_cells_receive(&priv->gro_cells, skb);
+	//else
+		netif_receive_skb(skb);
+	//}
+}
+
+/* Deliver a list of skbs after undoing coalescing */
+static void rmnet_deliver_skb_list(struct sk_buff_head *head,
+				   struct rmnet_port *port)
+{
+	struct sk_buff *skb;
+
+	while ((skb = __skb_dequeue(head))) {
+		rmnet_set_skb_proto(skb);
+		rmnet_deliver_skb(skb, port);
+	}
+}
+
+/* MAP handler */
+
+static void
+_rmnet_map_ingress_handler(struct sk_buff *skb,
+			   struct rmnet_port *port)
+{
+	struct rmnet_map_header *qmap;
+	struct rmnet_endpoint *ep;
+	struct sk_buff_head list;
+	u16 len, pad;
+	u8 mux_id;
+
+	/* We don't need the spinlock since only we touch this */
+	__skb_queue_head_init(&list);
+
+	qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
+	if (qmap->cd_bit) {
+		if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
+			//if (!rmnet_map_flow_command(skb, port, false))
+				return;
+		}
+
+		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
+			return rmnet_map_command(skb, port);
+
+		goto free_skb;
+	}
+
+	mux_id = qmap->mux_id;
+	pad = qmap->pad_len;
+	len = ntohs(qmap->pkt_len) - pad;
+
+	if (mux_id >= RMNET_MAX_LOGICAL_EP)
+		goto free_skb;
+
+	ep = rmnet_get_endpoint(port, mux_id);
+	if (!ep)
+		goto free_skb;
+
+	skb->dev = ep->egress_dev;
+
+	/* Handle QMAPv5 packet */
+	if (qmap->next_hdr &&
+	    (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
+				  RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) {
+		if (rmnet_map_process_next_hdr_packet(skb, &list, len))
+			goto free_skb;
+	} else {
+		/* We only have the main QMAP header to worry about */
+		pskb_pull(skb, sizeof(*qmap));
+
+		rmnet_set_skb_proto(skb);
+
+		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
+			//if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
+			//	skb->ip_summed = CHECKSUM_UNNECESSARY;
+		}
+
+		pskb_trim(skb, len);
+
+		/* Push the single packet onto the list */
+		__skb_queue_tail(&list, skb);
+	}
+
+	rmnet_deliver_skb_list(&list, port);
+	return;
+
+free_skb:
+	kfree_skb(skb);
+}
+
+static void
+rmnet_map_ingress_handler(struct sk_buff *skb,
+			  struct rmnet_port *port)
+{
+	struct sk_buff *skbn;
+
+	if (port->data_format & 
(RMNET_FLAGS_INGRESS_COALESCE | + RMNET_FLAGS_INGRESS_MAP_CKSUMV5)) { + if (skb_is_nonlinear(skb)) { + rmnet_frag_ingress_handler(skb, port); + return; + } + } + + /* Deaggregation and freeing of HW originating + * buffers is done within here + */ + while (skb) { + struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list; + + skb_shinfo(skb)->frag_list = NULL; + while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL) { + _rmnet_map_ingress_handler(skbn, port); + + if (skbn == skb) + goto next_skb; + } + + consume_skb(skb); +next_skb: + skb = skb_frag; + } +} + +static int rmnet_map_egress_handler(struct sk_buff *skb, + struct rmnet_port *port, u8 mux_id, + struct net_device *orig_dev) +{ + int required_headroom, additional_header_len, csum_type; + struct rmnet_map_header *map_header; + + additional_header_len = 0; + required_headroom = sizeof(struct rmnet_map_header); + csum_type = 0; + + if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) { + additional_header_len = sizeof(struct rmnet_map_ul_csum_header); + csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4; + } else if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) { + additional_header_len = sizeof(struct rmnet_map_v5_csum_header); + csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5; + } + + required_headroom += additional_header_len; + + if (skb_headroom(skb) < required_headroom) { + if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC)) + return -ENOMEM; + } + + if (csum_type) + rmnet_map_checksum_uplink_packet(skb, orig_dev, csum_type); + + map_header = rmnet_map_add_map_header(skb, additional_header_len, 0, + port); + if (!map_header) + return -ENOMEM; + + map_header->mux_id = mux_id; + + if (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION) { + if (rmnet_map_tx_agg_skip(skb, required_headroom)) + goto done; + + rmnet_map_tx_aggregate(skb, port); + return -EINPROGRESS; + } + +done: + skb->protocol = htons(ETH_P_MAP); + return 0; +} + +static void +rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev) +{ + if (bridge_dev) { + skb->dev = bridge_dev; + dev_queue_xmit(skb); + } +} + +/* Ingress / Egress Entry Points */ + +/* Processes packet as per ingress data format for receiving device. Logical + * endpoint is determined from packet inspection. Packet is then sent to the + * egress device listed in the logical endpoint configuration. 
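+ * Runs as the rx_handler of the underlying MHI netdev, in NAPI/softirq
+ * context; except for loopback frames it always consumes the skb.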
+ */ +static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct rmnet_port *port; + struct net_device *dev; + + if (!skb) + goto done; + + if (skb->pkt_type == PACKET_LOOPBACK) + return RX_HANDLER_PASS; + + dev = skb->dev; + port = rmnet_get_port(dev); + + port->chain_head = NULL; + port->chain_tail = NULL; + + switch (port->rmnet_mode) { + case RMNET_EPMODE_VND: + rmnet_map_ingress_handler(skb, port); + break; + case RMNET_EPMODE_BRIDGE: + rmnet_bridge_handler(skb, port->bridge_ep); + break; + } + +done: + return RX_HANDLER_CONSUMED; +} + +static rx_handler_result_t rmnet_rx_priv_handler(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct rmnet_nss_cb *nss_cb; + + if (!skb) + return RX_HANDLER_CONSUMED; + if (nss_debug) printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + if (skb->pkt_type == PACKET_LOOPBACK) + return RX_HANDLER_PASS; + + /* Check this so that we dont loop around netif_receive_skb */ + if (skb->cb[0] == 1) { + skb->cb[0] = 0; + + skb->dev->stats.rx_packets++; + return RX_HANDLER_PASS; + } + + while (skb) { + struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list; + + skb_shinfo(skb)->frag_list = NULL; + + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (nss_cb) + nss_cb->nss_tx(skb); + + skb = skb_frag; + } + + return RX_HANDLER_CONSUMED; +} + +/* Modifies packet as per logical endpoint configuration and egress data format + * for egress device configured in logical endpoint. Packet is then transmitted + * on the egress device. + */ +static void rmnet_egress_handler(struct sk_buff *skb) +{ + struct net_device *orig_dev; + struct rmnet_port *port; + struct rmnet_priv *priv; + u8 mux_id; + int err; + u32 skb_len; + + skb_orphan(skb); + + orig_dev = skb->dev; + priv = netdev_priv(orig_dev); + skb->dev = priv->real_dev; + mux_id = priv->mux_id; + + port = rmnet_get_port(skb->dev); + if (!port) + goto drop; + + skb_len = skb->len; + err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev); + if (err == -ENOMEM) + goto drop; + else if (err == -EINPROGRESS) { + rmnet_vnd_tx_fixup(orig_dev, skb_len); + return; + } + + rmnet_vnd_tx_fixup(orig_dev, skb_len); + + dev_queue_xmit(skb); + return; + +drop: + this_cpu_inc(priv->pcpu_stats->stats.tx_drops); + kfree_skb(skb); +} diff --git a/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_handlers.h b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_handlers.h new file mode 100644 index 0000000..29837ba --- /dev/null +++ b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_handlers.h @@ -0,0 +1,32 @@ +/* Copyright (c) 2013, 2016-2017, 2019 + * The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * RMNET Data ingress/egress handler + * + */ + +#ifndef _RMNET_HANDLERS_H_ +#define _RMNET_HANDLERS_H_ + +#include "rmnet_config.h" + +enum rmnet_packet_context { + RMNET_NET_RX_CTX, + RMNET_WQ_CTX, +}; + +static void rmnet_egress_handler(struct sk_buff *skb); +static void rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port); +static void rmnet_set_skb_proto(struct sk_buff *skb); +static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb); +static rx_handler_result_t rmnet_rx_priv_handler(struct sk_buff **pskb); +#endif /* _RMNET_HANDLERS_H_ */ diff --git a/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_map.h b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_map.h new file mode 100644 index 0000000..ab49149 --- /dev/null +++ b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_map.h @@ -0,0 +1,272 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _RMNET_MAP_H_ +#define _RMNET_MAP_H_ + +#include +#include "rmnet_config.h" + +struct rmnet_map_control_command { + u8 command_name; + u8 cmd_type:2; + u8 reserved:6; + u16 reserved2; + u32 transaction_id; + union { + struct { + u16 ip_family:2; + u16 reserved:14; + __be16 flow_control_seq_num; + __be32 qos_id; + } flow_control; + u8 data[0]; + }; +} __aligned(1); + +enum rmnet_map_commands { + RMNET_MAP_COMMAND_NONE, + RMNET_MAP_COMMAND_FLOW_DISABLE, + RMNET_MAP_COMMAND_FLOW_ENABLE, + RMNET_MAP_COMMAND_FLOW_START = 7, + RMNET_MAP_COMMAND_FLOW_END = 8, + /* These should always be the last 2 elements */ + RMNET_MAP_COMMAND_UNKNOWN, + RMNET_MAP_COMMAND_ENUM_LENGTH +}; + +enum rmnet_map_v5_header_type { + RMNET_MAP_HEADER_TYPE_UNKNOWN, + RMNET_MAP_HEADER_TYPE_COALESCING = 0x1, + RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2, + RMNET_MAP_HEADER_TYPE_ENUM_LENGTH +}; + +enum rmnet_map_v5_close_type { + RMNET_MAP_COAL_CLOSE_NON_COAL, + RMNET_MAP_COAL_CLOSE_IP_MISS, + RMNET_MAP_COAL_CLOSE_TRANS_MISS, + RMNET_MAP_COAL_CLOSE_HW, + RMNET_MAP_COAL_CLOSE_COAL, +}; + +enum rmnet_map_v5_close_value { + RMNET_MAP_COAL_CLOSE_HW_NL, + RMNET_MAP_COAL_CLOSE_HW_PKT, + RMNET_MAP_COAL_CLOSE_HW_BYTE, + RMNET_MAP_COAL_CLOSE_HW_TIME, + RMNET_MAP_COAL_CLOSE_HW_EVICT, +}; + +/* Main QMAP header */ +struct rmnet_map_header { + u8 pad_len:6; + u8 next_hdr:1; + u8 cd_bit:1; + u8 mux_id; + __be16 pkt_len; +} __aligned(1); + +/* QMAP v5 headers */ +struct rmnet_map_v5_csum_header { + u8 next_hdr:1; + u8 header_type:7; + u8 hw_reserved:7; + u8 csum_valid_required:1; + __be16 reserved; +} __aligned(1); + +struct rmnet_map_v5_nl_pair { + __be16 pkt_len; + u8 csum_error_bitmap; + u8 num_packets; +} __aligned(1); + +/* NLO: Number-length object */ +#define RMNET_MAP_V5_MAX_NLOS (6) +#define RMNET_MAP_V5_MAX_PACKETS (48) + +struct rmnet_map_v5_coal_header { + u8 next_hdr:1; + u8 header_type:7; + u8 reserved1:4; + u8 num_nlos:3; + u8 csum_valid:1; + u8 close_type:4; + u8 close_value:4; + u8 reserved2:4; + u8 virtual_channel_id:4; + + struct rmnet_map_v5_nl_pair nl_pairs[RMNET_MAP_V5_MAX_NLOS]; +} __aligned(1); + +/* QMAP v4 headers */ +struct rmnet_map_dl_csum_trailer { + 
u8 reserved1; + u8 valid:1; + u8 reserved2:7; + u16 csum_start_offset; + u16 csum_length; + __be16 csum_value; +} __aligned(1); + +struct rmnet_map_ul_csum_header { + __be16 csum_start_offset; + u16 csum_insert_offset:14; + u16 udp_ind:1; + u16 csum_enabled:1; +} __aligned(1); + +struct rmnet_map_control_command_header { + u8 command_name; + u8 cmd_type:2; + u8 reserved:5; + u8 e:1; + u16 source_id:15; + u16 ext:1; + u32 transaction_id; +} __aligned(1); + +struct rmnet_map_flow_info_le { + __be32 mux_id; + __be32 flow_id; + __be32 bytes; + __be32 pkts; +} __aligned(1); + +struct rmnet_map_flow_info_be { + u32 mux_id; + u32 flow_id; + u32 bytes; + u32 pkts; +} __aligned(1); + +struct rmnet_map_dl_ind_hdr { + union { + struct { + u32 seq; + u32 bytes; + u32 pkts; + u32 flows; + struct rmnet_map_flow_info_le flow[0]; + } le __aligned(1); + struct { + __be32 seq; + __be32 bytes; + __be32 pkts; + __be32 flows; + struct rmnet_map_flow_info_be flow[0]; + } be __aligned(1); + } __aligned(1); +} __aligned(1); + +struct rmnet_map_dl_ind_trl { + union { + __be32 seq_be; + u32 seq_le; + } __aligned(1); +} __aligned(1); + +struct rmnet_map_dl_ind { + u8 priority; + union { + void (*dl_hdr_handler)(struct rmnet_map_dl_ind_hdr *); + void (*dl_hdr_handler_v2)(struct rmnet_map_dl_ind_hdr *, + struct + rmnet_map_control_command_header *); + } __aligned(1); + union { + void (*dl_trl_handler)(struct rmnet_map_dl_ind_trl *); + void (*dl_trl_handler_v2)(struct rmnet_map_dl_ind_trl *, + struct + rmnet_map_control_command_header *); + } __aligned(1); + struct list_head list; +}; + +#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \ + (Y)->data)->mux_id) +#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \ + (Y)->data)->cd_bit) +#define RMNET_MAP_GET_PAD(Y) (((struct rmnet_map_header *) \ + (Y)->data)->pad_len) +#define RMNET_MAP_GET_CMD_START(Y) ((struct rmnet_map_control_command *) \ + ((Y)->data + \ + sizeof(struct rmnet_map_header))) +#define RMNET_MAP_GET_LENGTH(Y) (ntohs(((struct rmnet_map_header *) \ + (Y)->data)->pkt_len)) + +#define RMNET_MAP_DEAGGR_SPACING 64 +#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2) +#define RMNET_MAP_DESC_HEADROOM 128 + +#define RMNET_MAP_COMMAND_REQUEST 0 +#define RMNET_MAP_COMMAND_ACK 1 +#define RMNET_MAP_COMMAND_UNSUPPORTED 2 +#define RMNET_MAP_COMMAND_INVALID 3 + +#define RMNET_MAP_NO_PAD_BYTES 0 +#define RMNET_MAP_ADD_PAD_BYTES 1 + +static inline unsigned char *rmnet_map_data_ptr(struct sk_buff *skb) +{ + /* Nonlinear packets we receive are entirely within frag 0 */ + if (skb_is_nonlinear(skb) && skb->len == skb->data_len) + return skb_frag_address(skb_shinfo(skb)->frags); + + return skb->data; +} + +static inline struct rmnet_map_control_command * +rmnet_map_get_cmd_start(struct sk_buff *skb) +{ + unsigned char *data = rmnet_map_data_ptr(skb); + + data += sizeof(struct rmnet_map_header); + return (struct rmnet_map_control_command *)data; +} + +static inline u8 rmnet_map_get_next_hdr_type(struct sk_buff *skb) +{ + unsigned char *data = rmnet_map_data_ptr(skb); + + data += sizeof(struct rmnet_map_header); + return ((struct rmnet_map_v5_coal_header *)data)->header_type; +} + +static inline bool rmnet_map_get_csum_valid(struct sk_buff *skb) +{ + unsigned char *data = rmnet_map_data_ptr(skb); + + data += sizeof(struct rmnet_map_header); + return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required; +} + +static struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb, + struct rmnet_port *port); +static struct 
rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, + int hdrlen, int pad, + struct rmnet_port *port); +static void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port); +static void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, + struct net_device *orig_dev, + int csum_type); +static int rmnet_map_process_next_hdr_packet(struct sk_buff *skb, + struct sk_buff_head *list, + u16 len); +static int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset); +static void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port); +static void rmnet_map_tx_aggregate_init(struct rmnet_port *port); +static void rmnet_map_tx_aggregate_exit(struct rmnet_port *port); +static void rmnet_map_cmd_init(struct rmnet_port *port); +static void rmnet_map_cmd_exit(struct rmnet_port *port); +#endif /* _RMNET_MAP_H_ */ diff --git a/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_map_command.c b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_map_command.c new file mode 100644 index 0000000..6c33184 --- /dev/null +++ b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_map_command.c @@ -0,0 +1,143 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include "rmnet_config.h" +#include "rmnet_map.h" +#include "rmnet_private.h" +#include "rmnet_vnd.h" + +#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \ + sizeof(struct rmnet_map_header) + \ + sizeof(struct rmnet_map_control_command_header)) + +#define RMNET_MAP_CMD_SIZE (sizeof(struct rmnet_map_header) + \ + sizeof(struct rmnet_map_control_command_header)) + +#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \ + sizeof(struct rmnet_map_header) + \ + sizeof(struct rmnet_map_control_command_header)) + +static u8 rmnet_map_do_flow_control(struct sk_buff *skb, + struct rmnet_port *port, + int enable) +{ + struct rmnet_map_header *qmap; + struct rmnet_map_control_command *cmd; + struct rmnet_endpoint *ep; + struct net_device *vnd; + u16 ip_family; + u16 fc_seq; + u32 qos_id; + u8 mux_id; + int r; + + qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb); + mux_id = qmap->mux_id; + cmd = rmnet_map_get_cmd_start(skb); + + if (mux_id >= RMNET_MAX_LOGICAL_EP) { + kfree_skb(skb); + return RX_HANDLER_CONSUMED; + } + + ep = rmnet_get_endpoint(port, mux_id); + if (!ep) { + kfree_skb(skb); + return RX_HANDLER_CONSUMED; + } + + vnd = ep->egress_dev; + + ip_family = cmd->flow_control.ip_family; + fc_seq = ntohs(cmd->flow_control.flow_control_seq_num); + qos_id = ntohl(cmd->flow_control.qos_id); + + /* Ignore the ip family and pass the sequence number for both v4 and v6 + * sequence. 
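(fc_seq and qos_id are decoded above but never consulted; the resulting throttle acts on the whole virtual device queue.)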
User space does not support creating dedicated flows for + * the 2 protocols + */ + r = rmnet_vnd_do_flow_control(vnd, enable); + if (r) { + kfree_skb(skb); + return RMNET_MAP_COMMAND_UNSUPPORTED; + } else { + return RMNET_MAP_COMMAND_ACK; + } +} + +static void rmnet_map_send_ack(struct sk_buff *skb, + unsigned char type, + struct rmnet_port *port) +{ + struct rmnet_map_control_command *cmd; + struct net_device *dev = skb->dev; + + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) + pskb_trim(skb, + skb->len - sizeof(struct rmnet_map_dl_csum_trailer)); + + skb->protocol = htons(ETH_P_MAP); + + cmd = rmnet_map_get_cmd_start(skb); + cmd->cmd_type = type & 0x03; + + netif_tx_lock(dev); + dev->netdev_ops->ndo_start_xmit(skb, dev); + netif_tx_unlock(dev); +} + +/* Process MAP command frame and send N/ACK message as appropriate. Message cmd + * name is decoded here and appropriate handler is called. + */ +static void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port) +{ + struct rmnet_map_control_command *cmd; + unsigned char command_name; + unsigned char rc = 0; + + cmd = rmnet_map_get_cmd_start(skb); + command_name = cmd->command_name; + + switch (command_name) { + case RMNET_MAP_COMMAND_FLOW_ENABLE: + rc = rmnet_map_do_flow_control(skb, port, 1); + break; + + case RMNET_MAP_COMMAND_FLOW_DISABLE: + rc = rmnet_map_do_flow_control(skb, port, 0); + break; + + default: + rc = RMNET_MAP_COMMAND_UNSUPPORTED; + kfree_skb(skb); + break; + } + if (rc == RMNET_MAP_COMMAND_ACK) + rmnet_map_send_ack(skb, rc, port); +} + + +static void rmnet_map_cmd_exit(struct rmnet_port *port) +{ + struct rmnet_map_dl_ind *tmp, *idx; + + list_for_each_entry_safe(tmp, idx, &port->dl_list, list) + list_del_rcu(&tmp->list); +} + +static void rmnet_map_cmd_init(struct rmnet_port *port) +{ + INIT_LIST_HEAD(&port->dl_list); + + port->dl_marker_flush = -1; +} diff --git a/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_map_data.c b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_map_data.c new file mode 100644 index 0000000..783412c --- /dev/null +++ b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_map_data.c @@ -0,0 +1,682 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * RMNET Data MAP protocol + * + */ + +#include +#include +#include +#include +#include "rmnet_config.h" +#include "rmnet_map.h" +#include "rmnet_private.h" +#include "rmnet_handlers.h" + +#define RMNET_MAP_PKT_COPY_THRESHOLD 64 +#define RMNET_MAP_DEAGGR_SPACING 64 +#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2) + +struct rmnet_map_coal_metadata { + void *ip_header; + void *trans_header; + u16 ip_len; + u16 trans_len; + u16 data_offset; + u16 data_len; + u8 ip_proto; + u8 trans_proto; + u8 pkt_id; + u8 pkt_count; +}; + +static __sum16 *rmnet_map_get_csum_field(unsigned char protocol, + const void *txporthdr) +{ + __sum16 *check = NULL; + + switch (protocol) { + case IPPROTO_TCP: + check = &(((struct tcphdr *)txporthdr)->check); + break; + + case IPPROTO_UDP: + check = &(((struct udphdr *)txporthdr)->check); + break; + + default: + check = NULL; + break; + } + + return check; +} + +static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr) +{ + struct iphdr *ip4h = (struct iphdr *)iphdr; + void *txphdr; + u16 *csum; + + txphdr = iphdr + ip4h->ihl * 4; + + if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) { + csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr); + *csum = ~(*csum); + } +} + +static void +rmnet_map_ipv4_ul_csum_header(void *iphdr, + struct rmnet_map_ul_csum_header *ul_header, + struct sk_buff *skb) +{ + struct iphdr *ip4h = (struct iphdr *)iphdr; + __be16 *hdr = (__be16 *)ul_header, offset; + + offset = htons((__force u16)(skb_transport_header(skb) - + (unsigned char *)iphdr)); + ul_header->csum_start_offset = offset; + ul_header->csum_insert_offset = skb->csum_offset; + ul_header->csum_enabled = 1; + if (ip4h->protocol == IPPROTO_UDP) + ul_header->udp_ind = 1; + else + ul_header->udp_ind = 0; + + /* Changing remaining fields to network order */ + hdr++; + *hdr = htons((__force u16)*hdr); + + skb->ip_summed = CHECKSUM_NONE; + + rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr); +} + +#if IS_ENABLED(CONFIG_IPV6) +static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr) +{ + struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr; + void *txphdr; + u16 *csum; + + txphdr = ip6hdr + sizeof(struct ipv6hdr); + + if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) { + csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr); + *csum = ~(*csum); + } +} + +static void +rmnet_map_ipv6_ul_csum_header(void *ip6hdr, + struct rmnet_map_ul_csum_header *ul_header, + struct sk_buff *skb) +{ + struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr; + __be16 *hdr = (__be16 *)ul_header, offset; + + offset = htons((__force u16)(skb_transport_header(skb) - + (unsigned char *)ip6hdr)); + ul_header->csum_start_offset = offset; + ul_header->csum_insert_offset = skb->csum_offset; + ul_header->csum_enabled = 1; + + if (ip6h->nexthdr == IPPROTO_UDP) + ul_header->udp_ind = 1; + else + ul_header->udp_ind = 0; + + /* Changing remaining fields to network order */ + hdr++; + *hdr = htons((__force u16)*hdr); + + skb->ip_summed = CHECKSUM_NONE; + + rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr); +} +#endif + +/* Adds MAP header to front of skb->data + * Padding is calculated and set appropriately in MAP header. Mux ID is + * initialized to 0. 
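
A standalone aside, not part of the patch: the complement trick in the two helpers above is plain 16-bit one's-complement arithmetic. With CHECKSUM_PARTIAL the stack leaves its pseudo-header sum in the transport checksum field, and the driver bitwise-inverts it before handing the packet over, so the device can fold its own sum over the payload on top. The toy below, with the invented helper csum16(), shows the fold-and-invert steps.

/* --- standalone demo: one's-complement checksum (not part of the patch) --- */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* csum16() is an invented helper: an RFC 1071 style sum over a buffer. */
static uint16_t csum16(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)(p[i] << 8 | p[i + 1]);
	if (len & 1)
		sum += (uint32_t)p[len - 1] << 8;  /* pad the odd byte */
	while (sum >> 16)                          /* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;                     /* one's complement */
}

int main(void)
{
	uint8_t seg[6] = {0x45, 0x00, 0x00, 0x1c, 0xbe, 0xef};
	uint16_t c = csum16(seg, sizeof(seg));

	/* Because the sum is one's-complement, inverting a partial result,
	 * as the complement helpers above do, lets another party keep
	 * adding words and still land on the correct final checksum. */
	printf("csum=0x%04x complement=0x%04x\n",
	       (unsigned)c, (unsigned)(uint16_t)~c);
	return 0;
}
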
+ */ +static struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, + int hdrlen, int pad, + struct rmnet_port *port) +{ + struct rmnet_map_header *map_header; + u32 padding, map_datalen; + u8 *padbytes; + + map_datalen = skb->len - hdrlen; + map_header = (struct rmnet_map_header *) + skb_push(skb, sizeof(struct rmnet_map_header)); + memset(map_header, 0, sizeof(struct rmnet_map_header)); + + /* Set next_hdr bit for csum offload packets */ + if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) + map_header->next_hdr = 1; + + if (pad == RMNET_MAP_NO_PAD_BYTES) { + map_header->pkt_len = htons(map_datalen); + return map_header; + } + + padding = ALIGN(map_datalen, 4) - map_datalen; + + if (padding == 0) + goto done; + + if (skb_tailroom(skb) < padding) + return NULL; + + padbytes = (u8 *)skb_put(skb, padding); + memset(padbytes, 0, padding); + +done: + map_header->pkt_len = htons(map_datalen + padding); + map_header->pad_len = padding & 0x3F; + + return map_header; +} + +/* Deaggregates a single packet + * A whole new buffer is allocated for each portion of an aggregated frame. + * Caller should keep calling deaggregate() on the source skb until 0 is + * returned, indicating that there are no more packets to deaggregate. Caller + * is responsible for freeing the original skb. + */ +static struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb, + struct rmnet_port *port) +{ + struct rmnet_map_header *maph; + struct sk_buff *skbn; + unsigned char *data = rmnet_map_data_ptr(skb), *next_hdr = NULL; + u32 packet_len; + + if (skb->len == 0) + return NULL; + + maph = (struct rmnet_map_header *)data; + packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header); + + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) + packet_len += sizeof(struct rmnet_map_dl_csum_trailer); + else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) { + if (!maph->cd_bit) { + packet_len += sizeof(struct rmnet_map_v5_csum_header); + + /* Coalescing headers require MAPv5 */ + next_hdr = data + sizeof(*maph); + } + } + + if (((int)skb->len - (int)packet_len) < 0) + return NULL; + + /* Some hardware can send us empty frames. 
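(a zero-length frame ends the walk here, and the caller's loop then frees the source skb)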
Catch them */ + if (ntohs(maph->pkt_len) == 0) + return NULL; + + if (next_hdr && + ((struct rmnet_map_v5_coal_header *)next_hdr)->header_type == + RMNET_MAP_HEADER_TYPE_COALESCING) + return skb; + + if (skb_is_nonlinear(skb)) { + skb_frag_t *frag0 = skb_shinfo(skb)->frags; + struct page *page = skb_frag_page(frag0); + + skbn = alloc_skb(RMNET_MAP_DEAGGR_HEADROOM, GFP_ATOMIC); + if (!skbn) + return NULL; + + skb_append_pagefrags(skbn, page, frag0->page_offset, + packet_len); + skbn->data_len += packet_len; + skbn->len += packet_len; + } else { + skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, + GFP_ATOMIC); + if (!skbn) + return NULL; + + skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM); + skb_put(skbn, packet_len); + memcpy(skbn->data, data, packet_len); + } + + pskb_pull(skb, packet_len); + + return skbn; +} + +static void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb, + struct net_device *orig_dev) +{ + struct rmnet_priv *priv = netdev_priv(orig_dev); + struct rmnet_map_ul_csum_header *ul_header; + void *iphdr; + + ul_header = (struct rmnet_map_ul_csum_header *) + skb_push(skb, sizeof(struct rmnet_map_ul_csum_header)); + + if (unlikely(!(orig_dev->features & + (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))) + goto sw_csum; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + iphdr = (char *)ul_header + + sizeof(struct rmnet_map_ul_csum_header); + + if (skb->protocol == htons(ETH_P_IP)) { + rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb); + priv->stats.csum_hw++; + return; + } else if (skb->protocol == htons(ETH_P_IPV6)) { +#if IS_ENABLED(CONFIG_IPV6) + rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb); + priv->stats.csum_hw++; + return; +#else + priv->stats.csum_err_invalid_ip_version++; + goto sw_csum; +#endif + } else { + priv->stats.csum_err_invalid_ip_version++; + } + } + +sw_csum: + ul_header->csum_start_offset = 0; + ul_header->csum_insert_offset = 0; + ul_header->csum_enabled = 0; + ul_header->udp_ind = 0; + + priv->stats.csum_sw++; +} + +static void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb, + struct net_device *orig_dev) +{ + struct rmnet_priv *priv = netdev_priv(orig_dev); + struct rmnet_map_v5_csum_header *ul_header; + + ul_header = (struct rmnet_map_v5_csum_header *) + skb_push(skb, sizeof(*ul_header)); + memset(ul_header, 0, sizeof(*ul_header)); + ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + void *iph = (char *)ul_header + sizeof(*ul_header); + void *trans; + __sum16 *check; + u8 proto; + + if (skb->protocol == htons(ETH_P_IP)) { + u16 ip_len = ((struct iphdr *)iph)->ihl * 4; + + proto = ((struct iphdr *)iph)->protocol; + trans = iph + ip_len; + } else if (skb->protocol == htons(ETH_P_IPV6)) { + u16 ip_len = sizeof(struct ipv6hdr); + + proto = ((struct ipv6hdr *)iph)->nexthdr; + trans = iph + ip_len; + } else { + priv->stats.csum_err_invalid_ip_version++; + goto sw_csum; + } + + check = rmnet_map_get_csum_field(proto, trans); + if (check) { + *check = 0; + skb->ip_summed = CHECKSUM_NONE; + /* Ask for checksum offloading */ + ul_header->csum_valid_required = 1; + priv->stats.csum_hw++; + return; + } + } + +sw_csum: + priv->stats.csum_sw++; +} + + +/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP + * packets that are supported for UL checksum offload. 
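
A standalone aside, not part of the patch: what the MAPv4 flavour of this uplink header looks like on the wire. The sketch mirrors struct rmnet_map_ul_csum_header for an IPv4/UDP packet on a little-endian host, including the in-place htons() of the second 16-bit word that rmnet_map_ipv4_ul_csum_header() performs after filling the bitfields.

/* --- standalone demo: MAPv4 uplink csum header (not part of the patch) --- */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct ul_csum_hdr {                 /* mirror of rmnet_map_ul_csum_header */
	uint16_t csum_start_offset;      /* offset of the L4 header, BE */
	uint16_t csum_insert_offset:14;  /* bytes from L4 start to csum field */
	uint16_t udp_ind:1;
	uint16_t csum_enabled:1;
} __attribute__((packed));

int main(void)
{
	struct ul_csum_hdr h;
	uint16_t *w;

	memset(&h, 0, sizeof(h));
	h.csum_start_offset = htons(20); /* 20-byte IPv4 header, no options */
	h.csum_insert_offset = 6;        /* offsetof(struct udphdr, check) */
	h.udp_ind = 1;                   /* UDP, not TCP */
	h.csum_enabled = 1;              /* ask the device to fill it in */

	/* Like the driver's "hdr++; *hdr = htons(*hdr);" step: the whole
	 * second word, bitfields included, is byte-swapped in place. */
	w = (uint16_t *)&h + 1;
	*w = htons(*w);

	printf("wire bytes: %02x %02x %02x %02x\n",
	       ((uint8_t *)&h)[0], ((uint8_t *)&h)[1],
	       ((uint8_t *)&h)[2], ((uint8_t *)&h)[3]);
	return 0;
}

Running it prints "wire bytes: 00 14 c0 06": start offset 20, insert offset 6, with the UDP and enable bits in the top of the swapped word.
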
+ */ +void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, + struct net_device *orig_dev, + int csum_type) +{ + switch (csum_type) { + case RMNET_FLAGS_EGRESS_MAP_CKSUMV4: + rmnet_map_v4_checksum_uplink_packet(skb, orig_dev); + break; + case RMNET_FLAGS_EGRESS_MAP_CKSUMV5: + rmnet_map_v5_checksum_uplink_packet(skb, orig_dev); + break; + default: + break; + } +} + +static void rmnet_map_move_headers(struct sk_buff *skb) +{ + struct iphdr *iph; + u16 ip_len; + u16 trans_len = 0; + u8 proto; + + /* This only applies to non-linear SKBs */ + if (!skb_is_nonlinear(skb)) + return; + + iph = (struct iphdr *)rmnet_map_data_ptr(skb); + if (iph->version == 4) { + ip_len = iph->ihl * 4; + proto = iph->protocol; + if (iph->frag_off & htons(IP_OFFSET)) + /* No transport header information */ + goto pull; + } else if (iph->version == 6) { + struct ipv6hdr *ip6h = (struct ipv6hdr *)iph; + __be16 frag_off; + u8 nexthdr = ip6h->nexthdr; + + ip_len = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr, + &frag_off); + if (ip_len < 0) + return; + + proto = nexthdr; + } else { + return; + } + + if (proto == IPPROTO_TCP) { + struct tcphdr *tp = (struct tcphdr *)((u8 *)iph + ip_len); + + trans_len = tp->doff * 4; + } else if (proto == IPPROTO_UDP) { + trans_len = sizeof(struct udphdr); + } else if (proto == NEXTHDR_FRAGMENT) { + /* Non-first fragments don't have the fragment length added by + * ipv6_skip_exthdr() and sho up as proto NEXTHDR_FRAGMENT, so + * we account for the length here. + */ + ip_len += sizeof(struct frag_hdr); + } + +pull: + __pskb_pull_tail(skb, ip_len + trans_len); + skb_reset_network_header(skb); + if (trans_len) + skb_set_transport_header(skb, ip_len); +} + + +/* Process a QMAPv5 packet header */ +static int rmnet_map_process_next_hdr_packet(struct sk_buff *skb, + struct sk_buff_head *list, + u16 len) +{ + struct rmnet_priv *priv = netdev_priv(skb->dev); + int rc = 0; + + switch (rmnet_map_get_next_hdr_type(skb)) { + case RMNET_MAP_HEADER_TYPE_COALESCING: + priv->stats.coal.coal_rx++; + break; + case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD: + if (rmnet_map_get_csum_valid(skb)) { + priv->stats.csum_ok++; + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + priv->stats.csum_valid_unset++; + } + + /* Pull unnecessary headers and move the rest to the linear + * section of the skb. + */ + pskb_pull(skb, + (sizeof(struct rmnet_map_header) + + sizeof(struct rmnet_map_v5_csum_header))); + rmnet_map_move_headers(skb); + + /* Remove padding only for csum offload packets. + * Coalesced packets should never have padding. 
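+	 * (len already excludes the pad bytes, pkt_len minus pad_len as
+	 * computed by the caller, so the trim below drops exactly the padding.)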
+ */ + pskb_trim(skb, len); + __skb_queue_tail(list, skb); + break; + default: + rc = -EINVAL; + break; + } + + return rc; +} + +long rmnet_agg_time_limit __read_mostly = 1000000L; +long rmnet_agg_bypass_time __read_mostly = 10000000L; + +static int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset) +{ + u8 *packet_start = skb->data + offset; + int is_icmp = 0; + + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *ip4h = (struct iphdr *)(packet_start); + + if (ip4h->protocol == IPPROTO_ICMP) + is_icmp = 1; + } else if (skb->protocol == htons(ETH_P_IPV6)) { + struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start); + + if (ip6h->nexthdr == IPPROTO_ICMPV6) { + is_icmp = 1; + } else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) { + struct frag_hdr *frag; + + frag = (struct frag_hdr *)(packet_start + + sizeof(struct ipv6hdr)); + if (frag->nexthdr == IPPROTO_ICMPV6) + is_icmp = 1; + } + } + + return is_icmp; +} + +static void rmnet_map_flush_tx_packet_work(struct work_struct *work) +{ + struct sk_buff *skb = NULL; + struct rmnet_port *port; + unsigned long flags; + + port = container_of(work, struct rmnet_port, agg_wq); + + spin_lock_irqsave(&port->agg_lock, flags); + if (likely(port->agg_state == -EINPROGRESS)) { + /* Buffer may have already been shipped out */ + if (likely(port->agg_skb)) { + skb = port->agg_skb; + port->agg_skb = NULL; + port->agg_count = 0; + memset(&port->agg_time, 0, sizeof(struct timespec)); + } + port->agg_state = 0; + } + + spin_unlock_irqrestore(&port->agg_lock, flags); + if (skb) + dev_queue_xmit(skb); +} + +static enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t) +{ + struct rmnet_port *port; + + port = container_of(t, struct rmnet_port, hrtimer); + + schedule_work(&port->agg_wq); + return HRTIMER_NORESTART; +} + +static void rmnet_map_linearize_copy(struct sk_buff *dst, struct sk_buff *src) +{ + unsigned int linear = src->len - src->data_len, target = src->len; + unsigned char *src_buf; + struct sk_buff *skb; + + src_buf = src->data; + skb_put_data(dst, src_buf, linear); + target -= linear; + + skb = src; + + while (target) { + unsigned int i = 0, non_linear = 0; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + non_linear = skb_frag_size(&skb_shinfo(skb)->frags[i]); + src_buf = skb_frag_address(&skb_shinfo(skb)->frags[i]); + + skb_put_data(dst, src_buf, non_linear); + target -= non_linear; + } + + if (skb_shinfo(skb)->frag_list) { + skb = skb_shinfo(skb)->frag_list; + continue; + } + + if (skb->next) + skb = skb->next; + } +} + +static void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port) +{ + struct timespec diff, last; + int size, agg_count = 0; + struct sk_buff *agg_skb; + unsigned long flags; + +new_packet: + spin_lock_irqsave(&port->agg_lock, flags); + memcpy(&last, &port->agg_last, sizeof(struct timespec)); + getnstimeofday(&port->agg_last); + + if (!port->agg_skb) { + /* Check to see if we should agg first. If the traffic is very + * sparse, don't aggregate. 
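(bypass if more than rmnet_agg_bypass_time, 10 ms, has elapsed since the previous egress packet, or if the packet alone would overflow the agg_size buffer)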
We will need to tune this later + */ + diff = timespec_sub(port->agg_last, last); + size = port->egress_agg_params.agg_size - skb->len; + + if (diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_bypass_time || + size <= 0) { + spin_unlock_irqrestore(&port->agg_lock, flags); + skb->protocol = htons(ETH_P_MAP); + dev_queue_xmit(skb); + return; + } + + port->agg_skb = alloc_skb(port->egress_agg_params.agg_size, + GFP_ATOMIC); + if (!port->agg_skb) { + port->agg_skb = 0; + port->agg_count = 0; + memset(&port->agg_time, 0, sizeof(struct timespec)); + spin_unlock_irqrestore(&port->agg_lock, flags); + skb->protocol = htons(ETH_P_MAP); + dev_queue_xmit(skb); + return; + } + rmnet_map_linearize_copy(port->agg_skb, skb); + port->agg_skb->dev = skb->dev; + port->agg_skb->protocol = htons(ETH_P_MAP); + port->agg_count = 1; + getnstimeofday(&port->agg_time); + dev_kfree_skb_any(skb); + goto schedule; + } + diff = timespec_sub(port->agg_last, port->agg_time); + size = port->egress_agg_params.agg_size - port->agg_skb->len; + + if (skb->len > size || + port->agg_count >= port->egress_agg_params.agg_count || + diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_time_limit) { + agg_skb = port->agg_skb; + agg_count = port->agg_count; + port->agg_skb = 0; + port->agg_count = 0; + memset(&port->agg_time, 0, sizeof(struct timespec)); + port->agg_state = 0; + spin_unlock_irqrestore(&port->agg_lock, flags); + hrtimer_cancel(&port->hrtimer); + dev_queue_xmit(agg_skb); + goto new_packet; + } + + rmnet_map_linearize_copy(port->agg_skb, skb); + port->agg_count++; + dev_kfree_skb_any(skb); + +schedule: + if (port->agg_state != -EINPROGRESS) { + port->agg_state = -EINPROGRESS; + hrtimer_start(&port->hrtimer, + ns_to_ktime(port->egress_agg_params.agg_time), + HRTIMER_MODE_REL); + } + spin_unlock_irqrestore(&port->agg_lock, flags); +} + +static void rmnet_map_tx_aggregate_init(struct rmnet_port *port) +{ + hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + port->hrtimer.function = rmnet_map_flush_tx_packet_queue; + port->egress_agg_params.agg_size = 8192; + port->egress_agg_params.agg_count = 20; + port->egress_agg_params.agg_time = 3000000; + spin_lock_init(&port->agg_lock); + + INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work); +} + +static void rmnet_map_tx_aggregate_exit(struct rmnet_port *port) +{ + unsigned long flags; + + hrtimer_cancel(&port->hrtimer); + cancel_work_sync(&port->agg_wq); + + spin_lock_irqsave(&port->agg_lock, flags); + if (port->agg_state == -EINPROGRESS) { + if (port->agg_skb) { + kfree_skb(port->agg_skb); + port->agg_skb = NULL; + port->agg_count = 0; + memset(&port->agg_time, 0, sizeof(struct timespec)); + } + + port->agg_state = 0; + } + + spin_unlock_irqrestore(&port->agg_lock, flags); +} diff --git a/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_private.h b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_private.h new file mode 100644 index 0000000..d384b7b --- /dev/null +++ b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_private.h @@ -0,0 +1,34 @@ +/* Copyright (c) 2013-2014, 2016-2019 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#ifndef _RMNET_PRIVATE_H_ +#define _RMNET_PRIVATE_H_ + +#define RMNET_MAX_PACKET_SIZE 16384 +#define RMNET_DFLT_PACKET_SIZE 1500 +#define RMNET_NEEDED_HEADROOM 16 +#define RMNET_TX_QUEUE_LEN 1000 + +/* Constants */ +#define RMNET_EGRESS_FORMAT_AGGREGATION BIT(31) +#define RMNET_INGRESS_FORMAT_DL_MARKER_V1 BIT(30) +#define RMNET_INGRESS_FORMAT_DL_MARKER_V2 BIT(29) + +#define RMNET_INGRESS_FORMAT_DL_MARKER (RMNET_INGRESS_FORMAT_DL_MARKER_V1 |\ +RMNET_INGRESS_FORMAT_DL_MARKER_V2) + +/* Replace skb->dev to a virtual rmnet device and pass up the stack */ +#define RMNET_EPMODE_VND (1) +/* Pass the frame directly to another device with dev_queue_xmit() */ +#define RMNET_EPMODE_BRIDGE (2) + +#endif /* _RMNET_PRIVATE_H_ */ diff --git a/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_trace.h b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_trace.h new file mode 100644 index 0000000..d453fc5 --- /dev/null +++ b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_trace.h @@ -0,0 +1,257 @@ +/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rmnet +#define TRACE_INCLUDE_FILE rmnet_trace + +#if !defined(_RMNET_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _RMNET_TRACE_H_ + +#include +#include +#include + +/*****************************************************************************/ +/* Trace events for rmnet module */ +/*****************************************************************************/ +DECLARE_EVENT_CLASS + (rmnet_mod_template, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2), + + TP_STRUCT__entry( + __field(u8, func) + __field(u8, evt) + __field(u32, uint1) + __field(u32, uint2) + __field(u64, ulong1) + __field(u64, ulong2) + __field(void *, ptr1) + __field(void *, ptr2) + ), + + TP_fast_assign( + __entry->func = func; + __entry->evt = evt; + __entry->uint1 = uint1; + __entry->uint2 = uint2; + __entry->ulong1 = ulong1; + __entry->ulong2 = ulong2; + __entry->ptr1 = ptr1; + __entry->ptr2 = ptr2; + ), + +TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%llu ul2:%llu p1:0x%pK p2:0x%pK", + __entry->func, __entry->evt, + __entry->uint1, __entry->uint2, + __entry->ulong1, __entry->ulong2, + __entry->ptr1, __entry->ptr2) +) + +DEFINE_EVENT + (rmnet_mod_template, rmnet_low, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_high, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_err, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + 
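
A standalone aside, not part of the patch: DECLARE_EVENT_CLASS above defines one record layout (TP_STRUCT__entry) plus one format string (TP_printk), and each DEFINE_EVENT then stamps out a named trace_<name>() entry point that reuses both. The plain-C analogy below shows that shape; struct rmnet_mod_record and rmnet_mod_print() are invented stand-ins for the generated code.

/* --- standalone analogy: event class vs. instances (not part of the patch) --- */
#include <stdio.h>
#include <stdint.h>

struct rmnet_mod_record {            /* the class's TP_STRUCT__entry */
	uint8_t func, evt;
	uint32_t uint1, uint2;
	uint64_t ulong1, ulong2;
	void *ptr1, *ptr2;
};

static void rmnet_mod_print(const char *event, struct rmnet_mod_record *e)
{                                    /* the class's TP_printk */
	printf("%s: fun:%u ev:%u u1:%u u2:%u ul1:%llu ul2:%llu p1:%p p2:%p\n",
	       event, e->func, e->evt, e->uint1, e->uint2,
	       (unsigned long long)e->ulong1, (unsigned long long)e->ulong2,
	       e->ptr1, e->ptr2);
}

/* One wrapper per DEFINE_EVENT, all sharing the class above. */
static void trace_rmnet_low(struct rmnet_mod_record r)  { rmnet_mod_print("rmnet_low", &r); }
static void trace_rmnet_high(struct rmnet_mod_record r) { rmnet_mod_print("rmnet_high", &r); }
static void trace_rmnet_err(struct rmnet_mod_record r)  { rmnet_mod_print("rmnet_err", &r); }

int main(void)
{
	struct rmnet_mod_record r = { .func = 1, .evt = 2, .uint1 = 3 };

	trace_rmnet_err(r);
	trace_rmnet_high(r);
	return 0;
}

In the driver proper, callers likewise invoke the generated instances as trace_rmnet_err(...), trace_rmnet_high(...), and so on.
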
+/*****************************************************************************/ +/* Trace events for rmnet_perf module */ +/*****************************************************************************/ +DEFINE_EVENT + (rmnet_mod_template, rmnet_perf_low, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_perf_high, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_perf_err, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +/*****************************************************************************/ +/* Trace events for rmnet_shs module */ +/*****************************************************************************/ +DEFINE_EVENT + (rmnet_mod_template, rmnet_shs_low, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_shs_high, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_shs_err, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_shs_wq_low, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_shs_wq_high, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +DEFINE_EVENT + (rmnet_mod_template, rmnet_shs_wq_err, + + TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2, + u64 ulong1, u64 ulong2, void *ptr1, void *ptr2), + + TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2) + +); + +DECLARE_EVENT_CLASS + (rmnet_freq_template, + + TP_PROTO(u8 core, u32 newfreq), + + TP_ARGS(core, newfreq), + + TP_STRUCT__entry( + __field(u8, core) + __field(u32, newfreq) + ), + + TP_fast_assign( + __entry->core = core; + __entry->newfreq = newfreq; + + ), + +TP_printk("freq policy core:%u freq floor :%u", + __entry->core, __entry->newfreq) + +); + +DEFINE_EVENT + (rmnet_freq_template, rmnet_freq_boost, + + TP_PROTO(u8 core, u32 newfreq), + + TP_ARGS(core, newfreq) +); + +DEFINE_EVENT + (rmnet_freq_template, rmnet_freq_reset, + + TP_PROTO(u8 core, u32 newfreq), + + TP_ARGS(core, newfreq) +); + +TRACE_EVENT + (rmnet_freq_update, + + TP_PROTO(u8 core, u32 lowfreq, u32 highfreq), + + TP_ARGS(core, lowfreq, highfreq), + + TP_STRUCT__entry( + __field(u8, core) + __field(u32, lowfreq) + __field(u32, highfreq) + ), + + TP_fast_assign( + __entry->core = core; + __entry->lowfreq = lowfreq; + __entry->highfreq = highfreq; + + ), + +TP_printk("freq policy update core:%u policy freq floor :%u freq ceil :%u", + __entry->core, __entry->lowfreq, __entry->highfreq) +); 
+#endif /* _RMNET_TRACE_H_ */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../drivers/net/ethernet/qualcomm/rmnet +#include diff --git a/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_vnd.c b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_vnd.c new file mode 100644 index 0000000..4ef645d --- /dev/null +++ b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_vnd.c @@ -0,0 +1,382 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * + * RMNET Data virtual network driver + * + */ + +#include +#include +#include +#include +#include "rmnet_config.h" +#include "rmnet_handlers.h" +#include "rmnet_private.h" +#include "rmnet_map.h" +#include "rmnet_vnd.h" + +/* RX/TX Fixup */ + +static void rmnet_vnd_rx_fixup(struct net_device *dev, u32 skb_len) +{ + struct rmnet_priv *priv = netdev_priv(dev); + struct rmnet_pcpu_stats *pcpu_ptr; + + pcpu_ptr = this_cpu_ptr(priv->pcpu_stats); + + u64_stats_update_begin(&pcpu_ptr->syncp); + pcpu_ptr->stats.rx_pkts++; + pcpu_ptr->stats.rx_bytes += skb_len; + u64_stats_update_end(&pcpu_ptr->syncp); +} + +static void rmnet_vnd_tx_fixup(struct net_device *dev, u32 skb_len) +{ + struct rmnet_priv *priv = netdev_priv(dev); + struct rmnet_pcpu_stats *pcpu_ptr; + + pcpu_ptr = this_cpu_ptr(priv->pcpu_stats); + + u64_stats_update_begin(&pcpu_ptr->syncp); + pcpu_ptr->stats.tx_pkts++; + pcpu_ptr->stats.tx_bytes += skb_len; + u64_stats_update_end(&pcpu_ptr->syncp); +} + +/* Network Device Operations */ + +static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct rmnet_priv *priv; + + priv = netdev_priv(dev); + if (priv->real_dev) { + rmnet_egress_handler(skb); + } else { + this_cpu_inc(priv->pcpu_stats->stats.tx_drops); + kfree_skb(skb); + } + return NETDEV_TX_OK; +} + +static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu) +{ + if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE) + return -EINVAL; + + rmnet_dev->mtu = new_mtu; + return 0; +} + +static int rmnet_vnd_get_iflink(const struct net_device *dev) +{ + struct rmnet_priv *priv = netdev_priv(dev); + + return priv->real_dev->ifindex; +} + +static int rmnet_vnd_init(struct net_device *dev) +{ + struct rmnet_priv *priv = netdev_priv(dev); + int err; + + priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats); + if (!priv->pcpu_stats) + return -ENOMEM; + + err = gro_cells_init(&priv->gro_cells, dev); + if (err) { + free_percpu(priv->pcpu_stats); + return err; + } + + return 0; +} + +static void rmnet_vnd_uninit(struct net_device *dev) +{ + struct rmnet_priv *priv = netdev_priv(dev); + + gro_cells_destroy(&priv->gro_cells); + free_percpu(priv->pcpu_stats); +} + +static struct rtnl_link_stats64* rmnet_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *s) +{ + struct rmnet_priv *priv = netdev_priv(dev); + struct rmnet_vnd_stats total_stats; + struct rmnet_pcpu_stats *pcpu_ptr; + unsigned int cpu, start; + + memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats)); + + for_each_possible_cpu(cpu) { + pcpu_ptr = 
per_cpu_ptr(priv->pcpu_stats, cpu); + + do { + start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp); + total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts; + total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes; + total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts; + total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes; + } while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start)); + + total_stats.tx_drops += pcpu_ptr->stats.tx_drops; + } + + s->rx_packets = total_stats.rx_pkts; + s->rx_bytes = total_stats.rx_bytes; + s->tx_packets = total_stats.tx_pkts; + s->tx_bytes = total_stats.tx_bytes; + s->tx_dropped = total_stats.tx_drops; + + return s; +} + +static const struct net_device_ops rmnet_vnd_ops = { + .ndo_start_xmit = rmnet_vnd_start_xmit, + .ndo_change_mtu = rmnet_vnd_change_mtu, + .ndo_get_iflink = rmnet_vnd_get_iflink, + //.ndo_add_slave = rmnet_add_bridge, + //.ndo_del_slave = rmnet_del_bridge, + .ndo_init = rmnet_vnd_init, + .ndo_uninit = rmnet_vnd_uninit, + .ndo_get_stats64 = rmnet_get_stats64, +}; + +static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = { + "Checksum ok", + "Checksum valid bit not set", + "Checksum validation failed", + "Checksum error bad buffer", + "Checksum error bad ip version", + "Checksum error bad transport", + "Checksum skipped on ip fragment", + "Checksum skipped", + "Checksum computed in software", + "Checksum computed in hardware", + "Coalescing packets received", + "Coalesced packets", + "Coalescing header NLO errors", + "Coalescing header pcount errors", + "Coalescing checksum errors", + "Coalescing packet reconstructs", + "Coalescing IP version invalid", + "Coalescing L4 header invalid", + "Coalescing close Non-coalescable", + "Coalescing close L3 mismatch", + "Coalescing close L4 mismatch", + "Coalescing close HW NLO limit", + "Coalescing close HW packet limit", + "Coalescing close HW byte limit", + "Coalescing close HW time limit", + "Coalescing close HW eviction", + "Coalescing close Coalescable", + "Coalescing packets over VEID0", + "Coalescing packets over VEID1", + "Coalescing packets over VEID2", + "Coalescing packets over VEID3", +}; + +static const char rmnet_port_gstrings_stats[][ETH_GSTRING_LEN] = { + "MAP Cmd last version", + "MAP Cmd last ep id", + "MAP Cmd last transaction id", + "DL header last seen sequence", + "DL header last seen bytes", + "DL header last seen packets", + "DL header last seen flows", + "DL header pkts received", + "DL header total bytes received", + "DL header total pkts received", + "DL trailer last seen sequence", + "DL trailer pkts received", +}; + +static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(buf, &rmnet_gstrings_stats, + sizeof(rmnet_gstrings_stats)); + memcpy(buf + sizeof(rmnet_gstrings_stats), + &rmnet_port_gstrings_stats, + sizeof(rmnet_port_gstrings_stats)); + break; + } +} + +static int rmnet_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rmnet_gstrings_stats) + + ARRAY_SIZE(rmnet_port_gstrings_stats); + default: + return -EOPNOTSUPP; + } +} + +static void rmnet_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct rmnet_priv *priv = netdev_priv(dev); + struct rmnet_priv_stats *st = &priv->stats; + struct rmnet_port_priv_stats *stp; + struct rmnet_port *port; + + port = rmnet_get_port(priv->real_dev); + + if (!data || !port) + return; + + stp = &port->stats; + + memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * 
sizeof(u64)); + memcpy(data + ARRAY_SIZE(rmnet_gstrings_stats), stp, + ARRAY_SIZE(rmnet_port_gstrings_stats) * sizeof(u64)); +} + +static int rmnet_stats_reset(struct net_device *dev) +{ + struct rmnet_priv *priv = netdev_priv(dev); + struct rmnet_port_priv_stats *stp; + struct rmnet_port *port; + + port = rmnet_get_port(priv->real_dev); + if (!port) + return -EINVAL; + + stp = &port->stats; + + memset(stp, 0, sizeof(*stp)); + return 0; +} + +static const struct ethtool_ops rmnet_ethtool_ops = { + .get_ethtool_stats = rmnet_get_ethtool_stats, + .get_strings = rmnet_get_strings, + .get_sset_count = rmnet_get_sset_count, + .nway_reset = rmnet_stats_reset, +}; + +/* Called by kernel whenever a new rmnet device is created. Sets MTU, + * flags, ARP type, needed headroom, etc... + */ +void rmnet_vnd_setup(struct net_device *rmnet_dev) +{ + rmnet_dev->netdev_ops = &rmnet_vnd_ops; + rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE; + rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM; + random_ether_addr(rmnet_dev->dev_addr); + rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN; + + /* Raw IP mode */ + rmnet_dev->header_ops = NULL; /* No header */ + rmnet_dev->type = ARPHRD_RAWIP; + rmnet_dev->hard_header_len = 0; + rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + + //rmnet_dev->needs_free_netdev = true; + + rmnet_dev->hw_features = NETIF_F_RXCSUM; + rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + //rmnet_dev->hw_features |= NETIF_F_SG; + //rmnet_dev->hw_features |= NETIF_F_GRO_HW; +} + +/* Exposed API */ + +static int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, + struct rmnet_port *port, + struct net_device *real_dev, + struct rmnet_endpoint *ep) +{ + struct rmnet_priv *priv = netdev_priv(rmnet_dev); + struct rmnet_nss_cb *nss_cb; + int rc; + + if (ep->egress_dev) + return -EINVAL; + + if (rmnet_get_endpoint(port, id)) + return -EBUSY; + + rmnet_dev->hw_features = NETIF_F_RXCSUM; + rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + rmnet_dev->hw_features |= NETIF_F_SG; + + priv->real_dev = real_dev; + + rc = register_netdevice(rmnet_dev); + if (!rc) { + ep->egress_dev = rmnet_dev; + ep->mux_id = id; + port->nr_rmnet_devs++; + + //rmnet_dev->rtnl_link_ops = &rmnet_link_ops; + + priv->mux_id = id; + + netdev_dbg(rmnet_dev, "rmnet dev created\n"); + } + + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (nss_cb) { + rc = nss_cb->nss_create(rmnet_dev); + if (rc) { + /* Log, but don't fail the device creation */ + netdev_err(rmnet_dev, "Device will not use NSS path: %d\n", rc); + rc = 0; + } else { + netdev_dbg(rmnet_dev, "NSS context created\n"); + } + } + + return rc; +} + +static int rmnet_vnd_dellink(u8 id, struct rmnet_port *port, + struct rmnet_endpoint *ep) +{ + struct rmnet_nss_cb *nss_cb; + + if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev) + return -EINVAL; + + if (ep->egress_dev) { + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (nss_cb) + nss_cb->nss_free(ep->egress_dev); + } + ep->egress_dev = NULL; + port->nr_rmnet_devs--; + + return 0; +} + +static int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable) +{ + netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable); + /* Although we expect similar number of enable/disable + * commands, optimize for the disable. 
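(hence unlikely(enable) below, which optimizes the branch for the stop path)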
That is more + * latency sensitive than enable + */ + if (unlikely(enable)) + netif_wake_queue(rmnet_dev); + else + netif_stop_queue(rmnet_dev); + + return 0; +} diff --git a/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_vnd.h b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_vnd.h new file mode 100644 index 0000000..f8a65a9 --- /dev/null +++ b/wwan/driver/quectel_MHI/src/devices/rmnet/rmnet_vnd.h @@ -0,0 +1,29 @@ +/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * RMNET Data Virtual Network Device APIs + * + */ + +#ifndef _RMNET_VND_H_ +#define _RMNET_VND_H_ + +static int rmnet_vnd_do_flow_control(struct net_device *dev, int enable); +static int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, + struct rmnet_port *port, + struct net_device *real_dev, + struct rmnet_endpoint *ep); +static int rmnet_vnd_dellink(u8 id, struct rmnet_port *port, + struct rmnet_endpoint *ep); +static void rmnet_vnd_rx_fixup(struct net_device *dev, u32 skb_len); +static void rmnet_vnd_tx_fixup(struct net_device *dev, u32 skb_len); +static void rmnet_vnd_setup(struct net_device *dev); +#endif /* _RMNET_VND_H_ */ diff --git a/wwan/driver/quectel_MHI/src/devices/rmnet_handler.c b/wwan/driver/quectel_MHI/src/devices/rmnet_handler.c new file mode 100644 index 0000000..b100262 --- /dev/null +++ b/wwan/driver/quectel_MHI/src/devices/rmnet_handler.c @@ -0,0 +1,1129 @@ +#if 0 + +#define RMNET_MAX_PACKET_SIZE 16384 +#define RMNET_DFLT_PACKET_SIZE 1500 +#define RMNET_NEEDED_HEADROOM 16 +#define RMNET_TX_QUEUE_LEN 1000 + +#define RMNET_MAX_LOGICAL_EP 255 +#define RMNET_MAP_DESC_HEADROOM 128 +#define RMNET_FRAG_DESCRIPTOR_POOL_SIZE 64 + +/* Pass the frame up the stack with no modifications to skb->dev */ +#define RMNET_EPMODE_NONE (0) +/* Replace skb->dev to a virtual rmnet device and pass up the stack */ +#define RMNET_EPMODE_VND (1) +/* Pass the frame directly to another device with dev_queue_xmit() */ +#define RMNET_EPMODE_BRIDGE (2) + +/* rmnet section */ + +#define RMNET_FLAGS_INGRESS_DEAGGREGATION (1U << 0) +#define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1) +#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2) +#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3) +#define RMNET_FLAGS_INGRESS_COALESCE (1U << 4) +#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 5) +#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 (1U << 6) + +enum rmnet_map_v5_header_type { + RMNET_MAP_HEADER_TYPE_UNKNOWN, + RMNET_MAP_HEADER_TYPE_COALESCING = 0x1, + RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2, + RMNET_MAP_HEADER_TYPE_ENUM_LENGTH +}; + +/* Main QMAP header */ +struct rmnet_map_header { + u8 pad_len:6; + u8 next_hdr:1; + u8 cd_bit:1; + u8 mux_id; + __be16 pkt_len; +} __aligned(1); + +/* QMAP v5 headers */ +struct rmnet_map_v5_csum_header { + u8 next_hdr:1; + u8 header_type:7; + u8 hw_reserved:7; + u8 csum_valid_required:1; + __be16 reserved; +} __aligned(1); + +struct rmnet_map_v5_nl_pair { + __be16 pkt_len; + u8 csum_error_bitmap; + u8 num_packets; +} __aligned(1); + +/* NLO: Number-length object */ +#define RMNET_MAP_V5_MAX_NLOS (6) +#define RMNET_MAP_V5_MAX_PACKETS 
(48) + +struct rmnet_map_v5_coal_header { + u8 next_hdr:1; + u8 header_type:7; + u8 reserved1:4; + u8 num_nlos:3; + u8 csum_valid:1; + u8 close_type:4; + u8 close_value:4; + u8 reserved2:4; + u8 virtual_channel_id:4; + + struct rmnet_map_v5_nl_pair nl_pairs[RMNET_MAP_V5_MAX_NLOS]; +} __aligned(1); + +/* QMAP v4 headers */ +struct rmnet_map_dl_csum_trailer { + u8 reserved1; + u8 valid:1; + u8 reserved2:7; + u16 csum_start_offset; + u16 csum_length; + __be16 csum_value; +} __aligned(1); + +struct rmnet_frag_descriptor_pool { + struct list_head free_list; + u32 pool_size; +}; + +struct rmnet_frag_descriptor { + struct list_head list; + struct list_head sub_frags; + skb_frag_t frag; + u8 *hdr_ptr; + struct net_device *dev; + u32 hash; + __be32 tcp_seq; + __be16 ip_id; + u16 data_offset; + u16 gso_size; + u16 gso_segs; + u16 ip_len; + u16 trans_len; + u8 ip_proto; + u8 trans_proto; + u8 pkt_id; + u8 csum_valid:1, + hdrs_valid:1, + ip_id_set:1, + tcp_seq_set:1, + flush_shs:1, + reserved:3; +}; + +struct rmnet_endpoint { + u8 rmnet_mode; + u8 mux_id; + struct net_device *rmnet_dev; +}; + +/* One instance of this structure is instantiated for each real_dev associated + * with rmnet. + */ +struct rmnet_port { + struct net_device *dev; + u8 rmnet_mode; + u32 data_format; + u32 nr_rmnet_devs; + struct rmnet_endpoint muxed_ep[16]; + + /* Descriptor pool */ + spinlock_t desc_pool_lock; + struct rmnet_frag_descriptor_pool *frag_desc_pool; + struct sk_buff *chain_head; + struct sk_buff *chain_tail; +}; + +static struct sk_buff * add_qhdr_v5(struct sk_buff *skb, u8 mux_id) +{ + struct rmnet_map_header *map_header; + struct rmnet_map_v5_csum_header *ul_header; + u32 padding, map_datalen; + + map_datalen = skb->len; + padding = map_datalen%4; + if (padding) { + padding = 4 - padding; + if (skb_tailroom(skb) < padding) { + printk("skb_tailroom small!\n"); + padding = 0; + } + if (padding) + __skb_put(skb, padding); + } + + map_header = (struct rmnet_map_header *)skb_push(skb, (sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_v5_csum_header))); + + BUILD_BUG_ON((sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_v5_csum_header)) != 8); + + map_header->cd_bit = 0; + map_header->next_hdr = 1; + map_header->pad_len = padding; + map_header->mux_id = mux_id; + map_header->pkt_len = htons(map_datalen + padding); + + ul_header = (struct rmnet_map_v5_csum_header *)(map_header + 1); + memset(ul_header, 0, sizeof(*ul_header)); + ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD; + + return skb; +} + +struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id) +{ + return &port->muxed_ep[0]; +} + +static void +rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port) +{ + struct rmnet_nss_cb *nss_cb; + + //rmnet_vnd_rx_fixup(skb->dev, skb->len); + + /* Pass off the packet to NSS driver if we can */ + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (nss_cb) { + if (!port->chain_head) + port->chain_head = skb; + else + skb_shinfo(port->chain_tail)->frag_list = skb; + + port->chain_tail = skb; + return; + } + + skb_reset_transport_header(skb); + skb_reset_network_header(skb); + + skb->pkt_type = PACKET_HOST; + skb_set_mac_header(skb, 0); + + //if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) { + //} else { + //if (!rmnet_check_skb_can_gro(skb)) + // gro_cells_receive(&priv->gro_cells, skb); + //else + netif_receive_skb(skb); + //} +} + +static inline unsigned char *rmnet_map_data_ptr(struct sk_buff *skb) +{ + /* Nonlinear packets we receive are entirely within 
frag 0 */ + if (skb_is_nonlinear(skb) && skb->len == skb->data_len) + return skb_frag_address(skb_shinfo(skb)->frags); + + return skb->data; +} + +static inline void *rmnet_frag_data_ptr(struct rmnet_frag_descriptor *frag_desc) +{ + return skb_frag_address(&frag_desc->frag); +} + +static struct rmnet_frag_descriptor * +rmnet_get_frag_descriptor(struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool; + struct rmnet_frag_descriptor *frag_desc; + + spin_lock(&port->desc_pool_lock); + if (!list_empty(&pool->free_list)) { + frag_desc = list_first_entry(&pool->free_list, + struct rmnet_frag_descriptor, + list); + list_del_init(&frag_desc->list); + } else { + frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC); + if (!frag_desc) + goto out; + + INIT_LIST_HEAD(&frag_desc->list); + INIT_LIST_HEAD(&frag_desc->sub_frags); + pool->pool_size++; + } + +out: + spin_unlock(&port->desc_pool_lock); + return frag_desc; +} + +static void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool; + struct page *page = skb_frag_page(&frag_desc->frag); + + list_del(&frag_desc->list); + if (page) + put_page(page); + + memset(frag_desc, 0, sizeof(*frag_desc)); + INIT_LIST_HEAD(&frag_desc->list); + INIT_LIST_HEAD(&frag_desc->sub_frags); + spin_lock(&port->desc_pool_lock); + list_add_tail(&frag_desc->list, &pool->free_list); + spin_unlock(&port->desc_pool_lock); +} + +static inline void rmnet_frag_fill(struct rmnet_frag_descriptor *frag_desc, + struct page *p, u32 page_offset, u32 len) +{ + get_page(p); + __skb_frag_set_page(&frag_desc->frag, p); + skb_frag_size_set(&frag_desc->frag, len); + frag_desc->frag.page_offset = page_offset; +} + +static inline void *rmnet_frag_pull(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port, + unsigned int size) +{ + if (size >= skb_frag_size(&frag_desc->frag)) { + pr_info("%s(): Pulling %u bytes from %u byte pkt. Dropping\n", + __func__, size, skb_frag_size(&frag_desc->frag)); + rmnet_recycle_frag_descriptor(frag_desc, port); + return NULL; + } + + frag_desc->frag.page_offset += size; + skb_frag_size_sub(&frag_desc->frag, size); + + return rmnet_frag_data_ptr(frag_desc); +} + +static inline void *rmnet_frag_trim(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port, + unsigned int size) +{ + if (!size) { + pr_info("%s(): Trimming %u byte pkt to 0. 
Dropping\n", + __func__, skb_frag_size(&frag_desc->frag)); + rmnet_recycle_frag_descriptor(frag_desc, port); + return NULL; + } + + if (size < skb_frag_size(&frag_desc->frag)) + skb_frag_size_set(&frag_desc->frag, size); + + return rmnet_frag_data_ptr(frag_desc); +} + +static inline u8 +rmnet_frag_get_next_hdr_type(struct rmnet_frag_descriptor *frag_desc) +{ + unsigned char *data = rmnet_frag_data_ptr(frag_desc); + + data += sizeof(struct rmnet_map_header); + return ((struct rmnet_map_v5_coal_header *)data)->header_type; +} + +static inline bool +rmnet_frag_get_csum_valid(struct rmnet_frag_descriptor *frag_desc) +{ + unsigned char *data = rmnet_frag_data_ptr(frag_desc); + + data += sizeof(struct rmnet_map_header); + return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required; +} + +static void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list, + struct page *p, u32 page_offset, u32 len) +{ + struct rmnet_frag_descriptor *frag_desc; + + frag_desc = rmnet_get_frag_descriptor(port); + if (!frag_desc) + return; + + rmnet_frag_fill(frag_desc, p, page_offset, len); + list_add_tail(&frag_desc->list, list); +} + +static void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port, + struct list_head *list) +{ + struct rmnet_map_header *maph; + u8 *data = skb_frag_address(frag); + u32 offset = 0; + u32 packet_len; + + while (offset < skb_frag_size(frag)) { + maph = (struct rmnet_map_header *)data; + packet_len = ntohs(maph->pkt_len); + + /* Some hardware can send us empty frames. Catch them */ + if (packet_len == 0) + return; + + packet_len += sizeof(*maph); + + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) { + packet_len += sizeof(struct rmnet_map_dl_csum_trailer); + WARN_ON(1); + } else if (port->data_format & + (RMNET_FLAGS_INGRESS_MAP_CKSUMV5 | + RMNET_FLAGS_INGRESS_COALESCE) && !maph->cd_bit) { + u32 hsize = 0; + u8 type; + + type = ((struct rmnet_map_v5_coal_header *) + (data + sizeof(*maph)))->header_type; + switch (type) { + case RMNET_MAP_HEADER_TYPE_COALESCING: + hsize = sizeof(struct rmnet_map_v5_coal_header); + break; + case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD: + hsize = sizeof(struct rmnet_map_v5_csum_header); + break; + } + + packet_len += hsize; + } + else { + qmap_hex_dump(__func__, data, 64); + WARN_ON(1); + } + + if ((int)skb_frag_size(frag) - (int)packet_len < 0) + return; + + rmnet_descriptor_add_frag(port, list, skb_frag_page(frag), + frag->page_offset + offset, + packet_len); + + offset += packet_len; + data += packet_len; + } +} + + +#define RMNET_IP_VERSION_4 0x40 +#define RMNET_IP_VERSION_6 0x60 + +/* Helper Functions */ + +static void rmnet_set_skb_proto(struct sk_buff *skb) +{ + switch (rmnet_map_data_ptr(skb)[0] & 0xF0) { + case RMNET_IP_VERSION_4: + skb->protocol = htons(ETH_P_IP); + break; + case RMNET_IP_VERSION_6: + skb->protocol = htons(ETH_P_IPV6); + break; + default: + skb->protocol = htons(ETH_P_MAP); + WARN_ON(1); + break; + } +} + +/* Allocate and populate an skb to contain the packet represented by the + * frag descriptor. + */ +static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct sk_buff *head_skb, *current_skb, *skb; + struct skb_shared_info *shinfo; + struct rmnet_frag_descriptor *sub_frag, *tmp; + + /* Use the exact sizes if we know them (i.e. 
RSB/RSC, rmnet_perf) */ + if (frag_desc->hdrs_valid) { + u16 hdr_len = frag_desc->ip_len + frag_desc->trans_len; + + head_skb = alloc_skb(hdr_len + RMNET_MAP_DESC_HEADROOM, + GFP_ATOMIC); + if (!head_skb) + return NULL; + + skb_reserve(head_skb, RMNET_MAP_DESC_HEADROOM); + skb_put_data(head_skb, frag_desc->hdr_ptr, hdr_len); + skb_reset_network_header(head_skb); + + if (frag_desc->trans_len) + skb_set_transport_header(head_skb, frag_desc->ip_len); + + /* Packets that have no data portion don't need any frags */ + if (hdr_len == skb_frag_size(&frag_desc->frag)) + goto skip_frags; + + /* If the headers we added are the start of the page, + * we don't want to add them twice + */ + if (frag_desc->hdr_ptr == rmnet_frag_data_ptr(frag_desc)) { + if (!rmnet_frag_pull(frag_desc, port, hdr_len)) { + kfree_skb(head_skb); + return NULL; + } + } + } else { + /* Allocate enough space to avoid penalties in the stack + * from __pskb_pull_tail() + */ + head_skb = alloc_skb(256 + RMNET_MAP_DESC_HEADROOM, + GFP_ATOMIC); + if (!head_skb) + return NULL; + + skb_reserve(head_skb, RMNET_MAP_DESC_HEADROOM); + } + + /* Add main fragment */ + get_page(skb_frag_page(&frag_desc->frag)); + skb_add_rx_frag(head_skb, 0, skb_frag_page(&frag_desc->frag), + frag_desc->frag.page_offset, + skb_frag_size(&frag_desc->frag), + skb_frag_size(&frag_desc->frag)); + + shinfo = skb_shinfo(head_skb); + current_skb = head_skb; + + /* Add in any frags from rmnet_perf */ + list_for_each_entry_safe(sub_frag, tmp, &frag_desc->sub_frags, list) { + skb_frag_t *frag; + u32 frag_size; + + frag = &sub_frag->frag; + frag_size = skb_frag_size(frag); + +add_frag: + if (shinfo->nr_frags < MAX_SKB_FRAGS) { + get_page(skb_frag_page(frag)); + skb_add_rx_frag(current_skb, shinfo->nr_frags, + skb_frag_page(frag), frag->page_offset, + frag_size, frag_size); + if (current_skb != head_skb) { + head_skb->len += frag_size; + head_skb->data_len += frag_size; + } + } else { + /* Alloc a new skb and try again */ + skb = alloc_skb(0, GFP_ATOMIC); + if (!skb) + break; + + if (current_skb == head_skb) + shinfo->frag_list = skb; + else + current_skb->next = skb; + + current_skb = skb; + shinfo = skb_shinfo(current_skb); + goto add_frag; + } + + rmnet_recycle_frag_descriptor(sub_frag, port); + } + +skip_frags: + head_skb->dev = frag_desc->dev; + rmnet_set_skb_proto(head_skb); + + /* Handle any header metadata that needs to be updated after RSB/RSC + * segmentation + */ + if (frag_desc->ip_id_set) { + struct iphdr *iph; + + iph = (struct iphdr *)rmnet_map_data_ptr(head_skb); + csum_replace2(&iph->check, iph->id, frag_desc->ip_id); + iph->id = frag_desc->ip_id; + } + + if (frag_desc->tcp_seq_set) { + struct tcphdr *th; + + th = (struct tcphdr *) + (rmnet_map_data_ptr(head_skb) + frag_desc->ip_len); + th->seq = frag_desc->tcp_seq; + } + + /* Handle csum offloading */ + if (frag_desc->csum_valid && frag_desc->hdrs_valid) { + /* Set the partial checksum information */ + //rmnet_frag_partial_csum(head_skb, frag_desc); + WARN_ON(1); + } else if (frag_desc->csum_valid) { + /* Non-RSB/RSC/perf packet. The current checksum is fine */ + head_skb->ip_summed = CHECKSUM_UNNECESSARY; + } else if (frag_desc->hdrs_valid && + (frag_desc->trans_proto == IPPROTO_TCP || + frag_desc->trans_proto == IPPROTO_UDP)) { + /* Unfortunately, we have to fake a bad checksum here, since + * the original bad value is lost by the hardware. The only + * reliable way to do it is to calculate the actual checksum + * and corrupt it. 
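+		 *
+		 * For illustration: if the correct folded checksum of the
+		 * packet is 0xABCD, the code below stores
+		 * csum16_add(0xABCD, htons(1)) instead, a value guaranteed
+		 * to mismatch, so ip_summed = CHECKSUM_NONE forces the
+		 * stack to verify the sum and drop the packet. The stored
+		 * value can also never end up as 0, which matters for UDP,
+		 * where a zero checksum means "no checksum present".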
+ */ + __sum16 *check; + __wsum csum; + unsigned int offset = skb_transport_offset(head_skb); + __sum16 pseudo; + + WARN_ON(1); + /* Calculate pseudo header and update header fields */ + if (frag_desc->ip_proto == 4) { + struct iphdr *iph = ip_hdr(head_skb); + __be16 tot_len = htons(head_skb->len); + + csum_replace2(&iph->check, iph->tot_len, tot_len); + iph->tot_len = tot_len; + pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + head_skb->len - + frag_desc->ip_len, + frag_desc->trans_proto, 0); + } else { + struct ipv6hdr *ip6h = ipv6_hdr(head_skb); + + ip6h->payload_len = htons(head_skb->len - + sizeof(*ip6h)); + pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, + head_skb->len - + frag_desc->ip_len, + frag_desc->trans_proto, 0); + } + + if (frag_desc->trans_proto == IPPROTO_TCP) { + check = &tcp_hdr(head_skb)->check; + } else { + udp_hdr(head_skb)->len = htons(head_skb->len - + frag_desc->ip_len); + check = &udp_hdr(head_skb)->check; + } + + *check = pseudo; + csum = skb_checksum(head_skb, offset, head_skb->len - offset, + 0); + /* Add 1 to corrupt. This cannot produce a final value of 0 + * since csum_fold() can't return a value of 0xFFFF + */ + *check = csum16_add(csum_fold(csum), htons(1)); + head_skb->ip_summed = CHECKSUM_NONE; + } + + /* Handle any rmnet_perf metadata */ + if (frag_desc->hash) { + head_skb->hash = frag_desc->hash; + head_skb->sw_hash = 1; + } + + if (frag_desc->flush_shs) + head_skb->cb[0] = 1; + + /* Handle coalesced packets */ + //if (frag_desc->gso_segs > 1) + // rmnet_frag_gso_stamp(head_skb, frag_desc); + + return head_skb; +} + +/* Deliver the packets contained within a frag descriptor */ +static void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct sk_buff *skb; + + skb = rmnet_alloc_skb(frag_desc, port); + if (skb) + rmnet_deliver_skb(skb, port); + rmnet_recycle_frag_descriptor(frag_desc, port); +} + +/* Process a QMAPv5 packet header */ +static int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port, + struct list_head *list, + u16 len) +{ + int rc = 0; + + switch (rmnet_frag_get_next_hdr_type(frag_desc)) { + case RMNET_MAP_HEADER_TYPE_COALESCING: + rc = -1; + WARN_ON(1); + break; + case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD: + if (rmnet_frag_get_csum_valid(frag_desc)) { + frag_desc->csum_valid = true; + } else { + } + + if (!rmnet_frag_pull(frag_desc, port, + sizeof(struct rmnet_map_header) + + sizeof(struct rmnet_map_v5_csum_header))) { + rc = -EINVAL; + break; + } + + frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc); + + /* Remove padding only for csum offload packets. + * Coalesced packets should never have padding. 
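+		 *
+		 * Worked example of the MAP padding rule (mirroring the
+		 * egress side in add_qhdr_v5()): payloads are padded to a
+		 * 4-byte multiple, so a 61-byte IP datagram is carried with
+		 * pad_len = 3 and pkt_len = htons(64); the "len" passed in
+		 * here is already pkt_len minus pad_len, so trimming to it
+		 * strips exactly the trailing pad bytes.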
+ */ + if (!rmnet_frag_trim(frag_desc, port, len)) { + rc = -EINVAL; + break; + } + + list_del_init(&frag_desc->list); + list_add_tail(&frag_desc->list, list); + break; + default: + qmap_hex_dump(__func__, rmnet_frag_data_ptr(frag_desc), 64); + rc = -EINVAL; + break; + } + + return rc; +} + +static void +__rmnet_frag_ingress_handler(struct rmnet_frag_descriptor *frag_desc, + struct rmnet_port *port) +{ + struct rmnet_map_header *qmap; + struct rmnet_endpoint *ep; + struct rmnet_frag_descriptor *frag, *tmp; + LIST_HEAD(segs); + u16 len, pad; + u8 mux_id; + + qmap = (struct rmnet_map_header *)skb_frag_address(&frag_desc->frag); + mux_id = qmap->mux_id; + pad = qmap->pad_len; + len = ntohs(qmap->pkt_len) - pad; + + if (qmap->cd_bit) { + goto recycle; + } + + if (mux_id >= RMNET_MAX_LOGICAL_EP) + goto recycle; + + ep = rmnet_get_endpoint(port, mux_id); + if (!ep) + goto recycle; + + frag_desc->dev = ep->rmnet_dev; + + /* Handle QMAPv5 packet */ + if (qmap->next_hdr && + (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE | + RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) { + if (rmnet_frag_process_next_hdr_packet(frag_desc, port, &segs, + len)) + goto recycle; + } else { + /* We only have the main QMAP header to worry about */ + if (!rmnet_frag_pull(frag_desc, port, sizeof(*qmap))) + return; + + frag_desc->hdr_ptr = rmnet_frag_data_ptr(frag_desc); + + if (!rmnet_frag_trim(frag_desc, port, len)) + return; + + list_add_tail(&frag_desc->list, &segs); + } + + list_for_each_entry_safe(frag, tmp, &segs, list) { + list_del_init(&frag->list); + rmnet_frag_deliver(frag, port); + } + return; + +recycle: + rmnet_recycle_frag_descriptor(frag_desc, port); +} + +static void rmnet_frag_ingress_handler(struct sk_buff *skb, + struct rmnet_port *port) +{ + LIST_HEAD(desc_list); + int i = 0; + struct rmnet_nss_cb *nss_cb; + + /* Deaggregation and freeing of HW originating + * buffers is done within here + */ + while (skb) { + struct sk_buff *skb_frag; + + port->chain_head = NULL; + port->chain_tail = NULL; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + rmnet_frag_deaggregate(&skb_shinfo(skb)->frags[i], port, + &desc_list); + if (!list_empty(&desc_list)) { + struct rmnet_frag_descriptor *frag_desc, *tmp; + + list_for_each_entry_safe(frag_desc, tmp, + &desc_list, list) { + list_del_init(&frag_desc->list); + __rmnet_frag_ingress_handler(frag_desc, + port); + } + } + } + + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (nss_cb && port->chain_head) { + port->chain_head->cb[0] = 0; + netif_receive_skb(port->chain_head); + } + + skb_frag = skb_shinfo(skb)->frag_list; + skb_shinfo(skb)->frag_list = NULL; + consume_skb(skb); + skb = skb_frag; + } +} + +static void +rmnet_map_ingress_handler(struct sk_buff *skb, + struct rmnet_port *port) +{ + if (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE | + RMNET_FLAGS_INGRESS_MAP_CKSUMV5)) { + if (skb_is_nonlinear(skb)) { + rmnet_frag_ingress_handler(skb, port); + return; + } + } + + WARN_ON(1); +} + +static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb); +static int rmnet_is_real_dev_registered(const struct net_device *real_dev) +{ + return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler; +} + + +/* Needs either rcu_read_lock() or rtnl lock */ +struct rmnet_port *rmnet_get_port(struct net_device *real_dev) +{ + if (rmnet_is_real_dev_registered(real_dev)) + return rcu_dereference_rtnl(real_dev->rx_handler_data); + else + return NULL; +} + +static rx_handler_result_t rmnet_rx_priv_handler(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + 
struct rmnet_nss_cb *nss_cb; + + if (!skb) + return RX_HANDLER_CONSUMED; + if (nss_debug) printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + if (skb->pkt_type == PACKET_LOOPBACK) + return RX_HANDLER_PASS; + + /* Check this so that we dont loop around netif_receive_skb */ + if (skb->cb[0] == 1) { + skb->cb[0] = 0; + + skb->dev->stats.rx_packets++; + return RX_HANDLER_PASS; + } + + while (skb) { + struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list; + + skb_shinfo(skb)->frag_list = NULL; + + nss_cb = rcu_dereference(rmnet_nss_callbacks); + if (nss_cb) + nss_cb->nss_tx(skb); + + skb = skb_frag; + } + + return RX_HANDLER_CONSUMED; +} + +/* Ingress / Egress Entry Points */ + +/* Processes packet as per ingress data format for receiving device. Logical + * endpoint is determined from packet inspection. Packet is then sent to the + * egress device listed in the logical endpoint configuration. + */ +static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct rmnet_port *port; + struct net_device *dev; + + if (!skb) + goto done; + + if (nss_debug) printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + if (skb->pkt_type == PACKET_LOOPBACK) + return RX_HANDLER_PASS; + + if (skb->protocol != htons(ETH_P_MAP)) { + WARN_ON(1); + return RX_HANDLER_PASS; + } + + dev = skb->dev; + port = rmnet_get_port(dev); + + if (port == NULL) + return RX_HANDLER_PASS; + + port->chain_head = NULL; + port->chain_tail = NULL; + + switch (port->rmnet_mode) { + case RMNET_EPMODE_VND: + rmnet_map_ingress_handler(skb, port); + break; + case RMNET_EPMODE_BRIDGE: + //rmnet_bridge_handler(skb, port->bridge_ep); + break; + } + +done: + return RX_HANDLER_CONSUMED; +} + +static void rmnet_descriptor_deinit(struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool; + struct rmnet_frag_descriptor *frag_desc, *tmp; + + pool = port->frag_desc_pool; + + list_for_each_entry_safe(frag_desc, tmp, &pool->free_list, list) { + kfree(frag_desc); + pool->pool_size--; + } + + kfree(pool); +} + +static int rmnet_descriptor_init(struct rmnet_port *port) +{ + struct rmnet_frag_descriptor_pool *pool; + int i; + + spin_lock_init(&port->desc_pool_lock); + pool = kzalloc(sizeof(*pool), GFP_ATOMIC); + if (!pool) + return -ENOMEM; + + INIT_LIST_HEAD(&pool->free_list); + port->frag_desc_pool = pool; + + for (i = 0; i < RMNET_FRAG_DESCRIPTOR_POOL_SIZE; i++) { + struct rmnet_frag_descriptor *frag_desc; + + frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC); + if (!frag_desc) + return -ENOMEM; + + INIT_LIST_HEAD(&frag_desc->list); + INIT_LIST_HEAD(&frag_desc->sub_frags); + list_add_tail(&frag_desc->list, &pool->free_list); + pool->pool_size++; + } + + return 0; +} + +struct rmnet_priv { + //struct rmnet_endpoint local_ep; + struct net_device *real_dev; + u8 mux_id; +}; + +static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct rmnet_priv *priv; + + if (nss_debug) printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + priv = netdev_priv(dev); + if (priv->real_dev) { + add_qhdr_v5(skb, priv->mux_id); + skb->protocol = htons(ETH_P_MAP); + skb->dev = priv->real_dev; + dev_queue_xmit(skb); + dev->stats.tx_packets++; + //rmnet_egress_handler(skb); + } else { + //this_cpu_inc(priv->pcpu_stats->stats.tx_drops); + kfree_skb(skb); + } + return NETDEV_TX_OK; +} + +static int 
rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
+{
+	if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
+		return -EINVAL;
+
+	rmnet_dev->mtu = new_mtu;
+	return 0;
+}
+
+static const struct net_device_ops rmnet_vnd_ops = {
+	.ndo_start_xmit = rmnet_vnd_start_xmit,
+	.ndo_change_mtu = rmnet_vnd_change_mtu,
+	//.ndo_get_iflink = rmnet_vnd_get_iflink,
+	//.ndo_add_slave = rmnet_add_bridge,
+	//.ndo_del_slave = rmnet_del_bridge,
+	//.ndo_init = rmnet_vnd_init,
+	//.ndo_uninit = rmnet_vnd_uninit,
+	//.ndo_get_stats64 = rmnet_get_stats64,
+};
+
+static void rmnet_vnd_setup(struct net_device *rmnet_dev)
+{
+	rmnet_dev->netdev_ops = &rmnet_vnd_ops;
+	rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
+	rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
+	random_ether_addr(rmnet_dev->dev_addr);
+	rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;
+
+	/* Raw IP mode */
+	rmnet_dev->header_ops = NULL; /* No header */
+	rmnet_dev->type = ARPHRD_RAWIP;
+	rmnet_dev->hard_header_len = 0;
+	rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+
+	//rmnet_dev->needs_free_netdev = true;
+
+	rmnet_dev->hw_features = NETIF_F_RXCSUM;
+	rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	//rmnet_dev->hw_features |= NETIF_F_SG;
+	//rmnet_dev->hw_features |= NETIF_F_GRO_HW;
+}
+#else
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+static uint nss_debug = 0;
+module_param( nss_debug, uint, S_IRUGO | S_IWUSR);
+
+/* rmnet section */
+
+#define RMNET_FLAGS_INGRESS_DEAGGREGATION (1U << 0)
+#define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1)
+#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2)
+#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3)
+#define RMNET_FLAGS_INGRESS_COALESCE (1U << 4)
+#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 5)
+#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 (1U << 6)
+
+#ifdef CONFIG_ARCH_IPQ807x
+#define CONFIG_QCA_NSS_DRV
+#endif
+#ifdef CONFIG_QCA_NSS_DRV
+#include "rmnet/rmnet_nss.c"
+#else
+#include "rmnet_nss.h"
+#endif
+
+#include "rmnet/rmnet_vnd.c"
+#include "rmnet/rmnet_map_command.c"
+#include "rmnet/rmnet_map_data.c"
+#include "rmnet/rmnet_descriptor.c"
+#include "rmnet/rmnet_config.c"
+#include "rmnet/rmnet_handlers.c"
+
+struct rmnet_nss_cb *rmnet_nss_callbacks __rcu __read_mostly;
+
+void rmnet_init(struct net_device *real_dev, u32 nr_rmnet_devs)
+{
+	struct rmnet_port *port;
+	struct rmnet_endpoint *ep;
+	struct net_device *rmnet_dev = NULL;
+	u32 nr = 0;
+	struct rmnet_nss_cb *nss_cb = rcu_dereference(rmnet_nss_callbacks);
+
+	if (!nss_cb) {
+#ifdef CONFIG_QCA_NSS_DRV
+		rmnet_nss_init();
+#endif
+	}
+
+	rmnet_register_real_device(real_dev);
+
+	port = rmnet_get_port_rtnl(real_dev);
+
+	port->data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION
+		| RMNET_FLAGS_INGRESS_MAP_CKSUMV5 | RMNET_FLAGS_EGRESS_MAP_CKSUMV5;
+	port->rmnet_mode = RMNET_EPMODE_VND;
+
+	for (nr = 0; nr < nr_rmnet_devs; nr++) {
+		u8 mux_id = 0x81+nr;
+
+		rmnet_dev = alloc_netdev(sizeof(struct rmnet_priv),
+				"rmnet_data%d", NET_NAME_PREDICTABLE,
+				rmnet_vnd_setup);
+
+		ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
+
+		rmnet_vnd_newlink(mux_id, rmnet_dev, port, real_dev, ep);
+		netdev_rx_handler_register(rmnet_dev, rmnet_rx_priv_handler, NULL);
+		hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
+	}
+
+	port->nr_rmnet_devs = nr_rmnet_devs;
+}
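+
+/* Channel numbering used above, for reference: QMAP mux IDs for data
+ * sessions conventionally start at 0x81, so with nr_rmnet_devs == 2 the
+ * loop above creates, schematically:
+ *
+ *   rmnet_data0 <-> mux_id 0x81
+ *   rmnet_data1 <-> mux_id 0x82
+ */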
+
+void rmnet_deinit(struct net_device *real_dev, u32 nr_rmnet_devs)
+{
+	struct rmnet_port *port;
+	u32 nr = 0;
+	struct rmnet_nss_cb *nss_cb = rcu_dereference(rmnet_nss_callbacks);
+
+	if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
+		return;
+
+	port = rmnet_get_port_rtnl(real_dev);
+
+	for (nr = 0; nr < nr_rmnet_devs; nr++) {
+		struct rmnet_endpoint *ep;
+		u8 mux_id = 0x81+nr;
+
+		ep = rmnet_get_endpoint(port, mux_id);
+		if (ep) {
+			hlist_del_init_rcu(&ep->hlnode);
+			rmnet_vnd_dellink(mux_id, port, ep);
+			synchronize_rcu();
+			kfree(ep);
+		}
+	}
+
+	rmnet_unregister_real_device(real_dev, port);
+
+	if (nss_cb) {
+#ifdef CONFIG_QCA_NSS_DRV
+		rmnet_nss_exit();
+#endif
+	}
+}
+#endif
diff --git a/wwan/driver/quectel_MHI/src/devices/rmnet_nss.c b/wwan/driver/quectel_MHI/src/devices/rmnet_nss.c
new file mode 100644
index 0000000..1165910
--- /dev/null
+++ b/wwan/driver/quectel_MHI/src/devices/rmnet_nss.c
@@ -0,0 +1,498 @@
+/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifndef _RMNET_NSS_H_
+#define _RMNET_NSS_H_
+
+struct rmnet_nss_cb {
+	int (*nss_create)(struct net_device *dev);
+	int (*nss_free)(struct net_device *dev);
+	int (*nss_tx)(struct sk_buff *skb);
+};
+
+extern struct rmnet_nss_cb *rmnet_nss_callbacks;
+
+#endif
+
+#define RMNET_NSS_HASH_BITS 8
+#define hash_add_ptr(table, node, key) \
+	hlist_add_head(node, &table[hash_ptr(key, HASH_BITS(table))])
+
+static DEFINE_HASHTABLE(rmnet_nss_ctx_hashtable, RMNET_NSS_HASH_BITS);
+
+struct rmnet_nss_ctx {
+	struct hlist_node hnode;
+	struct net_device *rmnet_dev;
+	struct nss_rmnet_rx_handle *nss_ctx;
+};
+
+enum __rmnet_nss_stat {
+	RMNET_NSS_RX_ETH,
+	RMNET_NSS_RX_FAIL,
+	RMNET_NSS_RX_NON_ETH,
+	RMNET_NSS_RX_BUSY,
+	RMNET_NSS_TX_NO_CTX,
+	RMNET_NSS_TX_SUCCESS,
+	RMNET_NSS_TX_FAIL,
+	RMNET_NSS_TX_NONLINEAR,
+	RMNET_NSS_TX_BAD_IP,
+	RMNET_NSS_EXCEPTIONS,
+	RMNET_NSS_EX_BAD_HDR,
+	RMNET_NSS_EX_BAD_IP,
+	RMNET_NSS_EX_SUCCESS,
+	RMNET_NSS_TX_BAD_FRAGS,
+	RMNET_NSS_TX_LINEARIZE_FAILS,
+	RMNET_NSS_TX_NON_ZERO_HEADLEN_FRAGS,
+	RMNET_NSS_TX_BUSY_LOOP,
+	RMNET_NSS_NUM_STATS,
+};
+
+static unsigned long rmnet_nss_stats[RMNET_NSS_NUM_STATS];
+extern void qmi_rmnet_mark_skb(struct net_device *dev, struct sk_buff *skb);
+static void (*rmnet_mark_skb)(struct net_device *dev, struct sk_buff *skb);
+
+#define RMNET_NSS_STAT(name, counter, desc) \
+	module_param_named(name, rmnet_nss_stats[counter], ulong, 0444); \
+	MODULE_PARM_DESC(name, desc)
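+
+/* Each counter below also becomes a read-only module parameter, so the
+ * statistics can be read from userspace without debugfs, e.g. (path is
+ * illustrative and depends on the module this file is built into):
+ *
+ *   cat /sys/module/rmnet_nss/parameters/rmnet_nss_tx_fast
+ */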
+
+RMNET_NSS_STAT(rmnet_nss_rx_ethernet, RMNET_NSS_RX_ETH,
+	       "Number of Ethernet headers successfully removed");
+RMNET_NSS_STAT(rmnet_nss_rx_fail, RMNET_NSS_RX_FAIL,
+	       "Number of Ethernet headers that could not be removed");
+RMNET_NSS_STAT(rmnet_nss_rx_non_ethernet, RMNET_NSS_RX_NON_ETH,
+	       "Number of non-Ethernet packets received");
+RMNET_NSS_STAT(rmnet_nss_rx_busy, RMNET_NSS_RX_BUSY,
+	       "Number of packets dropped because rmnet_data device was busy");
+RMNET_NSS_STAT(rmnet_nss_tx_slow, RMNET_NSS_TX_NO_CTX,
+	       "Number of packets sent over non-NSS-accelerated rmnet device");
+RMNET_NSS_STAT(rmnet_nss_tx_fast, RMNET_NSS_TX_SUCCESS,
+	       "Number of packets sent over NSS-accelerated rmnet device");
+RMNET_NSS_STAT(rmnet_nss_tx_fail, RMNET_NSS_TX_FAIL,
+	       "Number of packets that NSS could not transmit");
+RMNET_NSS_STAT(rmnet_nss_tx_nonlinear, RMNET_NSS_TX_NONLINEAR,
+	       "Number of nonlinear packets sent over NSS-accelerated rmnet device");
+RMNET_NSS_STAT(rmnet_nss_tx_invalid_ip, RMNET_NSS_TX_BAD_IP,
+	       "Number of ingress packets with invalid IP headers");
+RMNET_NSS_STAT(rmnet_nss_tx_invalid_frags, RMNET_NSS_TX_BAD_FRAGS,
+	       "Number of ingress packets with invalid frag format");
+RMNET_NSS_STAT(rmnet_nss_tx_linearize_fail, RMNET_NSS_TX_LINEARIZE_FAILS,
+	       "Number of ingress packets where linearize in tx fails");
+RMNET_NSS_STAT(rmnet_nss_tx_exceptions, RMNET_NSS_EXCEPTIONS,
+	       "Number of times our DL exception handler was invoked");
+RMNET_NSS_STAT(rmnet_nss_exception_non_ethernet, RMNET_NSS_EX_BAD_HDR,
+	       "Number of non-Ethernet exception packets");
+RMNET_NSS_STAT(rmnet_nss_exception_invalid_ip, RMNET_NSS_EX_BAD_IP,
+	       "Number of exception packets with invalid IP headers");
+RMNET_NSS_STAT(rmnet_nss_exception_success, RMNET_NSS_EX_SUCCESS,
+	       "Number of exception packets handled successfully");
+RMNET_NSS_STAT(rmnet_nss_tx_non_zero_headlen_frags, RMNET_NSS_TX_NON_ZERO_HEADLEN_FRAGS,
+	       "Number of packets with non-zero headlen");
+RMNET_NSS_STAT(rmnet_nss_tx_busy_loop, RMNET_NSS_TX_BUSY_LOOP,
+	       "Number of times tx packets busy looped");
+
+static void rmnet_nss_inc_stat(enum __rmnet_nss_stat stat)
+{
+	if (stat >= 0 && stat < RMNET_NSS_NUM_STATS)
+		rmnet_nss_stats[stat]++;
+}
+
+static struct rmnet_nss_ctx *rmnet_nss_find_ctx(struct net_device *dev)
+{
+	struct rmnet_nss_ctx *ctx;
+	struct hlist_head *bucket;
+	u32 hash;
+
+	hash = hash_ptr(dev, HASH_BITS(rmnet_nss_ctx_hashtable));
+	bucket = &rmnet_nss_ctx_hashtable[hash];
+	hlist_for_each_entry(ctx, bucket, hnode) {
+		if (ctx->rmnet_dev == dev)
+			return ctx;
+	}
+
+	return NULL;
+}
+
+static void rmnet_nss_free_ctx(struct rmnet_nss_ctx *ctx)
+{
+	if (ctx) {
+		hash_del(&ctx->hnode);
+		nss_rmnet_rx_xmit_callback_unregister(ctx->nss_ctx);
+		nss_rmnet_rx_destroy_sync(ctx->nss_ctx);
+		kfree(ctx);
+	}
+}
+
+/* Pull off an ethernet header, if possible */
+static int rmnet_nss_ethhdr_pull(struct sk_buff *skb)
+{
+	if (!skb->protocol || skb->protocol == htons(ETH_P_802_3)) {
+		void *ret = skb_pull(skb, sizeof(struct ethhdr));
+
+		rmnet_nss_inc_stat((ret) ?
RMNET_NSS_RX_ETH : + RMNET_NSS_RX_FAIL); + return !ret; + } + + rmnet_nss_inc_stat(RMNET_NSS_RX_NON_ETH); + return -1; +} +static int rmnet_nss_handle_non_zero_headlen(struct sk_buff *skb) +{ + struct iphdr *iph; + u8 transport; + + if (skb_headlen(skb) < sizeof(struct iphdr)){ + rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_IP); + return -EINVAL; + } + + iph = (struct iphdr *)skb->data; + + if (iph->version == 4) { + transport = iph->protocol; + } else if (iph->version == 6) { + struct ipv6hdr *ip6h = (struct ipv6hdr *)iph; + transport = ip6h->nexthdr; + } else { + rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_IP); + return -EINVAL; + } + +/* Assumption: required headers are copied in case of TCP/UDP by SFE */ +/* In case of TCP/UDP where there are no IP extension headers, the assumption is that SFE copied the IP and Transport header */ + + if (transport != IPPROTO_TCP && transport != IPPROTO_UDP) { + if (skb_linearize(skb)) { + rmnet_nss_inc_stat(RMNET_NSS_TX_LINEARIZE_FAILS); + return -EINVAL; + } + } + else if ((transport == IPPROTO_TCP && skb_headlen(skb) < 40) || (transport == IPPROTO_UDP && skb_headlen(skb) < 28)) { + pr_err_ratelimited("rmnet_nss: error: Partial copy of headers\n"); + return -EINVAL; + } + + return 0; +} + +/* Copy headers to linear section for non linear packets */ +static int rmnet_nss_adjust_header(struct sk_buff *skb) +{ + struct iphdr *iph; + skb_frag_t *frag; + int bytes = 0; + u8 transport; + + if (skb_shinfo(skb)->nr_frags != 1) { + rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_FRAGS); + return -EINVAL; + } + + if (skb_headlen(skb)) { + rmnet_nss_inc_stat(RMNET_NSS_TX_NON_ZERO_HEADLEN_FRAGS); + return rmnet_nss_handle_non_zero_headlen(skb); + } + + frag = &skb_shinfo(skb)->frags[0]; + + iph = (struct iphdr *)(skb_frag_address(frag)); + + if (iph->version == 4) { + bytes = iph->ihl*4; + transport = iph->protocol; + } else if (iph->version == 6) { + struct ipv6hdr *ip6h = (struct ipv6hdr *)iph; + + bytes = sizeof(struct ipv6hdr); + /* Dont have to account for extension headers yet */ + transport = ip6h->nexthdr; + } else { + rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_IP); + return -EINVAL; + } + + if (transport == IPPROTO_TCP) { + struct tcphdr *th; + + th = (struct tcphdr *)((u8 *)iph + bytes); + bytes += th->doff * 4; + } else if (transport == IPPROTO_UDP) { + bytes += sizeof(struct udphdr); + } else { + /* cant do anything else here unfortunately so linearize */ + if (skb_linearize(skb)) { + rmnet_nss_inc_stat(RMNET_NSS_TX_LINEARIZE_FAILS); + return -EINVAL; + } else { + return 0; + } + } + + if (bytes > skb_frag_size(frag)) { + rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_FRAGS); + return -EINVAL; + } + + skb_push(skb, bytes); + memcpy(skb->data, iph, bytes); + + /* subtract to account for skb_push */ + skb->len -= bytes; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) + frag->bv_offset += bytes; +#else + frag->page_offset += bytes; +#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(5, 4, 0)) */ + skb_frag_size_sub(frag, bytes); + + /* subtract to account for skb_frag_size_sub */ + skb->data_len -= bytes; + + return 0; +} + +/* Called by NSS in the DL exception case. + * Since the packet cannot be sent over the accelerated path, we need to + * handle it. Remove the ethernet header and pass it onward to the stack + * if possible. 
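+ *
+ * For reference, the dummy frame built by rmnet_nss_tx() and undone
+ * here is a 14-byte Ethernet header in front of the raw IP packet:
+ * zeroed h_dest, zeroed h_source, and h_proto set to ETH_P_IP or
+ * ETH_P_IPV6 from the IP version nibble.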
+ */
+void rmnet_nss_receive(struct net_device *dev, struct sk_buff *skb,
+		       struct napi_struct *napi)
+{
+	rmnet_nss_inc_stat(RMNET_NSS_EXCEPTIONS);
+
+	if (!skb)
+		return;
+
+	if (rmnet_nss_ethhdr_pull(skb)) {
+		rmnet_nss_inc_stat(RMNET_NSS_EX_BAD_HDR);
+		goto drop;
+	}
+
+	/* reset header pointers */
+	skb_reset_transport_header(skb);
+	skb_reset_network_header(skb);
+	skb_reset_mac_header(skb);
+
+	/* reset packet type */
+	skb->pkt_type = PACKET_HOST;
+
+	skb->dev = dev;
+
+	/* reset protocol type */
+	switch (skb->data[0] & 0xF0) {
+	case 0x40:
+		skb->protocol = htons(ETH_P_IP);
+		break;
+	case 0x60:
+		skb->protocol = htons(ETH_P_IPV6);
+		break;
+	default:
+		rmnet_nss_inc_stat(RMNET_NSS_EX_BAD_IP);
+		goto drop;
+	}
+
+	rmnet_nss_inc_stat(RMNET_NSS_EX_SUCCESS);
+
+	/* Set this so that we don't loop around netif_receive_skb */
+
+	skb->cb[0] = 1;
+
+	netif_receive_skb(skb);
+	return;
+
+drop:
+	kfree_skb(skb);
+}
+
+/* Main downlink handler
+ * Looks up the NSS context associated with the device. If the context is
+ * found, we add a dummy ethernet header with the appropriate protocol
+ * field set, then pass the packet off to NSS for hardware acceleration.
+ */
+int rmnet_nss_tx(struct sk_buff *skb)
+{
+	struct ethhdr *eth;
+	struct rmnet_nss_ctx *ctx;
+	struct net_device *dev = skb->dev;
+	nss_tx_status_t rc;
+	unsigned int len;
+	u8 version;
+
+	if (skb_is_nonlinear(skb)) {
+		if (rmnet_nss_adjust_header(skb))
+			goto fail;
+		else
+			rmnet_nss_inc_stat(RMNET_NSS_TX_NONLINEAR);
+	}
+
+	version = ((struct iphdr *)skb->data)->version;
+
+	ctx = rmnet_nss_find_ctx(dev);
+	if (!ctx) {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_NO_CTX);
+		return -EINVAL;
+	}
+
+	eth = (struct ethhdr *)skb_push(skb, sizeof(*eth));
+	memset(&eth->h_dest, 0, ETH_ALEN * 2);
+	if (version == 4) {
+		eth->h_proto = htons(ETH_P_IP);
+	} else if (version == 6) {
+		eth->h_proto = htons(ETH_P_IPV6);
+	} else {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_IP);
+		goto fail;
+	}
+
+	skb->protocol = htons(ETH_P_802_3);
+	/* Get length including ethhdr */
+	len = skb->len;
+
+transmit:
+	rc = nss_rmnet_rx_tx_buf(ctx->nss_ctx, skb);
+	if (rc == NSS_TX_SUCCESS) {
+		/* Increment rmnet_data device stats.
+		 * Don't call rmnet_data_vnd_rx_fixup() to do this, as
+		 * there's no guarantee the skb pointer is still valid.
+		 */
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += len;
+		rmnet_nss_inc_stat(RMNET_NSS_TX_SUCCESS);
+		return 0;
+	} else if (rc == NSS_TX_FAILURE_QUEUE) {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_BUSY_LOOP);
+		goto transmit;
+	} else if (rc == NSS_TX_FAILURE_NOT_ENABLED) {
+		/* New stats */
+		rmnet_nss_receive(dev, skb, NULL);
+		return 0;
+	}
+
+fail:
+	rmnet_nss_inc_stat(RMNET_NSS_TX_FAIL);
+	kfree_skb(skb);
+	return 1;
+}
+
+/* Called by NSS in the UL acceleration case.
+ * We are guaranteed to have an ethernet packet here from the NSS
+ * hardware, so we need to pull the header off and invoke our
+ * ndo_start_xmit function to handle transmitting the packet to the
+ * network stack.
+ */
+void rmnet_nss_xmit(struct net_device *dev, struct sk_buff *skb)
+{
+	int rc;
+
+	skb_pull(skb, sizeof(struct ethhdr));
+	rmnet_nss_inc_stat(RMNET_NSS_RX_ETH);
+
+	/* Use top-half entry point for the netdev so that we enable QDisc support for RmNet redirect.
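+	 *
+	 * Design note: dev_queue_xmit() enters above the queueing
+	 * discipline, so any qdisc configured on the rmnet_data device
+	 * still applies to NSS-accelerated UL traffic; calling the
+	 * device's ndo_start_xmit() directly would bypass it.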
*/ + skb_reset_network_header(skb); + skb->dev = dev; + switch (skb->data[0] & 0xF0) { + case 0x40: + skb->protocol = htons(ETH_P_IP); + break; + case 0x60: + skb->protocol = htons(ETH_P_IPV6); + break; + default: + break; + } + if (rmnet_mark_skb) + rmnet_mark_skb(dev, skb); + + rc = dev_queue_xmit(skb); + if (unlikely(rc != 0)) { + rmnet_nss_inc_stat(RMNET_NSS_RX_BUSY); + } +} + +/* Create and register an NSS context for an rmnet_data device */ +int rmnet_nss_create_vnd(struct net_device *dev) +{ + struct rmnet_nss_ctx *ctx; + + ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC); + if (!ctx) + return -ENOMEM; + + ctx->rmnet_dev = dev; + ctx->nss_ctx = nss_rmnet_rx_create(dev); + if (!ctx->nss_ctx) { + kfree(ctx); + return -1; + } + + nss_rmnet_rx_register(ctx->nss_ctx, rmnet_nss_receive, dev); + nss_rmnet_rx_xmit_callback_register(ctx->nss_ctx, rmnet_nss_xmit); + hash_add_ptr(rmnet_nss_ctx_hashtable, &ctx->hnode, dev); + return 0; +} + +/* Unregister and destroy the NSS context for an rmnet_data device */ +int rmnet_nss_free_vnd(struct net_device *dev) +{ + struct rmnet_nss_ctx *ctx; + + ctx = rmnet_nss_find_ctx(dev); + rmnet_nss_free_ctx(ctx); + + return 0; +} + +static const struct rmnet_nss_cb rmnet_nss = { + .nss_create = rmnet_nss_create_vnd, + .nss_free = rmnet_nss_free_vnd, + .nss_tx = rmnet_nss_tx, +}; + +int __init rmnet_nss_init(void) +{ + pr_err("%s(): initializing rmnet_nss\n", __func__); + RCU_INIT_POINTER(rmnet_nss_callbacks, &rmnet_nss); + rmnet_mark_skb = symbol_get(qmi_rmnet_mark_skb); + return 0; +} + +void __exit rmnet_nss_exit(void) +{ + struct hlist_node *tmp; + struct rmnet_nss_ctx *ctx; + int bkt; + + pr_err("%s(): exiting rmnet_nss\n", __func__); + RCU_INIT_POINTER(rmnet_nss_callbacks, NULL); + if (rmnet_mark_skb) + symbol_put(qmi_rmnet_mark_skb); + + /* Tear down all NSS contexts */ + hash_for_each_safe(rmnet_nss_ctx_hashtable, bkt, tmp, ctx, hnode) + rmnet_nss_free_ctx(ctx); +} + +MODULE_LICENSE("GPL v2"); +module_init(rmnet_nss_init); +module_exit(rmnet_nss_exit); diff --git a/wwan/driver/quectel_QMI_WWAN/Makefile b/wwan/driver/quectel_QMI_WWAN/Makefile new file mode 100644 index 0000000..7e19a63 --- /dev/null +++ b/wwan/driver/quectel_QMI_WWAN/Makefile @@ -0,0 +1,52 @@ +# +# Copyright (C) 2015 OpenWrt.org +# +# This is free software, licensed under the GNU General Public License v2. +# See /LICENSE for more information. 
+# + +include $(TOPDIR)/rules.mk + +PKG_NAME:=qmi_wwan_q +PKG_VERSION:=3.0 +PKG_RELEASE:=4 + +include $(INCLUDE_DIR)/kernel.mk +include $(INCLUDE_DIR)/package.mk + +PKG_BUILD_PARALLEL:=1 +PKG_BUILD_FLAGS:=gc-sections lto + +define KernelPackage/qmi_wwan_q + SUBMENU:=WWAN Support + TITLE:=Quectel Linux USB QMI WWAN Driver + DEPENDS:=+kmod-usb-net +kmod-usb-wdm +kmod-qca-nss-drv +@NSS_DRV_RMNET_ENABLE + FILES:=$(PKG_BUILD_DIR)/qmi_wwan_q.ko \ + $(PKG_BUILD_DIR)/rmnet_nss.ko + AUTOLOAD:=$(call AutoLoad,42,rmnet_nss) \ + $(call AutoLoad,81,qmi_wwan_q) +endef + +define KernelPackage/qmi_wwan_q/description + Quectel Linux USB QMI WWAN Driver +endef + +EXTRA_CFLAGS+= \ + -I$(STAGING_DIR)/usr/include/qca-nss-drv + +MAKE_OPTS:= \ + ARCH="$(LINUX_KARCH)" \ + CROSS_COMPILE="$(TARGET_CROSS)" \ + EXTRA_CFLAGS="$(EXTRA_CFLAGS)" \ + CXXFLAGS="$(TARGET_CXXFLAGS)" \ + M="$(PKG_BUILD_DIR)" \ + $(EXTRA_KCONFIG) + +define Build/Compile + +$(MAKE) -C "$(LINUX_DIR)" $(strip $(MAKE_OPTS)) \ + $(KERNEL_MAKE_FLAGS) \ + $(PKG_JOBS) \ + modules +endef + +$(eval $(call KernelPackage,qmi_wwan_q)) diff --git a/wwan/driver/quectel_QMI_WWAN/patches/100-kernel-6.6.patch b/wwan/driver/quectel_QMI_WWAN/patches/100-kernel-6.6.patch new file mode 100644 index 0000000..6cbc30c --- /dev/null +++ b/wwan/driver/quectel_QMI_WWAN/patches/100-kernel-6.6.patch @@ -0,0 +1,104 @@ +--- a/qmi_wwan_q.c ++++ b/qmi_wwan_q.c +@@ -46,7 +46,7 @@ + #define ARPHRD_RAWIP ARPHRD_NONE + #endif + +-#ifdef CONFIG_PINCTRL_IPQ807x ++#ifdef CONFIG_PINCTRL_IPQ8074 + #define CONFIG_QCA_NSS_DRV + //#define CONFIG_QCA_NSS_PACKET_FILTER + #endif +@@ -59,14 +59,12 @@ struct rmnet_nss_cb { + int (*nss_tx)(struct sk_buff *skb); + }; + static struct rmnet_nss_cb __read_mostly *nss_cb = NULL; +-#if defined(CONFIG_PINCTRL_IPQ807x) || defined(CONFIG_PINCTRL_IPQ5018) +-#ifdef CONFIG_RMNET_DATA ++#if defined(CONFIG_PINCTRL_IPQ8074) || defined(CONFIG_PINCTRL_IPQ5018) + #define CONFIG_QCA_NSS_DRV + /* define at qsdk/qca/src/linux-4.4/net/rmnet_data/rmnet_data_main.c */ + /* set at qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c */ + extern struct rmnet_nss_cb *rmnet_nss_callbacks __rcu __read_mostly; + #endif +-#endif + + /* This driver supports wwan (3G/LTE/?) 
devices using a vendor
+ * specific management protocol called Qualcomm MSM Interface (QMI) -
+@@ -828,26 +826,25 @@ static struct rtnl_link_stats64 *_rmnet_
+ 	stats64 = per_cpu_ptr(dev->stats64, cpu);
+ 
+ 	do {
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION( 6,6,0 ))
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(6,6,0))
+ 		start = u64_stats_fetch_begin_irq(&stats64->syncp);
+ #else
+ 		start = u64_stats_fetch_begin(&stats64->syncp);
+ #endif
+-		rx_packets = stats64->rx_packets;
+-		rx_bytes = stats64->rx_bytes;
+-		tx_packets = stats64->tx_packets;
+-		tx_bytes = stats64->tx_bytes;
+-#if (LINUX_VERSION_CODE < KERNEL_VERSION( 6,6,0 ))
++		rx_packets = u64_stats_read(&stats64->rx_packets);
++		rx_bytes = u64_stats_read(&stats64->rx_bytes);
++		tx_packets = u64_stats_read(&stats64->tx_packets);
++		tx_bytes = u64_stats_read(&stats64->tx_bytes);
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(6,6,0))
+ 	} while (u64_stats_fetch_retry_irq(&stats64->syncp, start));
+ #else
+ 	} while (u64_stats_fetch_retry(&stats64->syncp, start));
+ #endif
+ 
+-	stats->rx_packets += u64_stats_read(&rx_packets);
+-	stats->rx_bytes += u64_stats_read(&rx_bytes);
+-	stats->tx_packets += u64_stats_read(&tx_packets);
+-	stats->tx_bytes += u64_stats_read(&tx_bytes);
+-#endif
++	stats->rx_packets += rx_packets;
++	stats->rx_bytes += rx_bytes;
++	stats->tx_packets += tx_packets;
++	stats->tx_bytes += tx_bytes;
+ }
+ 
+ 	return stats;
+}
+@@ -2588,7 +2585,6 @@ static void qmap_qmi_wwan_disconnect(str
+ static struct usb_driver qmi_wwan_driver = {
+ 	.name = "qmi_wwan_q",
+ 	.id_table = products,
+-	.probe = qmi_wwan_probe,
+ #if defined(QUECTEL_WWAN_QMAP)
+ 	.probe = qmap_qmi_wwan_probe,
+ 	.disconnect = qmap_qmi_wwan_disconnect,
+--- a/rmnet_nss.c
++++ b/rmnet_nss.c
+@@ -31,8 +31,8 @@ struct rmnet_nss_cb {
+ 	int (*nss_tx)(struct sk_buff *skb);
+ };
+ 
+-extern struct rmnet_nss_cb *rmnet_nss_callbacks;
+-
++struct rmnet_nss_cb *rmnet_nss_callbacks;
++EXPORT_SYMBOL(rmnet_nss_callbacks);
+ #endif
+ 
+ #define RMNET_NSS_HASH_BITS 8
+@@ -356,7 +356,8 @@ int rmnet_nss_tx(struct sk_buff *skb)
+ 	}
+ 
+ 	eth = (struct ethhdr *)skb_push(skb, sizeof(*eth));
+-	memset(&eth->h_dest, 0, ETH_ALEN * 2);
++	memset(eth->h_dest, 0, ETH_ALEN); // Clear h_dest
++	memset(eth->h_source, 0, ETH_ALEN); // Clear h_source
+ 	if (version == 4) {
+ 		eth->h_proto = htons(ETH_P_IP);
+ 	} else if (version == 6) {
+@@ -463,7 +464,7 @@ int rmnet_nss_free_vnd(struct net_device
+ 	return 0;
+ }
+ 
+-static const struct rmnet_nss_cb rmnet_nss = {
++static struct rmnet_nss_cb rmnet_nss = {
+ 	.nss_create = rmnet_nss_create_vnd,
+ 	.nss_free = rmnet_nss_free_vnd,
+ 	.nss_tx = rmnet_nss_tx,
diff --git a/wwan/driver/quectel_QMI_WWAN/src/Makefile b/wwan/driver/quectel_QMI_WWAN/src/Makefile
new file mode 100644
index 0000000..95fddb7
--- /dev/null
+++ b/wwan/driver/quectel_QMI_WWAN/src/Makefile
@@ -0,0 +1,35 @@
+obj-m += rmnet_nss.o qmi_wwan_q.o
+
+PWD := $(shell pwd)
+
+ifeq ($(ARCH),)
+ARCH := $(shell uname -m)
+endif
+ifeq ($(CROSS_COMPILE),)
+CROSS_COMPILE :=
+endif
+ifeq ($(KDIR),)
+KDIR := /lib/modules/$(shell uname -r)/build
+ifeq ($(ARCH),i686)
+ifeq ($(wildcard $(KDIR)/arch/$(ARCH)),)
+ARCH=i386
+endif
+endif
+endif
+
+ifneq ($(findstring &,${PWD}),)
+$(warning "${PWD}")
+$(warning "current directory contains the special char '&'!")
+$(error "please remove it!")
+endif
+
+default:
+	$(MAKE) ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} -C $(KDIR) M=$(PWD) modules
+
+install: default
+	install -m 644 $(PWD)/*.ko /lib/modules/$(shell uname -r)/kernel/drivers/net/usb/
+	depmod
+
+clean:
+	rm -rf *~ .tmp_versions modules.order Module.symvers
+	find . -type f -name "*~" -o -name "*.o" -o -name "*.ko" -o -name "*.cmd" -o -name "*.mod.c" | xargs rm -rf
diff --git a/wwan/driver/quectel_QMI_WWAN/src/ReleaseNote.txt b/wwan/driver/quectel_QMI_WWAN/src/ReleaseNote.txt
new file mode 100644
index 0000000..8f20152
--- /dev/null
+++ b/wwan/driver/quectel_QMI_WWAN/src/ReleaseNote.txt
@@ -0,0 +1,146 @@
+Release Notes
+
+[V1.2.2]
+Date: 9/7/2022
+enhancement:
+ 1. Optimization: the network card send-queue wakeup is changed from a callback to a tasklet
+ 2. Add the ability to return LAN packets in bridge mode
+ 3. support ndo ioctl on kernels > 5.14
+ 4. Allow setting an MTU greater than 1500
+fix:
+
+[V1.2.1]
+Date: 9/26/2021
+enhancement:
+ 1. support IPQ5018's NSS
+ 2. use 'qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c' instead of our own rmnet_nss.c,
+ and qmi_wwan_q.ko must be loaded after rmnet_nss.ko
+fix:
+
+[V1.2.0.25]
+Date: 9/17/2021
+enhancement:
+fix:
+ 1. add sdx6x platform support
+
+[V1.2.0.24]
+Date: 9/6/2021
+enhancement:
+fix:
+ 1. add BG95 support
+ 2. support Linux 5.14.0
+
+[V1.2.0.23]
+Date: 3/23/2021
+enhancement:
+fix:
+ 1. add sdx12 platform support
+
+[V1.2.0.22]
+Date: 2/5/2021
+enhancement:
+fix:
+ 1. fix panic (use-after-free) when running modem reboot stress tests
+
+[V1.2.0.21]
+Date: 2/4/2021
+enhancement:
+ 1. Code refactoring - QMAP and rmnet
+fix:
+ 1. qmap_qmi_wwan_rx_fixup: change skb_dequeue to __skb_dequeue
+
+[V1.2.0.20]
+Date: 11/2/2020
+enhancement:
+fix:
+ 1. LTE-A modems cannot obtain an IP address via DHCP
+
+[V1.2.0.19]
+Date: 10/9/2020
+enhancement:
+fix:
+ 1. X55 cannot access the internet after USB resume
+
+[V1.2.0.18]
+Date: 10/9/2020
+enhancement:
+fix:
+ 1. X55: rename rmnet_usb0.1 to wwan0_1
+ 1.1 if the name contains '.', OpenWrt will treat it as a VLAN and auto-create one
+ 1.2 if the name contains '.', Android will consider it invalid
+ 1.3 if named rmnet_usb0 on a QCOM SoC, QCOM's netmgr will auto-manage it
+
+[V1.2.0.17]
+Date: 9/14/2020
+enhancement:
+ 1. Code refactoring - QMAP size and version
+fix:
+
+[V1.2.0.16]
+Date: 9/14/2020
+enhancement:
+ 1. rx_fixup(): check whether there is enough skb_headroom() to fill the ethernet header
+fix:
+ 1. fix "WARNING: suspicious RCU usage"
+
+[V1.2.0.15]
+Date: 9/10/2020
+enhancement:
+fix:
+ 1. fix compile errors on kernel 3.10~3.13
+
+[V1.2.0.14]
+Date: 7/24/2020
+enhancement:
+fix:
+ 1. fix QMAP V5 bug on big-endian CPUs
+
+[V1.2.0.13]
+Date: 6/22/2020
+enhancement:
+fix:
+ 1. fix no data traffic when running upload TPUT tests
+
+[V1.2.0.12]
+Date: 5/29/2020
+enhancement:
+fix:
+ 1. IPQ8074: when hyfi is enabled, quectel-CM will crash the system
+
+[V1.2.0.9]
+Date: 5/13/2020
+enhancement:
+fix:
+ 1. IPQ8074: enable CONFIG_QCA_NSS_DRV by CONFIG_PINCTRL_IPQ807x (from CONFIG_ARCH_IPQ807x)
+
+[V1.2.0.8]
+Date: 5/9/2020
+enhancement:
+fix:
+ 1. fix compile errors on kernel V3.10
+
+[V1.2.0.7]
+Date: 4/25/2020
+enhancement:
+ 1. X55: support bridge mode
+fix:
+
+[V1.2.0.6]
+Date: 4/20/2020
+enhancement:
+ 1. add stats64; otherwise the rx/tx statistics become 0 once traffic exceeds 4G
+ 2. do not use skb_clone; it makes the CPU load of QCOM's NSS and SFE very high
+fix:
+
+[V1.2.0.5]
+Date: 4/8/2020
+enhancement:
+ 1. add attribute link_state; change carrier state according to link_state
+ quectel-CM will set link_state to 1 when the QMI setup call succeeds.
+fix:
+
+[V1.2.0.4]
+Date: 4/8/2020
+enhancement:
+ 1.
support X55's QMAP V5 +fix: diff --git a/wwan/driver/quectel_QMI_WWAN/src/qmi_wwan_q.c b/wwan/driver/quectel_QMI_WWAN/src/qmi_wwan_q.c new file mode 100644 index 0000000..4f1a468 --- /dev/null +++ b/wwan/driver/quectel_QMI_WWAN/src/qmi_wwan_q.c @@ -0,0 +1,2626 @@ +/* + * Copyright (c) 2012 Bjørn Mork + * + * The probing code is heavily inspired by cdc_ether, which is: + * Copyright (C) 2003-2005 by David Brownell + * Copyright (C) 2006 by Ole Andre Vadla Ravnas (ActiveSync) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE > KERNEL_VERSION(3,16,0) //8b094cd03b4a3793220d8d8d86a173bfea8c285b +#include +#else +#define timespec64 timespec +#define ktime_get_ts64 ktime_get_ts +#define timespec64_sub timespec_sub +#endif +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef ETH_P_MAP +#define ETH_P_MAP 0xDA1A +#endif + +#if (ETH_P_MAP == 0x00F9) +#undef ETH_P_MAP +#define ETH_P_MAP 0xDA1A +#endif + +#ifndef ARPHRD_RAWIP +#define ARPHRD_RAWIP ARPHRD_NONE +#endif + +#ifdef CONFIG_PINCTRL_IPQ807x +#define CONFIG_QCA_NSS_DRV +//#define CONFIG_QCA_NSS_PACKET_FILTER +#endif + +#define _RMNET_NSS_H_ +#define _RMENT_NSS_H_ +struct rmnet_nss_cb { + int (*nss_create)(struct net_device *dev); + int (*nss_free)(struct net_device *dev); + int (*nss_tx)(struct sk_buff *skb); +}; +static struct rmnet_nss_cb __read_mostly *nss_cb = NULL; +#if defined(CONFIG_PINCTRL_IPQ807x) || defined(CONFIG_PINCTRL_IPQ5018) +#ifdef CONFIG_RMNET_DATA +#define CONFIG_QCA_NSS_DRV +/* define at qsdk/qca/src/linux-4.4/net/rmnet_data/rmnet_data_main.c */ +/* set at qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c */ +extern struct rmnet_nss_cb *rmnet_nss_callbacks __rcu __read_mostly; +#endif +#endif + +/* This driver supports wwan (3G/LTE/?) devices using a vendor + * specific management protocol called Qualcomm MSM Interface (QMI) - + * in addition to the more common AT commands over serial interface + * management + * + * QMI is wrapped in CDC, using CDC encapsulated commands on the + * control ("master") interface of a two-interface CDC Union + * resembling standard CDC ECM. The devices do not use the control + * interface for any other CDC messages. Most likely because the + * management protocol is used in place of the standard CDC + * notifications NOTIFY_NETWORK_CONNECTION and NOTIFY_SPEED_CHANGE + * + * Alternatively, control and data functions can be combined in a + * single USB interface. + * + * Handling a protocol like QMI is out of the scope for any driver. + * It is exported as a character device using the cdc-wdm driver as + * a subdriver, enabling userspace applications ("modem managers") to + * handle it. 
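+ *
+ * In a typical deployment (names illustrative): the cdc-wdm subdriver
+ * exposes /dev/cdc-wdm0 for the QMI control channel, while this driver
+ * provides the wwan0 (or wwan0_1 in QMAP mode) network interface; a
+ * modem manager such as quectel-CM drives the former to bring up a
+ * data session on the latter.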
+ *
+ * These devices may alternatively/additionally be configured using AT
+ * commands on a serial interface
+ */
+#define VERSION_NUMBER "V1.2.2"
+#define QUECTEL_WWAN_VERSION "Quectel_Linux&Android_QMI_WWAN_Driver_"VERSION_NUMBER
+static const char driver_name[] = "qmi_wwan_q";
+
+/* driver specific data */
+struct qmi_wwan_state {
+	struct usb_driver *subdriver;
+	atomic_t pmcount;
+	unsigned long unused;
+	struct usb_interface *control;
+	struct usb_interface *data;
+};
+
+/* default ethernet address used by the modem */
+static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3};
+
+#if 1 //Added by Quectel
+/*
+    Quectel_WCDMA&LTE_Linux_USB_Driver_User_Guide_V1.9.pdf
+    5.6. Test QMAP on GobiNet or QMI WWAN
+    0 - no QMAP
+    1 - QMAP (Aggregation protocol)
+    X - QMAP (Multiplexing and Aggregation protocol)
+*/
+#define QUECTEL_WWAN_QMAP 4 //MAX is 7
+
+#if defined(QUECTEL_WWAN_QMAP)
+#define QUECTEL_QMAP_MUX_ID 0x81
+
+static uint __read_mostly qmap_mode = 0;
+module_param( qmap_mode, uint, S_IRUGO);
+module_param_named( rx_qmap, qmap_mode, uint, S_IRUGO );
+#endif
+
+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) || defined(CONFIG_BRIDGE_LAN)
+#define QUECTEL_BRIDGE_MODE
+#endif
+
+#ifdef QUECTEL_BRIDGE_MODE
+static uint __read_mostly bridge_mode = 0/*|BIT(1)*/;
+module_param( bridge_mode, uint, S_IRUGO );
+#endif
+
+#ifdef CONFIG_BRIDGE_LAN
+static const u8 broadcast_mac_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+#endif
+
+//#define QMI_NETDEV_ONE_CARD_MODE
+static uint __read_mostly one_card_mode = 0;
+
+#if defined(QUECTEL_WWAN_QMAP)
+#define QUECTEL_UL_DATA_AGG 1
+
+#if defined(QUECTEL_UL_DATA_AGG)
+struct tx_agg_ctx {
+	/* QMIWDS_ADMIN_SET_DATA_FORMAT_RESP TLV_0x17 and TLV_0x18 */
+	uint ul_data_aggregation_max_datagrams; //UplinkDataAggregationMaxDatagramsTlv
+	uint ul_data_aggregation_max_size; //UplinkDataAggregationMaxSizeTlv
+	uint dl_minimum_padding; //0x1A
+};
+#endif
+
+typedef struct {
+	unsigned int size;
+	unsigned int rx_urb_size;
+	unsigned int ep_type;
+	unsigned int iface_id;
+	unsigned int qmap_mode;
+	unsigned int qmap_version;
+	unsigned int dl_minimum_padding;
+	char ifname[8][16];
+	unsigned char mux_id[8];
+} RMNET_INFO;
+
+typedef struct sQmiWwanQmap
+{
+	struct usbnet *mpNetDev;
+	struct driver_info driver_info;
+	atomic_t refcount;
+	struct net_device *mpQmapNetDev[QUECTEL_WWAN_QMAP];
+	uint link_state;
+	uint qmap_mode;
+	uint qmap_size;
+	uint qmap_version;
+
+#if defined(QUECTEL_UL_DATA_AGG)
+	struct tx_agg_ctx tx_ctx;
+	struct tasklet_struct txq;
+	struct tasklet_struct usbnet_bh;
+#endif
+
+#ifdef QUECTEL_BRIDGE_MODE
+	uint bridge_mode;
+	uint bridge_ipv4;
+	unsigned char bridge_mac[ETH_ALEN];
+#ifdef CONFIG_BRIDGE_LAN
+	unsigned char bridge_self_mac[ETH_ALEN];
+#endif
+#endif
+	uint use_rmnet_usb;
+	RMNET_INFO rmnet_info;
+} sQmiWwanQmap;
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3,13,0) //8f84985fec10de64a6b4cdfea45f2b0ab8f07c78
+#define MHI_NETDEV_STATUS64
+#endif
+struct qmap_priv {
+	struct usbnet *dev;
+	struct net_device *real_dev;
+	struct net_device *self_dev;
+	u8 offset_id;
+	u8 mux_id;
+	u8 qmap_version; // 5~v1, 9~v5
+	u8 link_state;
+
+#if defined(MHI_NETDEV_STATUS64)
+	struct pcpu_sw_netstats __percpu *stats64;
+#endif
+
+	spinlock_t agg_lock;
+	struct sk_buff *agg_skb;
+	unsigned agg_count;
+	struct timespec64 agg_time;
+	struct hrtimer agg_hrtimer;
+	struct work_struct agg_wq;
+
+#ifdef
CONFIG_BRIDGE_LAN + unsigned char bridge_self_mac[ETH_ALEN]; +#endif +#endif + uint use_qca_nss; +}; + +struct qmap_hdr { + u8 cd_rsvd_pad; + u8 mux_id; + u16 pkt_len; +} __packed; + +enum rmnet_map_v5_header_type { + RMNET_MAP_HEADER_TYPE_UNKNOWN, + RMNET_MAP_HEADER_TYPE_COALESCING = 0x1, + RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2, + RMNET_MAP_HEADER_TYPE_ENUM_LENGTH +}; + +/* Main QMAP header */ +struct rmnet_map_header { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 pad_len:6; + u8 next_hdr:1; + u8 cd_bit:1; +#elif defined (__BIG_ENDIAN_BITFIELD) + u8 cd_bit:1; + u8 next_hdr:1; + u8 pad_len:6; +#else +#error "Please fix " +#endif + u8 mux_id; + __be16 pkt_len; +} __aligned(1); + +/* QMAP v5 headers */ +struct rmnet_map_v5_csum_header { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 next_hdr:1; + u8 header_type:7; + u8 hw_reserved:7; + u8 csum_valid_required:1; +#elif defined (__BIG_ENDIAN_BITFIELD) + u8 header_type:7; + u8 next_hdr:1; + u8 csum_valid_required:1; + u8 hw_reserved:7; +#else +#error "Please fix " +#endif + __be16 reserved; +} __aligned(1); + +#ifdef QUECTEL_BRIDGE_MODE +static int is_qmap_netdev(const struct net_device *netdev); +#endif +#endif + +static const struct driver_info rmnet_usb_info; + +#ifdef QUECTEL_BRIDGE_MODE +static int bridge_arp_reply(struct net_device *net, struct sk_buff *skb, uint bridge_ipv4) { + struct arphdr *parp; + u8 *arpptr, *sha; + u8 sip[4], tip[4], ipv4[4]; + struct sk_buff *reply = NULL; + + ipv4[0] = (bridge_ipv4 >> 24) & 0xFF; + ipv4[1] = (bridge_ipv4 >> 16) & 0xFF; + ipv4[2] = (bridge_ipv4 >> 8) & 0xFF; + ipv4[3] = (bridge_ipv4 >> 0) & 0xFF; + + parp = arp_hdr(skb); + + if (parp->ar_hrd == htons(ARPHRD_ETHER) && parp->ar_pro == htons(ETH_P_IP) + && parp->ar_op == htons(ARPOP_REQUEST) && parp->ar_hln == 6 && parp->ar_pln == 4) { + arpptr = (u8 *)parp + sizeof(struct arphdr); + sha = arpptr; + arpptr += net->addr_len; /* sha */ + memcpy(sip, arpptr, sizeof(sip)); + arpptr += sizeof(sip); + arpptr += net->addr_len; /* tha */ + memcpy(tip, arpptr, sizeof(tip)); + + pr_info("%s sip = %d.%d.%d.%d, tip=%d.%d.%d.%d, ipv4=%d.%d.%d.%d\n", netdev_name(net), + sip[0], sip[1], sip[2], sip[3], tip[0], tip[1], tip[2], tip[3], ipv4[0], ipv4[1], ipv4[2], ipv4[3]); + //wwan0 sip = 10.151.137.255, tip=10.151.138.0, ipv4=10.151.137.255 + if (tip[0] == ipv4[0] && tip[1] == ipv4[1] && (tip[2]&0xFC) == (ipv4[2]&0xFC) && tip[3] != ipv4[3]) + reply = arp_create(ARPOP_REPLY, ETH_P_ARP, *((__be32 *)sip), net, *((__be32 *)tip), sha, default_modem_addr, sha); + + if (reply) { + skb_reset_mac_header(reply); + __skb_pull(reply, skb_network_offset(reply)); + reply->ip_summed = CHECKSUM_UNNECESSARY; + reply->pkt_type = PACKET_HOST; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0) + netif_rx_ni(reply); +#else + netif_rx(reply); +#endif + } + return 1; + } + + return 0; +} + +static struct sk_buff *bridge_mode_tx_fixup(struct net_device *net, struct sk_buff *skb, uint bridge_ipv4, unsigned char *bridge_mac) { + struct ethhdr *ehdr; + const struct iphdr *iph; + + skb_reset_mac_header(skb); + ehdr = eth_hdr(skb); + + if (ehdr->h_proto == htons(ETH_P_ARP)) { + if (bridge_ipv4) + bridge_arp_reply(net, skb, bridge_ipv4); + return NULL; + } + + iph = ip_hdr(skb); + //DBG("iphdr: "); + //PrintHex((void *)iph, sizeof(struct iphdr)); + +// 1 0.000000000 0.0.0.0 255.255.255.255 DHCP 362 DHCP Request - Transaction ID 0xe7643ad7 + if (ehdr->h_proto == htons(ETH_P_IP) && iph->protocol == IPPROTO_UDP && iph->saddr == 0x00000000 && iph->daddr == 0xFFFFFFFF) { + //if (udp_hdr(skb)->dest == 
htons(67)) //DHCP Request + { + memcpy(bridge_mac, ehdr->h_source, ETH_ALEN); + pr_info("%s PC Mac Address: %02x:%02x:%02x:%02x:%02x:%02x\n", netdev_name(net), + bridge_mac[0], bridge_mac[1], bridge_mac[2], bridge_mac[3], bridge_mac[4], bridge_mac[5]); + } + } + +#ifdef CONFIG_BRIDGE_LAN + //bridge Lan IP 192.168.0.0 + if (ehdr->h_proto == htons(ETH_P_IP) && (iph->daddr & 0xFFFF) == 0xA8C0) + { + struct sk_buff *reply = skb_copy(skb, GFP_ATOMIC); + ehdr = eth_hdr(reply); + + memcpy(ehdr->h_source, default_modem_addr, ETH_ALEN); + if(is_qmap_netdev(net)) + { + struct qmap_priv *priv = netdev_priv(net); + memcpy(ehdr->h_dest, priv->bridge_self_mac, ETH_ALEN); + } + else + { + struct usbnet * usbnetdev = netdev_priv(net); + struct qmi_wwan_state *info = (void *)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + memcpy(ehdr->h_dest, pQmapDev->bridge_self_mac, ETH_ALEN); + } + + //pr_info("%s br rx pkt addr: %02x:%02x:%02x:%02x:%02x:%02x -> %02x:%02x:%02x:%02x:%02x:%02x\n", netdev_name(net), + // ehdr->h_source[0], ehdr->h_source[1], ehdr->h_source[2], ehdr->h_source[3], ehdr->h_source[4], ehdr->h_source[5], + // ehdr->h_dest[0], ehdr->h_dest[1], ehdr->h_dest[2], ehdr->h_dest[3], ehdr->h_dest[4], ehdr->h_dest[5]); + + skb_reset_mac_header(reply); + __skb_pull(reply, skb_network_offset(reply)); + reply->ip_summed = CHECKSUM_UNNECESSARY; + reply->pkt_type = PACKET_HOST; +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0) + netif_rx_ni(reply); +#else + netif_rx(reply); +#endif + return NULL; + } +#endif + + if (memcmp(ehdr->h_source, bridge_mac, ETH_ALEN)) { + return NULL; + } + + return skb; +} + +static void bridge_mode_rx_fixup(sQmiWwanQmap *pQmapDev, struct net_device *net, struct sk_buff *skb) { + uint bridge_mode = 0; + unsigned char *bridge_mac; + + if (pQmapDev->qmap_mode > 1 || pQmapDev->use_rmnet_usb == 1) { + struct qmap_priv *priv = netdev_priv(net); + bridge_mode = priv->bridge_mode; + bridge_mac = priv->bridge_mac; + } + else { + bridge_mode = pQmapDev->bridge_mode; + bridge_mac = pQmapDev->bridge_mac; + } + + if (bridge_mode) + memcpy(eth_hdr(skb)->h_dest, bridge_mac, ETH_ALEN); + else + memcpy(eth_hdr(skb)->h_dest, net->dev_addr, ETH_ALEN); +} +#endif + +#if defined(QUECTEL_WWAN_QMAP) +static ssize_t qmap_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct net_device *netdev = to_net_dev(dev); + struct usbnet * usbnetdev = netdev_priv( netdev ); + struct qmi_wwan_state *info = (void *)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + + return snprintf(buf, PAGE_SIZE, "%d\n", pQmapDev->qmap_mode); +} + +static DEVICE_ATTR(qmap_mode, S_IRUGO, qmap_mode_show, NULL); + +static ssize_t qmap_size_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct net_device *netdev = to_net_dev(dev); + struct usbnet * usbnetdev = netdev_priv( netdev ); + struct qmi_wwan_state *info = (void *)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + + return snprintf(buf, PAGE_SIZE, "%u\n", pQmapDev->qmap_size); +} + +static DEVICE_ATTR(qmap_size, S_IRUGO, qmap_size_show, NULL); + +static ssize_t link_state_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct net_device *netdev = to_net_dev(dev); + struct usbnet * usbnetdev = netdev_priv( netdev ); + struct qmi_wwan_state *info = (void *)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + + return snprintf(buf, PAGE_SIZE, "0x%x\n", pQmapDev->link_state); +} + +static ssize_t 
link_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct net_device *netdev = to_net_dev(dev); + struct usbnet * usbnetdev = netdev_priv( netdev ); + struct qmi_wwan_state *info = (void *)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + unsigned link_state = 0; + unsigned old_link = pQmapDev->link_state; + uint offset_id = 0; + + link_state = simple_strtoul(buf, NULL, 0); + + if (pQmapDev->qmap_mode == 1) { + pQmapDev->link_state = !!link_state; + } + else if (pQmapDev->qmap_mode > 1) { + offset_id = ((link_state&0x7F) - 1); + + if (offset_id >= pQmapDev->qmap_mode) { + dev_info(dev, "%s offset_id is %d. but qmap_mode is %d\n", __func__, offset_id, pQmapDev->qmap_mode); + return count; + } + + if (link_state&0x80) + pQmapDev->link_state &= ~(1 << offset_id); + else + pQmapDev->link_state |= (1 << offset_id); + } + + if (old_link != pQmapDev->link_state) { + struct net_device *qmap_net = pQmapDev->mpQmapNetDev[offset_id]; + + if (usbnetdev->net->flags & IFF_UP) { + if (pQmapDev->link_state) { + netif_carrier_on(usbnetdev->net); + } + } + + if (qmap_net && qmap_net != netdev) { + struct qmap_priv *priv = netdev_priv(qmap_net); + + priv->link_state = !!(pQmapDev->link_state & (1 << offset_id)); + + if (qmap_net->flags & IFF_UP) { + if (priv->link_state) { + netif_carrier_on(qmap_net); + if (netif_queue_stopped(qmap_net) && !netif_queue_stopped(usbnetdev->net)) + netif_wake_queue(qmap_net); + } + else { + netif_carrier_off(qmap_net); + } + } + } + + if (usbnetdev->net->flags & IFF_UP) { + if (!pQmapDev->link_state) { + netif_carrier_off(usbnetdev->net); + } + } + + dev_info(dev, "link_state 0x%x -> 0x%x\n", old_link, pQmapDev->link_state); + } + + return count; +} + +#ifdef QUECTEL_BRIDGE_MODE +static ssize_t bridge_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct net_device *netdev = to_net_dev(dev); + uint old_mode = 0; + uint bridge_mode = simple_strtoul(buf, NULL, 0); + + if (netdev->type != ARPHRD_ETHER) { + return count; + } + + if (is_qmap_netdev(netdev)) { + struct qmap_priv *priv = netdev_priv(netdev); + old_mode = priv->bridge_mode; + priv->bridge_mode = bridge_mode; + } + else { + struct usbnet * usbnetdev = netdev_priv( netdev ); + struct qmi_wwan_state *info = (void *)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + old_mode = pQmapDev->bridge_mode; + pQmapDev->bridge_mode = bridge_mode; + } + + if (old_mode != bridge_mode) { + dev_info(dev, "bridge_mode change to 0x%x\n", bridge_mode); + } + + return count; +} + +static ssize_t bridge_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct net_device *netdev = to_net_dev(dev); + uint bridge_mode = 0; + + if (is_qmap_netdev(netdev)) { + struct qmap_priv *priv = netdev_priv(netdev); + bridge_mode = priv->bridge_mode; + } + else { + struct usbnet * usbnetdev = netdev_priv( netdev ); + struct qmi_wwan_state *info = (void *)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + bridge_mode = pQmapDev->bridge_mode; + } + + return snprintf(buf, PAGE_SIZE, "%u\n", bridge_mode); +} + +static ssize_t bridge_ipv4_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct net_device *netdev = to_net_dev(dev); + unsigned int bridge_ipv4 = 0; + unsigned char ipv4[4]; + + if (is_qmap_netdev(netdev)) { + struct qmap_priv *priv = netdev_priv(netdev); + bridge_ipv4 = priv->bridge_ipv4; + } + else { + struct usbnet * 
usbnetdev = netdev_priv( netdev ); + struct qmi_wwan_state *info = (void *)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + bridge_ipv4 = pQmapDev->bridge_ipv4; + } + + ipv4[0] = (bridge_ipv4 >> 24) & 0xFF; + ipv4[1] = (bridge_ipv4 >> 16) & 0xFF; + ipv4[2] = (bridge_ipv4 >> 8) & 0xFF; + ipv4[3] = (bridge_ipv4 >> 0) & 0xFF; + + return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n", ipv4[0], ipv4[1], ipv4[2], ipv4[3]); +} + +static ssize_t bridge_ipv4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct net_device *netdev = to_net_dev(dev); + + if (is_qmap_netdev(netdev)) { + struct qmap_priv *priv = netdev_priv(netdev); + priv->bridge_ipv4 = simple_strtoul(buf, NULL, 16); + } + else { + struct usbnet * usbnetdev = netdev_priv( netdev ); + struct qmi_wwan_state *info = (void *)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + pQmapDev->bridge_ipv4 = simple_strtoul(buf, NULL, 16); + } + + return count; +} +#endif + +static DEVICE_ATTR(link_state, S_IWUSR | S_IRUGO, link_state_show, link_state_store); +#ifdef QUECTEL_BRIDGE_MODE +static DEVICE_ATTR(bridge_mode, S_IWUSR | S_IRUGO, bridge_mode_show, bridge_mode_store); +static DEVICE_ATTR(bridge_ipv4, S_IWUSR | S_IRUGO, bridge_ipv4_show, bridge_ipv4_store); +#endif + +static struct attribute *qmi_wwan_sysfs_attrs[] = { + &dev_attr_link_state.attr, + &dev_attr_qmap_mode.attr, + &dev_attr_qmap_size.attr, +#ifdef QUECTEL_BRIDGE_MODE + &dev_attr_bridge_mode.attr, + &dev_attr_bridge_ipv4.attr, +#endif + NULL, +}; + +static struct attribute_group qmi_wwan_sysfs_attr_group = { + .attrs = qmi_wwan_sysfs_attrs, +}; + +#ifdef QUECTEL_BRIDGE_MODE +static struct attribute *qmi_qmap_sysfs_attrs[] = { + &dev_attr_bridge_mode.attr, + &dev_attr_bridge_ipv4.attr, + NULL, +}; + +static struct attribute_group qmi_qmap_sysfs_attr_group = { + .attrs = qmi_qmap_sysfs_attrs, +}; +#endif + +static int qmap_open(struct net_device *qmap_net) +{ + struct qmap_priv *priv = netdev_priv(qmap_net); + struct net_device *real_dev = priv->real_dev; + + //printk("%s %s real_dev %d %d %d %d+++\n", __func__, dev->name, + // netif_carrier_ok(real_dev), netif_queue_stopped(real_dev), netif_carrier_ok(dev), netif_queue_stopped(dev)); + + if (!(priv->real_dev->flags & IFF_UP)) + return -ENETDOWN; + + if (priv->link_state) { + netif_carrier_on(real_dev); + netif_carrier_on(qmap_net); + if (netif_queue_stopped(qmap_net) && !netif_queue_stopped(real_dev)) + netif_wake_queue(qmap_net); + } + //printk("%s %s real_dev %d %d %d %d---\n", __func__, dev->name, + // netif_carrier_ok(real_dev), netif_queue_stopped(real_dev), netif_carrier_ok(dev), netif_queue_stopped(dev)); + + return 0; +} + +static int qmap_stop(struct net_device *qmap_net) +{ + //printk("%s %s %d %d+++\n", __func__, dev->name, + // netif_carrier_ok(dev), netif_queue_stopped(dev)); + + netif_carrier_off(qmap_net); + return 0; +} + +static void qmap_wake_queue(sQmiWwanQmap *pQmapDev) +{ + uint i = 0; + + if (!pQmapDev || !pQmapDev->use_rmnet_usb) + return; + + for (i = 0; i < pQmapDev->qmap_mode; i++) { + struct net_device *qmap_net = pQmapDev->mpQmapNetDev[i]; + + if (qmap_net && netif_carrier_ok(qmap_net) && netif_queue_stopped(qmap_net)) { + netif_wake_queue(qmap_net); + } + } +} + +static struct sk_buff * add_qhdr(struct sk_buff *skb, u8 mux_id) { + struct qmap_hdr *qhdr; + int pad = 0; + + pad = skb->len%4; + if (pad) { + pad = 4 - pad; + if (skb_tailroom(skb) < pad) { + printk("skb_tailroom small!\n"); + pad = 0; + } + if (pad) + 
__skb_put(skb, pad);
+	}
+
+	qhdr = (struct qmap_hdr *)skb_push(skb, sizeof(struct qmap_hdr));
+	qhdr->cd_rsvd_pad = pad;
+	qhdr->mux_id = mux_id;
+	qhdr->pkt_len = cpu_to_be16(skb->len - sizeof(struct qmap_hdr));
+
+	return skb;
+}
+
+static struct sk_buff * add_qhdr_v5(struct sk_buff *skb, u8 mux_id) {
+	struct rmnet_map_header *map_header;
+	struct rmnet_map_v5_csum_header *ul_header;
+	u32 padding, map_datalen;
+
+	map_datalen = skb->len;
+	padding = map_datalen%4;
+	if (padding) {
+		padding = 4 - padding;
+		if (skb_tailroom(skb) < padding) {
+			printk("skb_tailroom small!\n");
+			padding = 0;
+		}
+		if (padding)
+			__skb_put(skb, padding);
+	}
+
+	map_header = (struct rmnet_map_header *)skb_push(skb, (sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_v5_csum_header)));
+	map_header->cd_bit = 0;
+	map_header->next_hdr = 1;
+	map_header->pad_len = padding;
+	map_header->mux_id = mux_id;
+	map_header->pkt_len = htons(map_datalen + padding);
+
+	ul_header = (struct rmnet_map_v5_csum_header *)(map_header + 1);
+	memset(ul_header, 0, sizeof(*ul_header));
+	ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD;
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+#if 0 //TODO
+		skb->ip_summed = CHECKSUM_NONE;
+		/* Ask for checksum offloading */
+		ul_header->csum_valid_required = 1;
+#endif
+	}
+
+	return skb;
+}
+
+static void rmnet_vnd_update_rx_stats(struct net_device *net,
+			unsigned rx_packets, unsigned rx_bytes) {
+#if defined(MHI_NETDEV_STATUS64)
+	struct qmap_priv *dev = netdev_priv(net);
+	struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
+
+	u64_stats_update_begin(&stats64->syncp);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
+	stats64->rx_packets += rx_packets;
+	stats64->rx_bytes += rx_bytes;
+#else
+	u64_stats_add(&stats64->rx_packets, rx_packets);
+	u64_stats_add(&stats64->rx_bytes, rx_bytes);
+#endif
+	u64_stats_update_end(&stats64->syncp);
+#else
+	/* net->stats fields are plain counters, not u64_stats_t,
+	 * so update them directly on all kernel versions. */
+	net->stats.rx_packets += rx_packets;
+	net->stats.rx_bytes += rx_bytes;
+#endif
+}
+
+static void rmnet_vnd_update_tx_stats(struct net_device *net,
+			unsigned tx_packets, unsigned tx_bytes) {
+#if defined(MHI_NETDEV_STATUS64)
+	struct qmap_priv *dev = netdev_priv(net);
+	struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
+
+	u64_stats_update_begin(&stats64->syncp);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
+	stats64->tx_packets += tx_packets;
+	stats64->tx_bytes += tx_bytes;
+#else
+	u64_stats_add(&stats64->tx_packets, tx_packets);
+	u64_stats_add(&stats64->tx_bytes, tx_bytes);
+#endif
+	u64_stats_update_end(&stats64->syncp);
+#else
+	net->stats.tx_packets += tx_packets;
+	net->stats.tx_bytes += tx_bytes;
+#endif
+}
+
+#if defined(MHI_NETDEV_STATUS64)
+static struct rtnl_link_stats64 *_rmnet_vnd_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats)
+{
+	struct qmap_priv *dev = netdev_priv(net);
+	unsigned int start;
+	int cpu;
+
+	netdev_stats_to_stats64(stats, &net->stats);
+
+	if (nss_cb && dev->use_qca_nss) { // rmnet_nss.c:rmnet_nss_tx() will update rx stats
+		stats->rx_packets = 0;
+		stats->rx_bytes = 0;
+	}
+
+	for_each_possible_cpu(cpu) {
+		struct pcpu_sw_netstats *stats64;
+		u64 rx_packets, rx_bytes;
+		u64 tx_packets, tx_bytes;
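+
+		/* Snapshot each CPU's counters inside the syncp retry loop so a
+		 * concurrent writer cannot be observed mid-update on 32-bit kernels. */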
+		stats64 = per_cpu_ptr(dev->stats64, cpu);
+
+		do {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 6,6,0 ))
+			start = u64_stats_fetch_begin_irq(&stats64->syncp);
+#else
+			start = u64_stats_fetch_begin(&stats64->syncp);
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 5,18,0 ))
+			rx_packets = stats64->rx_packets;
+			rx_bytes = stats64->rx_bytes;
+			tx_packets = stats64->tx_packets;
+			tx_bytes = stats64->tx_bytes;
+#else
+			rx_packets = u64_stats_read(&stats64->rx_packets);
+			rx_bytes = u64_stats_read(&stats64->rx_bytes);
+			tx_packets = u64_stats_read(&stats64->tx_packets);
+			tx_bytes = u64_stats_read(&stats64->tx_bytes);
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 6,6,0 ))
+		} while (u64_stats_fetch_retry_irq(&stats64->syncp, start));
+#else
+		} while (u64_stats_fetch_retry(&stats64->syncp, start));
+#endif
+
+		stats->rx_packets += rx_packets;
+		stats->rx_bytes += rx_bytes;
+		stats->tx_packets += tx_packets;
+		stats->tx_bytes += tx_bytes;
+	}
+
+	return stats;
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,10,0 )) //bc1f44709cf27fb2a5766cadafe7e2ad5e9cb221
+static void rmnet_vnd_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats) {
+	_rmnet_vnd_get_stats64(net, stats);
+}
+#else
+static struct rtnl_link_stats64 *rmnet_vnd_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats) {
+	return _rmnet_vnd_get_stats64(net, stats);
+}
+#endif
+#endif
+
+#if defined(QUECTEL_UL_DATA_AGG)
+static void usbnet_bh(unsigned long data) {
+	sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)data;
+	struct tasklet_struct *t = &pQmapDev->usbnet_bh;
+	bool use_callback = false;
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION( 5,8,0 )) //c955e329bb9d44fab75cf2116542fcc0de0473c5
+	use_callback = t->use_callback;
+	if (use_callback)
+		t->callback(&pQmapDev->mpNetDev->bh);
+#endif
+
+	if (!use_callback)
+		t->func(t->data);
+
+	if (!netif_queue_stopped(pQmapDev->mpNetDev->net)) {
+		qmap_wake_queue((sQmiWwanQmap *)data);
+	}
+}
+
+static void rmnet_usb_tx_wake_queue(unsigned long data) {
+	qmap_wake_queue((sQmiWwanQmap *)data);
+}
+
+#if 0
+static void rmnet_usb_tx_skb_destructor(struct sk_buff *skb) {
+	struct net_device *net = skb->dev;
+	struct usbnet * dev = netdev_priv( net );
+	struct qmi_wwan_state *info = (void *)&dev->data;
+	sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
+
+	if (pQmapDev && pQmapDev->use_rmnet_usb) {
+		int i;
+
+		for (i = 0; i < pQmapDev->qmap_mode; i++) {
+			struct net_device *qmap_net = pQmapDev->mpQmapNetDev[i];
+
+			if (qmap_net && netif_carrier_ok(qmap_net) && netif_queue_stopped(qmap_net)) {
+				tasklet_schedule(&pQmapDev->txq);
+				break;
+			}
+		}
+	}
+}
+#endif
+
+static int rmnet_usb_tx_agg_skip(struct sk_buff *skb, int offset)
+{
+	u8 *packet_start = skb->data + offset;
+	int ready2send = 0;
+
+	if (skb->protocol == htons(ETH_P_IP)) {
+		struct iphdr *ip4h = (struct iphdr *)(packet_start);
+
+		if (ip4h->protocol == IPPROTO_TCP) {
+			const struct tcphdr *th = (const struct tcphdr *)(packet_start + sizeof(struct iphdr));
+			if (th->psh) {
+				ready2send = 1;
+			}
+		}
+		else if (ip4h->protocol == IPPROTO_ICMP)
+			ready2send = 1;
+
+	} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start);
+
+		if (ip6h->nexthdr == NEXTHDR_TCP) {
+			const struct tcphdr *th = (const struct tcphdr *)(packet_start + sizeof(struct ipv6hdr));
+			if (th->psh) {
+				ready2send = 1;
+			}
+		} else if (ip6h->nexthdr == NEXTHDR_ICMP) {
+			ready2send = 1;
+		} else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) {
+			struct frag_hdr *frag;
+
+			frag = (struct frag_hdr *)(packet_start
+						   + sizeof(struct ipv6hdr));
+			if (frag->nexthdr == IPPROTO_ICMPV6)
+				ready2send = 1;
+		}
+	}
+
+	return ready2send;
+}
+
+static void rmnet_usb_tx_agg_work(struct work_struct *work)
+{
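+	/* Deferred flush: push out whatever is sitting in the aggregation
+	 * buffer once the hrtimer fires and this work item runs. */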
struct qmap_priv *priv = + container_of(work, struct qmap_priv, agg_wq); + struct sk_buff *skb = NULL; + unsigned long flags; + + spin_lock_irqsave(&priv->agg_lock, flags); + if (likely(priv->agg_skb)) { + skb = priv->agg_skb; + priv->agg_skb = NULL; + priv->agg_count = 0; + skb->protocol = htons(ETH_P_MAP); + skb->dev = priv->real_dev; + ktime_get_ts64(&priv->agg_time); + } + spin_unlock_irqrestore(&priv->agg_lock, flags); + + if (skb) { + int err; +#if 0 + if (!skb->destructor) + skb->destructor = rmnet_usb_tx_skb_destructor; +#endif + err = dev_queue_xmit(skb); + if (err != NET_XMIT_SUCCESS) { + priv->self_dev->stats.tx_errors++; + } + } +} + +static enum hrtimer_restart rmnet_usb_tx_agg_timer_cb(struct hrtimer *timer) +{ + struct qmap_priv *priv = + container_of(timer, struct qmap_priv, agg_hrtimer); + + schedule_work(&priv->agg_wq); + return HRTIMER_NORESTART; +} + +static long agg_time_limit __read_mostly = 1000000L; //reduce this time, can get better TPUT performance, but will increase USB interrupts +module_param(agg_time_limit, long, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(agg_time_limit, "Maximum time packets sit in the agg buf"); + +static long agg_bypass_time __read_mostly = 10000000L; +module_param(agg_bypass_time, long, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(agg_bypass_time, "Skip agg when apart spaced more than this"); + +static int rmnet_usb_tx_agg(struct sk_buff *skb, struct qmap_priv *priv) { + struct qmi_wwan_state *info = (void *)&priv->dev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + struct tx_agg_ctx *ctx = &pQmapDev->tx_ctx; + int ready2send = 0; + int xmit_more = 0; + struct timespec64 diff, now; + struct sk_buff *agg_skb = NULL; + unsigned long flags; + int err; + struct net_device *pNet = priv->self_dev; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0) //6b16f9ee89b8d5709f24bc3ac89ae8b5452c0d7c +#if LINUX_VERSION_CODE > KERNEL_VERSION(3,16,0) + xmit_more = skb->xmit_more; +#endif +#else + xmit_more = netdev_xmit_more(); +#endif + + rmnet_vnd_update_tx_stats(pNet, 1, skb->len); + + if (ctx->ul_data_aggregation_max_datagrams == 1) { + skb->protocol = htons(ETH_P_MAP); + skb->dev = priv->real_dev; +#if 0 + if (!skb->destructor) + skb->destructor = rmnet_usb_tx_skb_destructor; +#endif + err = dev_queue_xmit(skb); + if (err != NET_XMIT_SUCCESS) + pNet->stats.tx_errors++; + return NET_XMIT_SUCCESS; + } + +new_packet: + spin_lock_irqsave(&priv->agg_lock, flags); + agg_skb = NULL; + ready2send = 0; + ktime_get_ts64(&now); + diff = timespec64_sub(now, priv->agg_time); + + if (priv->agg_skb) { + if ((priv->agg_skb->len + skb->len) < ctx->ul_data_aggregation_max_size) { + memcpy(skb_put(priv->agg_skb, skb->len), skb->data, skb->len); + priv->agg_count++; + + if (diff.tv_sec > 0 || diff.tv_nsec > agg_time_limit) { + ready2send = 1; + } + else if (priv->agg_count == ctx->ul_data_aggregation_max_datagrams) { + ready2send = 1; + } + else if (xmit_more == 0) { + struct rmnet_map_header *map_header = (struct rmnet_map_header *)skb->data; + size_t offset = sizeof(struct rmnet_map_header); + if (map_header->next_hdr) + offset += sizeof(struct rmnet_map_v5_csum_header); + + ready2send = rmnet_usb_tx_agg_skip(skb, offset); + } + + dev_kfree_skb_any(skb); + skb = NULL; + } + else { + ready2send = 1; + } + + if (ready2send) { + agg_skb = priv->agg_skb; + priv->agg_skb = NULL; + priv->agg_count = 0; + } + } + else if (skb) { + if (diff.tv_sec > 0 || diff.tv_nsec > agg_bypass_time) { + ready2send = 1; + } + else if (xmit_more == 0) { + struct rmnet_map_header 
*map_header = (struct rmnet_map_header *)skb->data; + size_t offset = sizeof(struct rmnet_map_header); + if (map_header->next_hdr) + offset += sizeof(struct rmnet_map_v5_csum_header); + + ready2send = rmnet_usb_tx_agg_skip(skb, offset); + } + + if (ready2send == 0) { + priv->agg_skb = alloc_skb(ctx->ul_data_aggregation_max_size, GFP_ATOMIC); + if (priv->agg_skb) { + skb_reset_network_header(priv->agg_skb); //protocol da1a is buggy, dev wwan0 + memcpy(skb_put(priv->agg_skb, skb->len), skb->data, skb->len); + priv->agg_count++; + dev_kfree_skb_any(skb); + skb = NULL; + } + else { + ready2send = 1; + } + } + + if (ready2send) { + agg_skb = skb; + skb = NULL; + } + } + + if (ready2send) { + priv->agg_time = now; + } + spin_unlock_irqrestore(&priv->agg_lock, flags); + + if (agg_skb) { + agg_skb->protocol = htons(ETH_P_MAP); + agg_skb->dev = priv->real_dev; +#if 0 + if (!agg_skb->destructor) + agg_skb->destructor = rmnet_usb_tx_skb_destructor; +#endif + err = dev_queue_xmit(agg_skb); + if (err != NET_XMIT_SUCCESS) { + pNet->stats.tx_errors++; + } + } + + if (skb) { + goto new_packet; + } + + if (priv->agg_skb) { + if (!hrtimer_is_queued(&priv->agg_hrtimer)) + hrtimer_start(&priv->agg_hrtimer, ns_to_ktime(NSEC_PER_MSEC * 2), HRTIMER_MODE_REL); + } + + return NET_XMIT_SUCCESS; +} +#endif + +static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb, + struct net_device *pNet) +{ + int err; + struct qmap_priv *priv = netdev_priv(pNet); + + if (netif_queue_stopped(priv->real_dev)) { + netif_stop_queue(pNet); + return NETDEV_TX_BUSY; + } + + //printk("%s 1 skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + if (pNet->type == ARPHRD_ETHER) { + skb_reset_mac_header(skb); + +#ifdef QUECTEL_BRIDGE_MODE + if (priv->bridge_mode && bridge_mode_tx_fixup(pNet, skb, priv->bridge_ipv4, priv->bridge_mac) == NULL) { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } +#endif + + if (skb_pull(skb, ETH_HLEN) == NULL) { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } + } + //printk("%s 2 skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + if (priv->qmap_version == 5) { + add_qhdr(skb, priv->mux_id); + } + else if (priv->qmap_version == 9) { + add_qhdr_v5(skb, priv->mux_id); + } + else { + dev_kfree_skb_any (skb); + return NETDEV_TX_OK; + } + //printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len); + + err = rmnet_usb_tx_agg(skb, priv); + + return err; +} + +static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu) +{ + if (new_mtu < 0) + return -EINVAL; + + if (new_mtu > 1500) + printk("warning, set mtu greater than 1500, %d\n", new_mtu); + + rmnet_dev->mtu = new_mtu; + return 0; +} + +/* drivers may override default ethtool_ops in their bind() routine */ +static const struct ethtool_ops rmnet_vnd_ethtool_ops = { + .get_link = ethtool_op_get_link, +}; + +static const struct net_device_ops rmnet_vnd_ops = { + .ndo_open = qmap_open, + .ndo_stop = qmap_stop, + .ndo_start_xmit = rmnet_vnd_start_xmit, + .ndo_change_mtu = rmnet_vnd_change_mtu, +#if defined(MHI_NETDEV_STATUS64) + .ndo_get_stats64 = rmnet_vnd_get_stats64, +#endif +}; + +static void rmnet_usb_ether_setup(struct net_device *rmnet_dev) +{ + ether_setup(rmnet_dev); + + rmnet_dev->flags |= IFF_NOARP; + rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + +#if LINUX_VERSION_CODE > KERNEL_VERSION(4,10,0) + rmnet_dev->max_mtu = 65535; +#endif + + rmnet_dev->ethtool_ops = 
&rmnet_vnd_ethtool_ops;
+	rmnet_dev->netdev_ops = &rmnet_vnd_ops;
+}
+
+static void rmnet_usb_rawip_setup(struct net_device *rmnet_dev)
+{
+	rmnet_dev->needed_headroom = 16;
+
+	/* Raw IP mode */
+	rmnet_dev->header_ops = NULL;	/* No header */
+	rmnet_dev->type = ARPHRD_RAWIP;
+	rmnet_dev->hard_header_len = 0;
+	rmnet_dev->flags |= IFF_NOARP;
+	rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+
+	rmnet_dev->ethtool_ops = &rmnet_vnd_ethtool_ops;
+	rmnet_dev->netdev_ops = &rmnet_vnd_ops;
+}
+
+static rx_handler_result_t qca_nss_rx_handler(struct sk_buff **pskb)
+{
+	struct sk_buff *skb = *pskb;
+
+	if (!skb)
+		return RX_HANDLER_CONSUMED;
+
+	//printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len);
+
+	if (skb->pkt_type == PACKET_LOOPBACK)
+		return RX_HANDLER_PASS;
+
+	/* Check this so that we don't loop around netif_receive_skb */
+	if (skb->cb[0] == 1) {
+		skb->cb[0] = 0;
+
+		return RX_HANDLER_PASS;
+	}
+
+	if (nss_cb) {
+		nss_cb->nss_tx(skb);
+		return RX_HANDLER_CONSUMED;
+	}
+
+	return RX_HANDLER_PASS;
+}
+
+static int qmap_register_device(sQmiWwanQmap * pDev, u8 offset_id)
+{
+	struct net_device *real_dev = pDev->mpNetDev->net;
+	struct net_device *qmap_net;
+	struct qmap_priv *priv;
+	int err;
+	char name[IFNAMSIZ];
+	int use_qca_nss = !!nss_cb;
+
+	sprintf(name, "%s_%d", real_dev->name, offset_id + 1);
+#ifdef NET_NAME_UNKNOWN
+	qmap_net = alloc_netdev(sizeof(struct qmap_priv), name,
+				NET_NAME_UNKNOWN, rmnet_usb_ether_setup);
+#else
+	qmap_net = alloc_netdev(sizeof(struct qmap_priv), name,
+				rmnet_usb_ether_setup);
+#endif
+	if (!qmap_net)
+		return -ENOBUFS;
+
+	SET_NETDEV_DEV(qmap_net, &real_dev->dev);
+	priv = netdev_priv(qmap_net);
+	priv->offset_id = offset_id;
+	priv->real_dev = real_dev;
+	priv->self_dev = qmap_net;
+	priv->dev = pDev->mpNetDev;
+	priv->qmap_version = pDev->qmap_version;
+	priv->mux_id = QUECTEL_QMAP_MUX_ID + offset_id;
+	/* the virtual device inherits the MAC of the real device */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0)
+	memcpy(qmap_net->dev_addr, real_dev->dev_addr, ETH_ALEN);
+#else
+	eth_hw_addr_set(qmap_net, real_dev->dev_addr);
+#endif
+
+#ifdef QUECTEL_BRIDGE_MODE
+	priv->bridge_mode = !!(pDev->bridge_mode & BIT(offset_id));
+	qmap_net->sysfs_groups[0] = &qmi_qmap_sysfs_attr_group;
+	if (priv->bridge_mode)
+		use_qca_nss = 0;
+#ifdef CONFIG_BRIDGE_LAN
+	memcpy(priv->bridge_self_mac, broadcast_mac_addr, ETH_ALEN);
+#endif
+#endif
+
+	if (nss_cb && use_qca_nss) {
+		rmnet_usb_rawip_setup(qmap_net);
+	}
+
+	priv->agg_skb = NULL;
+	priv->agg_count = 0;
+	hrtimer_init(&priv->agg_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	priv->agg_hrtimer.function = rmnet_usb_tx_agg_timer_cb;
+	INIT_WORK(&priv->agg_wq, rmnet_usb_tx_agg_work);
+	ktime_get_ts64(&priv->agg_time);
+	spin_lock_init(&priv->agg_lock);
+	priv->use_qca_nss = 0;
+
+#if defined(MHI_NETDEV_STATUS64)
+	priv->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+	if (!priv->stats64) {
+		err = -ENOBUFS;
+		goto out_free_newdev;
+	}
+#endif
+
+	err = register_netdev(qmap_net);
+	if (err)
+		dev_info(&real_dev->dev, "%s(%s)=%d\n", __func__, qmap_net->name, err);
+	if (err < 0)
+		goto out_free_newdev;
+	netif_device_attach(qmap_net);
+	netif_carrier_off(qmap_net);
+
+	if (nss_cb && use_qca_nss) {
+		int rc = nss_cb->nss_create(qmap_net);
+		if (rc) {
+			/* Log, but don't fail the device creation */
+			netdev_err(qmap_net, "Device will not use NSS path: %d\n", rc);
+		} else {
+			priv->use_qca_nss = 1;
+			netdev_info(qmap_net, "NSS context created\n");
+			rtnl_lock();
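+			/* hand packets arriving on this netdev to the NSS offload
+			 * via nss_cb->nss_tx(); skbs flagged in cb[0] (e.g. ICMP
+			 * when CONFIG_QCA_NSS_PACKET_FILTER is set) bypass it */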
netdev_rx_handler_register(qmap_net, qca_nss_rx_handler, NULL);
+			rtnl_unlock();
+		}
+	}
+
+	strcpy(pDev->rmnet_info.ifname[offset_id], qmap_net->name);
+	pDev->rmnet_info.mux_id[offset_id] = priv->mux_id;
+
+	pDev->mpQmapNetDev[offset_id] = qmap_net;
+
+	dev_info(&real_dev->dev, "%s %s\n", __func__, qmap_net->name);
+
+	return 0;
+
+out_free_newdev:
+	free_netdev(qmap_net);
+	return err;
+}
+
+static void qmap_unregister_device(sQmiWwanQmap * pDev, u8 offset_id) {
+	struct net_device *qmap_net = pDev->mpQmapNetDev[offset_id];
+
+	if (qmap_net != NULL && qmap_net != pDev->mpNetDev->net) {
+		struct qmap_priv *priv = netdev_priv(qmap_net);
+		unsigned long flags;
+
+		pr_info("qmap_unregister_device(%s)\n", qmap_net->name);
+		pDev->mpQmapNetDev[offset_id] = NULL;
+		netif_carrier_off( qmap_net );
+		netif_stop_queue( qmap_net );
+
+		hrtimer_cancel(&priv->agg_hrtimer);
+		cancel_work_sync(&priv->agg_wq);
+		spin_lock_irqsave(&priv->agg_lock, flags);
+		if (priv->agg_skb) {
+			kfree_skb(priv->agg_skb);
+		}
+		spin_unlock_irqrestore(&priv->agg_lock, flags);
+
+		if (nss_cb && priv->use_qca_nss) {
+			rtnl_lock();
+			netdev_rx_handler_unregister(qmap_net);
+			rtnl_unlock();
+			nss_cb->nss_free(qmap_net);
+		}
+
+#if defined(MHI_NETDEV_STATUS64)
+		free_percpu(priv->stats64);
+#endif
+		unregister_netdev(qmap_net);
+		free_netdev(qmap_net);
+	}
+}
+
+typedef struct {
+	unsigned int size;
+	unsigned int rx_urb_size;
+	unsigned int ep_type;
+	unsigned int iface_id;
+	unsigned int MuxId;
+	unsigned int ul_data_aggregation_max_datagrams;	//0x17
+	unsigned int ul_data_aggregation_max_size;	//0x18
+	unsigned int dl_minimum_padding;	//0x1A
+} QMAP_SETTING;
+
+#ifdef CONFIG_BRIDGE_LAN
+typedef struct {
+	u8 id;
+	u8 brmac[ETH_ALEN];
+} BRMAC_SETTING;
+#endif
+
+int qma_setting_store(struct device *dev, QMAP_SETTING *qmap_settings, size_t size) {
+	struct net_device *netdev = to_net_dev(dev);
+	struct usbnet * usbnetdev = netdev_priv( netdev );
+	struct qmi_wwan_state *info = (void *)&usbnetdev->data;
+	sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
+
+	if (qmap_settings->size != size) {
+		dev_err(dev, "ERROR: qmap_settings.size does not match!\n");
+		return -EOPNOTSUPP;
+	}
+
+#ifdef QUECTEL_UL_DATA_AGG
+	netif_tx_lock_bh(netdev);
+	if (pQmapDev->tx_ctx.ul_data_aggregation_max_datagrams == 1 && qmap_settings->ul_data_aggregation_max_datagrams > 1) {
+		pQmapDev->tx_ctx.ul_data_aggregation_max_datagrams = qmap_settings->ul_data_aggregation_max_datagrams;
+		pQmapDev->tx_ctx.ul_data_aggregation_max_size = qmap_settings->ul_data_aggregation_max_size;
+		pQmapDev->tx_ctx.dl_minimum_padding = qmap_settings->dl_minimum_padding;
+		dev_info(dev, "ul_data_aggregation_max_datagrams=%d, ul_data_aggregation_max_size=%d, dl_minimum_padding=%d\n",
+			pQmapDev->tx_ctx.ul_data_aggregation_max_datagrams,
+			pQmapDev->tx_ctx.ul_data_aggregation_max_size,
+			pQmapDev->tx_ctx.dl_minimum_padding);
+	}
+	netif_tx_unlock_bh(netdev);
+	return 0;
+#endif
+
+	return -EOPNOTSUPP;
+}
+
+static int qmap_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) {
+	struct usbnet * usbnetdev = netdev_priv( dev );
+	struct qmi_wwan_state *info = (void *)&usbnetdev->data;
+	sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
+	int rc = -EOPNOTSUPP;
+	uint link_state = 0;
+	QMAP_SETTING qmap_settings = {0};
+#ifdef CONFIG_BRIDGE_LAN
+	BRMAC_SETTING brmac_settings = {0};
+#endif
+
+	switch (cmd) {
+	case 0x89F1: //SIOCDEVPRIVATE
+		rc = copy_from_user(&link_state, ifr->ifr_ifru.ifru_data, sizeof(link_state));
+		if (!rc) {
+			char buf[32];
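+			/* reuse the sysfs link_state parser so the ioctl and
+			 * sysfs paths stay consistent */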
snprintf(buf, sizeof(buf), "%u", link_state);
+			link_state_store(&dev->dev, NULL, buf, strlen(buf));
+		}
+		break;
+
+	case 0x89F2: //SIOCDEVPRIVATE
+		rc = copy_from_user(&qmap_settings, ifr->ifr_ifru.ifru_data, sizeof(qmap_settings));
+		if (!rc) {
+			rc = qma_setting_store(&dev->dev, &qmap_settings, sizeof(qmap_settings));
+		}
+		break;
+
+	case 0x89F3: //SIOCDEVPRIVATE
+		if (pQmapDev->use_rmnet_usb) {
+			uint i;
+
+			for (i = 0; i < pQmapDev->qmap_mode; i++) {
+				struct net_device *qmap_net = pQmapDev->mpQmapNetDev[i];
+
+				if (!qmap_net)
+					break;
+
+				strcpy(pQmapDev->rmnet_info.ifname[i], qmap_net->name);
+			}
+			rc = copy_to_user(ifr->ifr_ifru.ifru_data, &pQmapDev->rmnet_info, sizeof(pQmapDev->rmnet_info));
+		}
+		break;
+
+#ifdef CONFIG_BRIDGE_LAN
+	case 0x89F4: //SIOCDEVPRIVATE
+		rc = copy_from_user(&brmac_settings, ifr->ifr_ifru.ifru_data, sizeof(brmac_settings));
+		if (pQmapDev->use_rmnet_usb && brmac_settings.id < qmap_mode) {
+			struct net_device *qmap_net = pQmapDev->mpQmapNetDev[brmac_settings.id];
+			struct qmap_priv *priv = netdev_priv(qmap_net);
+			memcpy(priv->bridge_self_mac, brmac_settings.brmac, ETH_ALEN);
+			pr_info("ioctl 0x89F4 change qmapnet bridge(%d) lan mac -> %02x:%02x:%02x:%02x:%02x:%02x\n", brmac_settings.id, priv->bridge_self_mac[0],
+				priv->bridge_self_mac[1], priv->bridge_self_mac[2], priv->bridge_self_mac[3], priv->bridge_self_mac[4], priv->bridge_self_mac[5]);
+		}
+		else if (!pQmapDev->use_rmnet_usb && brmac_settings.id == 0) {
+			memcpy(pQmapDev->bridge_self_mac, brmac_settings.brmac, ETH_ALEN);
+			pr_info("ioctl 0x89F4 change usbnet bridge(%d) lan mac -> %02x:%02x:%02x:%02x:%02x:%02x\n", brmac_settings.id, pQmapDev->bridge_self_mac[0],
+				pQmapDev->bridge_self_mac[1], pQmapDev->bridge_self_mac[2], pQmapDev->bridge_self_mac[3], pQmapDev->bridge_self_mac[4], pQmapDev->bridge_self_mac[5]);
+		}
+		else {
+			pr_info("ioctl 0x89F4 change bridge(%d) lan mac -> error id\n", brmac_settings.id);
+			rc = -1;
+		}
+		break;
+#endif
+
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+#ifdef QUECTEL_BRIDGE_MODE
+static int is_qmap_netdev(const struct net_device *netdev) {
+	return netdev->netdev_ops == &rmnet_vnd_ops;
+}
+#endif
+#endif
+
+static struct sk_buff *qmi_wwan_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) {
+	//MDM9x07,MDM9628,MDM9x40,SDX20,SDX24 only work on RAW IP mode
+	if ((dev->driver_info->flags & FLAG_NOARP) == 0)
+		return skb;
+
+	// Skip Ethernet header from message
+	if (dev->net->hard_header_len == 0)
+		return skb;
+	else
+		skb_reset_mac_header(skb);
+
+#ifdef QUECTEL_BRIDGE_MODE
+{
+	struct qmi_wwan_state *info = (void *)&dev->data;
+	sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
+
+	if (pQmapDev->bridge_mode && bridge_mode_tx_fixup(dev->net, skb, pQmapDev->bridge_ipv4, pQmapDev->bridge_mac) == NULL) {
+		dev_kfree_skb_any (skb);
+		return NULL;
+	}
+}
+#endif
+
+	if (skb_pull(skb, ETH_HLEN)) {
+		return skb;
+	} else {
+		dev_err(&dev->intf->dev, "Packet dropped\n");
+	}
+
+	// Filter the packet out, release it
+	dev_kfree_skb_any(skb);
+	return NULL;
+}
+#endif
+
+/* Make up an ethernet header if the packet doesn't have one.
+ *
+ * A firmware bug common among several devices causes them to send raw
+ * IP packets under some circumstances. There is no way for the
+ * driver/host to know when this will happen. And even when the bug
+ * hits, some packets will still arrive with an intact header.
+ *
+ * The supported devices are only capable of sending IPv4, IPv6 and
+ * ARP packets on a point-to-point link. Any packet with an ethernet
+ * header will have either our address or a broadcast/multicast
+ * address as destination. ARP packets will always have a header.
+ *
+ * This means that this function will reliably add the appropriate
+ * header iff necessary, provided our hardware address does not start
+ * with 4 or 6.
+ *
+ * Another common firmware bug results in all packets being addressed
+ * to 00:a0:c6:00:00:00 despite the host address being different.
+ * This function will also fixup such packets.
+ */
+static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+	__be16 proto;
+
+	/* This check is no longer done by usbnet */
+	if (skb->len < dev->net->hard_header_len)
+		return 0;
+
+	switch (skb->data[0] & 0xf0) {
+	case 0x40:
+		proto = htons(ETH_P_IP);
+		break;
+	case 0x60:
+		proto = htons(ETH_P_IPV6);
+		break;
+	case 0x00:
+		if (is_multicast_ether_addr(skb->data))
+			return 1;
+		/* possibly bogus destination - rewrite just in case */
+		skb_reset_mac_header(skb);
+		goto fix_dest;
+	default:
+		/* pass along other packets without modifications */
+		return 1;
+	}
+	if (skb_headroom(skb) < ETH_HLEN)
+		return 0;
+	skb_push(skb, ETH_HLEN);
+	skb_reset_mac_header(skb);
+	eth_hdr(skb)->h_proto = proto;
+	memset(eth_hdr(skb)->h_source, 0, ETH_ALEN);
+#if 1 //Added by Quectel
+	//some kernels drop ethernet packets whose source MAC is all zeros
+	memcpy(eth_hdr(skb)->h_source, default_modem_addr, ETH_ALEN);
+#endif
+
+fix_dest:
+#ifdef QUECTEL_BRIDGE_MODE
+{
+	struct qmi_wwan_state *info = (void *)&dev->data;
+	sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
+	bridge_mode_rx_fixup(pQmapDev, dev->net, skb);
+}
+#else
+	memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
+#endif
+
+	return 1;
+}
+
+#if defined(QUECTEL_WWAN_QMAP)
+static struct sk_buff *qmap_qmi_wwan_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) {
+	struct qmi_wwan_state *info = (void *)&dev->data;
+	sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
+
+	if (unlikely(pQmapDev == NULL)) {
+		goto drop_skb;
+	} else if (unlikely(pQmapDev->qmap_mode && !pQmapDev->link_state)) {
+		dev_dbg(&dev->net->dev, "link_state 0x%x, drop skb, len = %u\n", pQmapDev->link_state, skb->len);
+		goto drop_skb;
+	} else if (pQmapDev->qmap_mode == 0) {
+		skb = qmi_wwan_tx_fixup(dev, skb, flags);
+	}
+	else if (pQmapDev->qmap_mode > 1) {
+		WARN_ON(1); //never reach here.
+	}
+	else {
+		if (likely(skb)) {
+			skb = qmi_wwan_tx_fixup(dev, skb, flags);
+
+			if (skb) {
+				if(pQmapDev->qmap_version == 5)
+					add_qhdr(skb, QUECTEL_QMAP_MUX_ID);
+				else
+					add_qhdr_v5(skb, QUECTEL_QMAP_MUX_ID);
+			}
+			else {
+				return NULL;
+			}
+		}
+	}
+
+	return skb;
+drop_skb:
+	dev_kfree_skb_any (skb);
+	return NULL;
+}
+
+static void qmap_packet_decode(sQmiWwanQmap *pQmapDev,
+	struct sk_buff *skb_in, struct sk_buff_head *skb_chain)
+{
+	struct device *dev = &pQmapDev->mpNetDev->net->dev;
+	struct sk_buff *qmap_skb;
+	uint dl_minimum_padding = 0;
+
+	if (pQmapDev->qmap_version == 9)
+		dl_minimum_padding = pQmapDev->tx_ctx.dl_minimum_padding;
+
+	/* __skb_queue_head_init() does not call spin_lock_init(&list->lock),
+	   so the locked skb_queue_tail()/skb_dequeue() helpers must not be
+	   used on this list later. */
+	__skb_queue_head_init(skb_chain);
+
+	while (skb_in->len > sizeof(struct qmap_hdr)) {
+		struct rmnet_map_header *map_header = (struct rmnet_map_header *)skb_in->data;
+		struct rmnet_map_v5_csum_header *ul_header = NULL;
+		size_t hdr_size = sizeof(struct rmnet_map_header);
+		struct net_device *qmap_net;
+		int pkt_len = ntohs(map_header->pkt_len);
+		int skb_len;
+		__be16 protocol;
+		int mux_id;
+		int skip_nss = 0;
+
+		if (map_header->next_hdr) {
+			ul_header = (struct rmnet_map_v5_csum_header *)(map_header + 1);
+			hdr_size += sizeof(struct rmnet_map_v5_csum_header);
+		}
+
+		skb_len = pkt_len - (map_header->pad_len&0x3F);
+		skb_len -= dl_minimum_padding;
+
+		mux_id = map_header->mux_id - QUECTEL_QMAP_MUX_ID;
+		if (mux_id >= pQmapDev->qmap_mode) {
+			dev_info(dev, "drop qmap unknown mux_id %x\n", map_header->mux_id);
+			goto error_pkt;
+		}
+
+		qmap_net = pQmapDev->mpQmapNetDev[mux_id];
+		if (qmap_net == NULL) {
+			dev_info(dev, "drop qmap unknown mux_id %x\n", map_header->mux_id);
+			goto skip_pkt;
+		}
+
+		if (skb_len > qmap_net->mtu) {
+			dev_info(dev, "drop skb_len=%x larger than mtu=%d\n", skb_len, qmap_net->mtu);
+			goto error_pkt;
+		}
+
+		if (skb_in->len < (pkt_len + hdr_size)) {
+			dev_info(dev, "drop qmap unknown pkt, len=%d, pkt_len=%d\n", skb_in->len, pkt_len);
+			goto error_pkt;
+		}
+
+		if (map_header->cd_bit) {
+			dev_info(dev, "skip qmap command packet\n");
+			goto skip_pkt;
+		}
+
+		switch (skb_in->data[hdr_size] & 0xf0) {
+		case 0x40:
+#ifdef CONFIG_QCA_NSS_PACKET_FILTER
+		{
+			struct iphdr *ip4h = (struct iphdr *)(&skb_in->data[hdr_size]);
+			if (ip4h->protocol == IPPROTO_ICMP) {
+				skip_nss = 1;
+			}
+		}
+#endif
+			protocol = htons(ETH_P_IP);
+			break;
+		case 0x60:
+#ifdef CONFIG_QCA_NSS_PACKET_FILTER
+		{
+			struct ipv6hdr *ip6h = (struct ipv6hdr *)(&skb_in->data[hdr_size]);
+			if (ip6h->nexthdr == NEXTHDR_ICMP) {
+				skip_nss = 1;
+			}
+		}
+#endif
+			protocol = htons(ETH_P_IPV6);
+			break;
+		default:
+			dev_info(dev, "unknown skb->protocol %02x\n", skb_in->data[hdr_size]);
+			goto error_pkt;
+		}
+
+		qmap_skb = netdev_alloc_skb(qmap_net, skb_len);
+		if (qmap_skb) {
+			skb_put(qmap_skb, skb_len);
+			memcpy(qmap_skb->data, skb_in->data + hdr_size, skb_len);
+		}
+
+		if (qmap_skb == NULL) {
+			dev_info(dev, "fail to alloc skb, pkt_len = %d\n", skb_len);
+			goto error_pkt;
+		}
+
+		skb_reset_transport_header(qmap_skb);
+		skb_reset_network_header(qmap_skb);
+		qmap_skb->pkt_type = PACKET_HOST;
+		skb_set_mac_header(qmap_skb, 0);
+		qmap_skb->protocol = protocol;
+
+		if(skip_nss)
+			qmap_skb->cb[0] = 1;
+
+		if (ul_header && ul_header->header_type == RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD
+			&& ul_header->csum_valid_required) {
+#if 0 //TODO
+			qmap_skb->ip_summed = CHECKSUM_UNNECESSARY;
+#endif
+		}
+
+		if (qmap_skb->dev->type == ARPHRD_ETHER) {
+			skb_push(qmap_skb, ETH_HLEN);
+			skb_reset_mac_header(qmap_skb);
+			memcpy(eth_hdr(qmap_skb)->h_source, default_modem_addr, ETH_ALEN);
+			memcpy(eth_hdr(qmap_skb)->h_dest, qmap_net->dev_addr, ETH_ALEN);
+			eth_hdr(qmap_skb)->h_proto = protocol;
+#ifdef QUECTEL_BRIDGE_MODE
+			bridge_mode_rx_fixup(pQmapDev, qmap_net, qmap_skb);
+#endif
+		}
+
+		__skb_queue_tail(skb_chain, qmap_skb);
+
+skip_pkt:
+		skb_pull(skb_in, pkt_len + hdr_size);
+	}
+
+error_pkt:
+	return;
+}
+
+static int qmap_qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
+{
+	struct qmi_wwan_state *info = (void *)&dev->data;
+	sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
+	struct sk_buff *qmap_skb;
+	struct sk_buff_head skb_chain;
+
+	if (pQmapDev->qmap_mode == 0)
+		return qmi_wwan_rx_fixup(dev, skb_in);
+
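+	/* De-aggregate the URB: a single USB transfer may carry several QMAP
+	 * frames, each prefixed by a rmnet_map_header; split them into
+	 * per-mux_id skbs collected on skb_chain. */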
+ qmap_packet_decode(pQmapDev, skb_in, &skb_chain); + + while ((qmap_skb = __skb_dequeue (&skb_chain))) { + if (qmap_skb->dev != dev->net) { + WARN_ON(1); //never reach here. + } + else { + qmap_skb->protocol = 0; + usbnet_skb_return(dev, qmap_skb); + } + } + + return 0; +} +#endif + +/* very simplistic detection of IPv4 or IPv6 headers */ +static bool possibly_iphdr(const char *data) +{ + return (data[0] & 0xd0) == 0x40; +} + +/* disallow addresses which may be confused with IP headers */ +static int qmi_wwan_mac_addr(struct net_device *dev, void *p) +{ + int ret; + struct sockaddr *addr = p; + + ret = eth_prepare_mac_addr_change(dev, p); + if (ret < 0) + return ret; + if (possibly_iphdr(addr->sa_data)) + return -EADDRNOTAVAIL; + eth_commit_mac_addr_change(dev, p); + return 0; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,10,0 )) //bc1f44709cf27fb2a5766cadafe7e2ad5e9cb221 +static void (*_usbnet_get_stats64)(struct net_device *net, struct rtnl_link_stats64 *stats); + +static void qmi_wwan_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats) { + if (_usbnet_get_stats64) ////c8b5d129ee293bcf972e7279ac996bb8a138505c + return _usbnet_get_stats64(net, stats); + + netdev_stats_to_stats64(stats, &net->stats); +} +#else +static struct rtnl_link_stats64 * (*_usbnet_get_stats64)(struct net_device *net, struct rtnl_link_stats64 *stats); + +static struct rtnl_link_stats64 * qmi_wwan_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats) { + if (_usbnet_get_stats64) + return _usbnet_get_stats64(net, stats); + + netdev_stats_to_stats64(stats, &net->stats); + return stats; +} +#endif + +static int qmi_wwan_open (struct net_device *net) { + struct usbnet * usbnetdev = netdev_priv( net ); + struct qmi_wwan_state *info = (void *)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + int retval; + + retval = usbnet_open(net); + + if (!retval) { + if (pQmapDev && pQmapDev->qmap_mode == 1) { + if (pQmapDev->link_state) + netif_carrier_on(net); + } + } + + return retval; +} + +static netdev_tx_t qmi_wwan_start_xmit (struct sk_buff *skb, + struct net_device *net) +{ + struct usbnet * usbnetdev = netdev_priv( net ); + struct qmi_wwan_state *info = (void *)&usbnetdev->data; + sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused; + int retval; + + retval = usbnet_start_xmit(skb, net); + + if (netif_queue_stopped(net) && pQmapDev && pQmapDev->use_rmnet_usb) { + int i; + + for (i = 0; i < pQmapDev->qmap_mode; i++) { + struct net_device *qmap_net = pQmapDev->mpQmapNetDev[i]; + if (qmap_net) { + netif_stop_queue(qmap_net); + } + } + } + + return retval; +} + +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 5,14,0 )) //b9067f5dc4a07c8e24e01a1b277c6722d91be39e +#define use_ndo_siocdevprivate +#endif +#ifdef use_ndo_siocdevprivate +static int qmap_ndo_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd) { + return qmap_ndo_do_ioctl(dev, ifr, cmd); +} +#endif + +static const struct net_device_ops qmi_wwan_netdev_ops = { + .ndo_open = qmi_wwan_open, + .ndo_stop = usbnet_stop, + .ndo_start_xmit = qmi_wwan_start_xmit, + .ndo_tx_timeout = usbnet_tx_timeout, + .ndo_change_mtu = usbnet_change_mtu, + .ndo_get_stats64 = qmi_wwan_get_stats64, + .ndo_set_mac_address = qmi_wwan_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#if defined(QUECTEL_WWAN_QMAP)// && defined(CONFIG_ANDROID) + .ndo_do_ioctl = qmap_ndo_do_ioctl, +#ifdef use_ndo_siocdevprivate + .ndo_siocdevprivate = qmap_ndo_siocdevprivate, +#endif +#endif +}; + +static void 
ql_net_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) +{ + /* Inherit standard device info */ + usbnet_get_drvinfo(net, info); + strlcpy(info->driver, driver_name, sizeof(info->driver)); + strlcpy(info->version, VERSION_NUMBER, sizeof(info->version)); +} + +static struct ethtool_ops ql_net_ethtool_ops; + +/* using a counter to merge subdriver requests with our own into a + * combined state + */ +static int qmi_wwan_manage_power(struct usbnet *dev, int on) +{ + struct qmi_wwan_state *info = (void *)&dev->data; + int rv; + + dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, + atomic_read(&info->pmcount), on); + + if ((on && atomic_add_return(1, &info->pmcount) == 1) || + (!on && atomic_dec_and_test(&info->pmcount))) { + /* need autopm_get/put here to ensure the usbcore sees + * the new value + */ + rv = usb_autopm_get_interface(dev->intf); + dev->intf->needs_remote_wakeup = on; + if (!rv) + usb_autopm_put_interface(dev->intf); + } + return 0; +} + +static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on) +{ + struct usbnet *dev = usb_get_intfdata(intf); + + /* can be called while disconnecting */ + if (!dev) + return 0; + return qmi_wwan_manage_power(dev, on); +} + +/* collect all three endpoints and register subdriver */ +static int qmi_wwan_register_subdriver(struct usbnet *dev) +{ + int rv; + struct usb_driver *subdriver = NULL; + struct qmi_wwan_state *info = (void *)&dev->data; + + /* collect bulk endpoints */ + rv = usbnet_get_endpoints(dev, info->data); + if (rv < 0) + goto err; + + /* update status endpoint if separate control interface */ + if (info->control != info->data) + dev->status = &info->control->cur_altsetting->endpoint[0]; + + /* require interrupt endpoint for subdriver */ + if (!dev->status) { + rv = -EINVAL; + goto err; + } + + /* for subdriver power management */ + atomic_set(&info->pmcount, 0); + + /* register subdriver */ +#if (LINUX_VERSION_CODE > KERNEL_VERSION( 5,12,0 )) //cac6fb015f719104e60b1c68c15ca5b734f57b9c + subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, + 4096, WWAN_PORT_QMI, &qmi_wwan_cdc_wdm_manage_power); +#else + subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, + 4096, &qmi_wwan_cdc_wdm_manage_power); + +#endif + if (IS_ERR(subdriver)) { + dev_err(&info->control->dev, "subdriver registration failed\n"); + rv = PTR_ERR(subdriver); + goto err; + } + + /* prevent usbnet from using status endpoint */ + dev->status = NULL; + + /* save subdriver struct for suspend/resume wrappers */ + info->subdriver = subdriver; + +err: + return rv; +} + +static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) +{ + int status = -1; + struct usb_driver *driver = driver_of(intf); + struct qmi_wwan_state *info = (void *)&dev->data; + + BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < + sizeof(struct qmi_wwan_state))); + + /* set up initial state */ + info->control = intf; + info->data = intf; + + status = qmi_wwan_register_subdriver(dev); + if (status < 0 && info->control != info->data) { + usb_set_intfdata(info->data, NULL); + usb_driver_release_interface(driver, info->data); + } + + /* Never use the same address on both ends of the link, even + * if the buggy firmware told us to. 
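+ * Quectel firmware commonly reports the fixed address
+ * 00:a0:c6:00:00:00 (default_modem_addr); a random MAC is
+ * generated instead so host and modem never collide.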
+ */
+	if (ether_addr_equal(dev->net->dev_addr, default_modem_addr))
+		eth_hw_addr_random(dev->net);
+
+	/* make MAC addr easily distinguishable from an IP header */
+	if (possibly_iphdr(dev->net->dev_addr)) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
+		dev->net->dev_addr[0] |= 0x02;	/* set local assignment bit */
+		dev->net->dev_addr[0] &= 0xbf;	/* clear "IP" bit */
+#else
+		u8 addr = dev->net->dev_addr[0];
+
+		addr |= 0x02;	/* set local assignment bit */
+		addr &= 0xbf;	/* clear "IP" bit */
+		dev_addr_mod(dev->net, 0, &addr, 1);
+#endif
+	}
+	if (!_usbnet_get_stats64)
+		_usbnet_get_stats64 = dev->net->netdev_ops->ndo_get_stats64;
+	dev->net->netdev_ops = &qmi_wwan_netdev_ops;
+
+	ql_net_ethtool_ops = *dev->net->ethtool_ops;
+	ql_net_ethtool_ops.get_drvinfo = ql_net_get_drvinfo;
+	dev->net->ethtool_ops = &ql_net_ethtool_ops;
+
+#if 1 //Added by Quectel
+	if (dev->driver_info->flags & FLAG_NOARP) {
+		int ret;
+		char buf[32] = "Module";
+
+		ret = usb_string(dev->udev, dev->udev->descriptor.iProduct, buf, sizeof(buf));
+		if (ret > 0) {
+			buf[ret] = '\0';
+		}
+
+		dev_info(&intf->dev, "Quectel %s works in RawIP mode\n", buf);
+		dev->net->flags |= IFF_NOARP;
+		dev->net->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+
+		usb_control_msg(
+			interface_to_usbdev(intf),
+			usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+			0x22, //USB_CDC_REQ_SET_CONTROL_LINE_STATE
+			0x21, //USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE
+			1, //activate CDC DTR
+			intf->cur_altsetting->desc.bInterfaceNumber,
+			NULL, 0, 100);
+	}
+
+	//avoid the module reporting MTU 1460 while still receiving 1500-byte IP packets, which can crash the host system
+	//the next setting keeps usbnet.c:usbnet_change_mtu() from modifying rx_urb_size according to the hard mtu
+	dev->rx_urb_size = ETH_DATA_LEN + ETH_HLEN + 6;
+
+#if defined(QUECTEL_WWAN_QMAP)
+	if (qmap_mode > QUECTEL_WWAN_QMAP)
+		qmap_mode = QUECTEL_WWAN_QMAP;
+
+	if (!status)
+	{
+		sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)kzalloc(sizeof(sQmiWwanQmap), GFP_KERNEL);
+
+		if (pQmapDev == NULL)
+			return -ENODEV;
+
+#ifdef QUECTEL_BRIDGE_MODE
+		pQmapDev->bridge_mode = bridge_mode;
+#ifdef CONFIG_BRIDGE_LAN
+		memcpy(pQmapDev->bridge_self_mac, broadcast_mac_addr, ETH_ALEN);
+#endif
+#endif
+		pQmapDev->mpNetDev = dev;
+		pQmapDev->link_state = 1;
+		//on OpenWrt, if rmnet_usb0.1 is set as WAN, '/sbin/netifd' will auto-create a VLAN on rmnet_usb0
+		dev->net->features |= (NETIF_F_VLAN_CHALLENGED);
+
+		if (dev->driver_info->flags & FLAG_NOARP)
+		{
+			int qmap_version = (dev->driver_info->data>>8)&0xFF;
+			int qmap_size = (dev->driver_info->data)&0xFF;
+			int idProduct = le16_to_cpu(dev->udev->descriptor.idProduct);
+			int lte_a = (idProduct == 0x0306 || idProduct == 0x030B || idProduct == 0x0512 || idProduct == 0x0620 || idProduct == 0x0800 || idProduct == 0x0801);
+
+			if (qmap_size > 4096 || dev->udev->speed >= USB_SPEED_SUPER) { //modules meeting these requirements must be LTE-A or 5G
+				lte_a = 1;
+			}
+
+			pQmapDev->qmap_mode = qmap_mode;
+			if (lte_a && pQmapDev->qmap_mode == 0) {
+				pQmapDev->qmap_mode = 1; //force use QMAP
+				if(qmap_mode == 0)
+					qmap_mode = 1; //old quectel-CM only checks sys/module/wwan0/parameters/qmap_mode
+			}
+
+			if (pQmapDev->qmap_mode) {
+				pQmapDev->qmap_version = qmap_version;
+				pQmapDev->qmap_size = qmap_size*1024;
+				dev->rx_urb_size = pQmapDev->qmap_size;
+				//for these modules, sending packets before QMI start-network can crash the host PC or the module
+				pQmapDev->link_state = !lte_a;
+
+				if (pQmapDev->qmap_mode > 1)
+					pQmapDev->use_rmnet_usb = 1;
+				else if (idProduct == 0x0800
|| idProduct == 0x0801) + pQmapDev->use_rmnet_usb = 1; //benefit for ul data agg +#ifdef QMI_NETDEV_ONE_CARD_MODE + if(pQmapDev->use_rmnet_usb == 1 && pQmapDev->qmap_mode == 1) + one_card_mode = 1; + pQmapDev->rmnet_info.mux_id[0] = QUECTEL_QMAP_MUX_ID; +#endif + pQmapDev->rmnet_info.size = sizeof(RMNET_INFO); + pQmapDev->rmnet_info.rx_urb_size = pQmapDev->qmap_size; + pQmapDev->rmnet_info.ep_type = 2; //DATA_EP_TYPE_HSUSB + pQmapDev->rmnet_info.iface_id = 4; + pQmapDev->rmnet_info.qmap_mode = pQmapDev->qmap_mode; + pQmapDev->rmnet_info.qmap_version = pQmapDev->qmap_version; + pQmapDev->rmnet_info.dl_minimum_padding = 0; + +#if defined(QUECTEL_UL_DATA_AGG) + pQmapDev->tx_ctx.ul_data_aggregation_max_datagrams = 1; + pQmapDev->tx_ctx.ul_data_aggregation_max_size = 1500; +#endif + + if (pQmapDev->use_rmnet_usb && !one_card_mode) { + pQmapDev->driver_info = rmnet_usb_info; + pQmapDev->driver_info.data = dev->driver_info->data; + dev->driver_info = &pQmapDev->driver_info; + } + + if (pQmapDev->use_rmnet_usb && !one_card_mode) { + pQmapDev->usbnet_bh = dev->bh; + tasklet_init(&dev->bh, usbnet_bh, (unsigned long)pQmapDev); + } + } + } + + info->unused = (unsigned long)pQmapDev; + dev->net->sysfs_groups[0] = &qmi_wwan_sysfs_attr_group; + + dev_info(&intf->dev, "rx_urb_size = %zd\n", dev->rx_urb_size); + } +#endif +#endif + + return status; +} + +static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf) +{ + struct qmi_wwan_state *info = (void *)&dev->data; + struct usb_driver *driver = driver_of(intf); + struct usb_interface *other; + + if (dev->udev && dev->udev->state == USB_STATE_CONFIGURED) { + usb_control_msg( + interface_to_usbdev(intf), + usb_sndctrlpipe(interface_to_usbdev(intf), 0), + 0x22, //USB_CDC_REQ_SET_CONTROL_LINE_STATE + 0x21, //USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE + 0, //deactive CDC DTR + intf->cur_altsetting->desc.bInterfaceNumber, + NULL, 0, 100); + } + + if (info->subdriver && info->subdriver->disconnect) + info->subdriver->disconnect(info->control); + + /* allow user to unbind using either control or data */ + if (intf == info->control) + other = info->data; + else + other = info->control; + + /* only if not shared */ + if (other && intf != other) { + usb_set_intfdata(other, NULL); + usb_driver_release_interface(driver, other); + } + + info->subdriver = NULL; + info->data = NULL; + info->control = NULL; +} + +/* suspend/resume wrappers calling both usbnet and the cdc-wdm + * subdriver if present. + * + * NOTE: cdc-wdm also supports pre/post_reset, but we cannot provide + * wrappers for those without adding usbnet reset support first. + */ +static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message) +{ + struct usbnet *dev = usb_get_intfdata(intf); + struct qmi_wwan_state *info = (void *)&dev->data; + int ret; + + /* Both usbnet_suspend() and subdriver->suspend() MUST return 0 + * in system sleep context, otherwise, the resume callback has + * to recover device from previous suspend failure. 
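+ * If the subdriver suspend fails after usbnet_suspend() succeeded,
+ * usbnet_resume() below rolls the interface back to the running state.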
+ */
+	ret = usbnet_suspend(intf, message);
+	if (ret < 0)
+		goto err;
+
+	if (intf == info->control && info->subdriver &&
+	    info->subdriver->suspend)
+		ret = info->subdriver->suspend(intf, message);
+	if (ret < 0)
+		usbnet_resume(intf);
+err:
+	return ret;
+}
+
+static int qmi_wwan_resume(struct usb_interface *intf)
+{
+	struct usbnet *dev = usb_get_intfdata(intf);
+	struct qmi_wwan_state *info = (void *)&dev->data;
+	int ret = 0;
+	bool callsub = (intf == info->control && info->subdriver &&
+			info->subdriver->resume);
+
+	if (callsub)
+		ret = info->subdriver->resume(intf);
+	if (ret < 0)
+		goto err;
+	ret = usbnet_resume(intf);
+	if (ret < 0 && callsub)
+		info->subdriver->suspend(intf, PMSG_SUSPEND);
+
+#if defined(QUECTEL_WWAN_QMAP)
+	if (!netif_queue_stopped(dev->net)) {
+		qmap_wake_queue((sQmiWwanQmap *)info->unused);
+	}
+#endif
+
+err:
+	return ret;
+}
+
+static int qmi_wwan_reset_resume(struct usb_interface *intf)
+{
+	dev_info(&intf->dev, "device does not support reset_resume\n");
+	intf->needs_binding = 1;
+	return -EOPNOTSUPP;
+}
+
+static struct sk_buff *rmnet_usb_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
+{
+	//printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len);
+	if (skb->protocol != htons(ETH_P_MAP)) {
+		dev_kfree_skb_any(skb);
+		return NULL;
+	}
+
+	return skb;
+}
+
+static int rmnet_usb_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+	struct net_device *net = dev->net;
+	unsigned headroom = skb_headroom(skb);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION( 3,3,1 )) //7bdd402706cf26bfef9050dfee3f229b7f33ee4f
+//some customers port this driver to v3.2
+	if (net->type == ARPHRD_ETHER && headroom < ETH_HLEN) {
+		unsigned tailroom = skb_tailroom(skb);
+
+		if ((tailroom + headroom) >= ETH_HLEN) {
+			unsigned moveroom = ETH_HLEN - headroom;
+
+			memmove(skb->data + moveroom, skb->data, skb->len);
+			skb->data += moveroom;
+			skb->tail += moveroom;
+	#ifdef WARN_ONCE
+			WARN_ONCE(1, "It is better to reserve headroom in usbnet.c:rx_submit()!\n");
+	#endif
+		}
+	}
+#endif
+
+	//printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len);
+	if (net->type == ARPHRD_ETHER && headroom >= ETH_HLEN) {
+		//usbnet.c rx_process() usbnet_skb_return() eth_type_trans()
+		skb_push(skb, ETH_HLEN);
+		skb_reset_mac_header(skb);
+		memcpy(eth_hdr(skb)->h_source, default_modem_addr, ETH_ALEN);
+		memcpy(eth_hdr(skb)->h_dest, net->dev_addr, ETH_ALEN);
+		eth_hdr(skb)->h_proto = htons(ETH_P_MAP);
+
+		return 1;
+	}
+
+	return 0;
+}
+
+static rx_handler_result_t rmnet_usb_rx_handler(struct sk_buff **pskb)
+{
+	struct sk_buff *skb = *pskb;
+	struct usbnet *dev;
+	struct qmi_wwan_state *info;
+	sQmiWwanQmap *pQmapDev;
+	struct sk_buff *qmap_skb;
+	struct sk_buff_head skb_chain;
+
+	if (!skb)
+		goto done;
+
+	//printk("%s skb=%p, protocol=%x, len=%d\n", __func__, skb, skb->protocol, skb->len);
+
+	if (skb->pkt_type == PACKET_LOOPBACK)
+		return RX_HANDLER_PASS;
+
+	if (skb->protocol != htons(ETH_P_MAP)) {
+		WARN_ON(1);
+		return RX_HANDLER_PASS;
+	}
+	/* when the HyFi function is enabled, running quectel-CM can crash the
+	   system, so do not fetch the usbnet via rx_handler_data */
+	//dev = rcu_dereference(skb->dev->rx_handler_data);
+	dev = netdev_priv(skb->dev);
+
+	if (dev == NULL) {
+		WARN_ON(1);
+		return RX_HANDLER_PASS;
+	}
+
+	info = (struct qmi_wwan_state *)&dev->data;
+	pQmapDev = (sQmiWwanQmap *)info->unused;
+
+	qmap_packet_decode(pQmapDev, skb, &skb_chain);
+	while ((qmap_skb = __skb_dequeue (&skb_chain))) {
+		struct net_device *qmap_net = qmap_skb->dev;
+
rmnet_vnd_update_rx_stats(qmap_net, 1, qmap_skb->len); + if (qmap_net->type == ARPHRD_ETHER) + __skb_pull(qmap_skb, ETH_HLEN); + netif_receive_skb(qmap_skb); + } + consume_skb(skb); + +done: + return RX_HANDLER_CONSUMED; +} + +static const struct driver_info qmi_wwan_info = { + .description = "WWAN/QMI device", + .flags = FLAG_WWAN, + .bind = qmi_wwan_bind, + .unbind = qmi_wwan_unbind, + .manage_power = qmi_wwan_manage_power, +}; + +#define qmi_wwan_raw_ip_info \ + .description = "WWAN/QMI device", \ + .flags = FLAG_WWAN | FLAG_RX_ASSEMBLE | FLAG_NOARP | FLAG_SEND_ZLP, \ + .bind = qmi_wwan_bind, \ + .unbind = qmi_wwan_unbind, \ + .manage_power = qmi_wwan_manage_power, \ + .tx_fixup = qmap_qmi_wwan_tx_fixup, \ + .rx_fixup = qmap_qmi_wwan_rx_fixup, \ + +static const struct driver_info rmnet_usb_info = { + .description = "RMNET/USB device", + .flags = FLAG_WWAN | FLAG_NOARP | FLAG_SEND_ZLP, + .bind = qmi_wwan_bind, + .unbind = qmi_wwan_unbind, + .manage_power = qmi_wwan_manage_power, + .tx_fixup = rmnet_usb_tx_fixup, + .rx_fixup = rmnet_usb_rx_fixup, +}; + +static const struct driver_info qmi_wwan_raw_ip_info_mdm9x07 = { + qmi_wwan_raw_ip_info + .data = (5<<8)|4, //QMAPV1 and 4KB +}; + +// mdm9x40/sdx12/sdx20/sdx24 share the same config +static const struct driver_info qmi_wwan_raw_ip_info_mdm9x40 = { + qmi_wwan_raw_ip_info + .data = (5<<8)|16, //QMAPV1 and 16KB +}; + +static const struct driver_info qmi_wwan_raw_ip_info_sdx55 = { + qmi_wwan_raw_ip_info + .data = (9<<8)|31, //QMAPV5 and 31KB +}; + +/* map QMI/wwan function by a fixed interface number */ +#define QMI_FIXED_INTF(vend, prod, num) \ + USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \ + .driver_info = (unsigned long)&qmi_wwan_info + +#define QMI_FIXED_RAWIP_INTF(vend, prod, num, chip) \ + USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \ + .driver_info = (unsigned long)&qmi_wwan_raw_ip_info_##chip + +static const struct usb_device_id products[] = { + { QMI_FIXED_INTF(0x05C6, 0x9003, 4) }, /* Quectel UC20 */ + { QMI_FIXED_INTF(0x05C6, 0x9215, 4) }, /* Quectel EC20 (MDM9215) */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0125, 4, mdm9x07) }, /* Quectel EC20 (MDM9X07)/EC25/EG25 */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0121, 4, mdm9x07) }, /* Quectel EC21 */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0191, 4, mdm9x07) }, /* Quectel EG91 */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0195, 4, mdm9x07) }, /* Quectel EG95 */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0700, 3, mdm9x07) }, /* Quectel BG95 (at+qcfgext="usbnet","rmnet") */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0306, 4, mdm9x40) }, /* Quectel EG06/EP06/EM06 */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x030B, 4, mdm9x40) }, /* Quectel EG065k/EG060K */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0512, 4, mdm9x40) }, /* Quectel EG12/EP12/EM12/EG16/EG18 */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0296, 4, mdm9x07) }, /* Quectel BG96 */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0435, 4, mdm9x07) }, /* Quectel AG35 */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0620, 4, mdm9x40) }, /* Quectel EG20 */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0800, 4, sdx55) }, /* Quectel RG500 */ + { QMI_FIXED_RAWIP_INTF(0x2C7C, 0x0801, 4, sdx55) }, /* Quectel RG520 */ + { } /* END */ +}; +MODULE_DEVICE_TABLE(usb, products); + +static int qmi_wwan_probe(struct usb_interface *intf, + const struct usb_device_id *prod) +{ + struct usb_device_id *id = (struct usb_device_id *)prod; + + /* Workaround to enable dynamic IDs. This disables usbnet + * blacklisting functionality. 
+	if (!id->driver_info) {
+		dev_dbg(&intf->dev, "setting defaults for dynamic device id\n");
+		id->driver_info = (unsigned long)&qmi_wwan_info;
+	}
+
+	if (intf->cur_altsetting->desc.bInterfaceClass != 0xff) {
+		dev_info(&intf->dev, "Quectel module is not in qmi_wwan mode! Please check 'at+qcfg=\"usbnet\"'\n");
+		return -ENODEV;
+	}
+
+	return usbnet_probe(intf, id);
+}
+
+#if defined(QUECTEL_WWAN_QMAP)
+static int qmap_qmi_wwan_probe(struct usb_interface *intf,
+			       const struct usb_device_id *prod)
+{
+	int status = qmi_wwan_probe(intf, prod);
+
+	if (!status) {
+		struct usbnet *dev = usb_get_intfdata(intf);
+		struct qmi_wwan_state *info = (void *)&dev->data;
+		sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
+		unsigned i;
+
+		if (!pQmapDev)
+			return status;
+
+		tasklet_init(&pQmapDev->txq, rmnet_usb_tx_wake_queue, (unsigned long)pQmapDev);
+
+		if (pQmapDev->qmap_mode == 1) {
+			pQmapDev->mpQmapNetDev[0] = dev->net;
+			if (pQmapDev->use_rmnet_usb && !one_card_mode) {
+				pQmapDev->mpQmapNetDev[0] = NULL;
+				qmap_register_device(pQmapDev, 0);
+			}
+		}
+		else if (pQmapDev->qmap_mode > 1) {
+			for (i = 0; i < pQmapDev->qmap_mode; i++) {
+				qmap_register_device(pQmapDev, i);
+			}
+		}
+
+		if (pQmapDev->use_rmnet_usb && !one_card_mode) {
+			rtnl_lock();
+			/* when the hyfi function is enabled, running the connection
+			 * manager would crash the system, so do not pass the usbnet
+			 * context as rx_handler_data:
+			 */
+			//netdev_rx_handler_register(dev->net, rmnet_usb_rx_handler, dev);
+			netdev_rx_handler_register(dev->net, rmnet_usb_rx_handler, NULL);
+			rtnl_unlock();
+		}
+
+		if (pQmapDev->link_state == 0) {
+			netif_carrier_off(dev->net);
+		}
+	}
+
+	return status;
+}
+
+static void qmap_qmi_wwan_disconnect(struct usb_interface *intf)
+{
+	struct usbnet *dev = usb_get_intfdata(intf);
+	struct qmi_wwan_state *info;
+	sQmiWwanQmap *pQmapDev;
+	uint i;
+
+	if (!dev)
+		return;
+
+	info = (void *)&dev->data;
+	pQmapDev = (sQmiWwanQmap *)info->unused;
+
+	if (!pQmapDev) {
+		usbnet_disconnect(intf);
+		return;
+	}
+
+	pQmapDev->link_state = 0;
+
+	if (pQmapDev->qmap_mode > 1) {
+		for (i = 0; i < pQmapDev->qmap_mode; i++) {
+			qmap_unregister_device(pQmapDev, i);
+		}
+	}
+
+	if (pQmapDev->use_rmnet_usb && !one_card_mode) {
+		qmap_unregister_device(pQmapDev, 0);
+		rtnl_lock();
+		netdev_rx_handler_unregister(dev->net);
+		rtnl_unlock();
+	}
+
+	tasklet_kill(&pQmapDev->txq);
+
+	usbnet_disconnect(intf);
+	/* struct usbnet *dev has been freed by usbnet_disconnect()->free_netdev(),
+	 * so info (which lives inside dev) must not be accessed after this point.
+	 */
+	//info->unused = 0;
+	kfree(pQmapDev);
+}
+#endif
+
+static struct usb_driver qmi_wwan_driver = {
+	.name			= "qmi_wwan_q",
+	.id_table		= products,
+#if defined(QUECTEL_WWAN_QMAP)
+	.probe			= qmap_qmi_wwan_probe,
+	.disconnect		= qmap_qmi_wwan_disconnect,
+#else
+	.probe			= qmi_wwan_probe,
+	.disconnect		= usbnet_disconnect,
+#endif
+	.suspend		= qmi_wwan_suspend,
+	.resume			= qmi_wwan_resume,
+	.reset_resume		= qmi_wwan_reset_resume,
+	.supports_autosuspend	= 1,
+	.disable_hub_initiated_lpm = 1,
+};
+
+static int __init qmi_wwan_driver_init(void)
+{
+#ifdef CONFIG_QCA_NSS_DRV
+	nss_cb = rcu_dereference(rmnet_nss_callbacks);
+	if (!nss_cb) {
+		printk(KERN_ERR "qmi_wwan_driver_init: this driver must be loaded after '/etc/modules.d/42-rmnet-nss'\n");
+	}
+#endif
+	return usb_register(&qmi_wwan_driver);
+}
+module_init(qmi_wwan_driver_init);
+
+static void __exit qmi_wwan_driver_exit(void)
+{
+	usb_deregister(&qmi_wwan_driver);
+}
+module_exit(qmi_wwan_driver_exit);
+
+MODULE_AUTHOR("Bjørn Mork ");
+MODULE_DESCRIPTION("Qualcomm MSM Interface (QMI) WWAN driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(QUECTEL_WWAN_VERSION);
diff --git a/wwan/driver/quectel_QMI_WWAN/src/rmnet_nss.c b/wwan/driver/quectel_QMI_WWAN/src/rmnet_nss.c
new file mode 100644
index 0000000..1165910
--- /dev/null
+++ b/wwan/driver/quectel_QMI_WWAN/src/rmnet_nss.c
@@ -0,0 +1,498 @@
+/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hashtable.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/version.h>
+#include <nss_api_if.h>
+
+#ifndef _RMNET_NSS_H_
+#define _RMNET_NSS_H_
+
+struct rmnet_nss_cb {
+	int (*nss_create)(struct net_device *dev);
+	int (*nss_free)(struct net_device *dev);
+	int (*nss_tx)(struct sk_buff *skb);
+};
+
+extern struct rmnet_nss_cb *rmnet_nss_callbacks;
+
+#endif
+
+#define RMNET_NSS_HASH_BITS 8
+#define hash_add_ptr(table, node, key) \
+	hlist_add_head(node, &table[hash_ptr(key, HASH_BITS(table))])
+
+static DEFINE_HASHTABLE(rmnet_nss_ctx_hashtable, RMNET_NSS_HASH_BITS);
+
+struct rmnet_nss_ctx {
+	struct hlist_node hnode;
+	struct net_device *rmnet_dev;
+	struct nss_rmnet_rx_handle *nss_ctx;
+};
+
+enum __rmnet_nss_stat {
+	RMNET_NSS_RX_ETH,
+	RMNET_NSS_RX_FAIL,
+	RMNET_NSS_RX_NON_ETH,
+	RMNET_NSS_RX_BUSY,
+	RMNET_NSS_TX_NO_CTX,
+	RMNET_NSS_TX_SUCCESS,
+	RMNET_NSS_TX_FAIL,
+	RMNET_NSS_TX_NONLINEAR,
+	RMNET_NSS_TX_BAD_IP,
+	RMNET_NSS_EXCEPTIONS,
+	RMNET_NSS_EX_BAD_HDR,
+	RMNET_NSS_EX_BAD_IP,
+	RMNET_NSS_EX_SUCCESS,
+	RMNET_NSS_TX_BAD_FRAGS,
+	RMNET_NSS_TX_LINEARIZE_FAILS,
+	RMNET_NSS_TX_NON_ZERO_HEADLEN_FRAGS,
+	RMNET_NSS_TX_BUSY_LOOP,
+	RMNET_NSS_NUM_STATS,
+};
+
+static unsigned long rmnet_nss_stats[RMNET_NSS_NUM_STATS];
+extern void qmi_rmnet_mark_skb(struct net_device *dev, struct sk_buff *skb);
+static void (*rmnet_mark_skb)(struct net_device *dev, struct sk_buff *skb);
+
+#define RMNET_NSS_STAT(name, counter, desc) \
+	module_param_named(name, rmnet_nss_stats[counter], ulong, 0444); \
+	MODULE_PARM_DESC(name, desc)
+
+RMNET_NSS_STAT(rmnet_nss_rx_ethernet, RMNET_NSS_RX_ETH,
+	       "Number of Ethernet headers successfully removed");
+RMNET_NSS_STAT(rmnet_nss_rx_fail, RMNET_NSS_RX_FAIL,
+	       "Number of Ethernet headers that could not be removed");
+RMNET_NSS_STAT(rmnet_nss_rx_non_ethernet, RMNET_NSS_RX_NON_ETH,
+	       "Number of non-Ethernet packets received");
+RMNET_NSS_STAT(rmnet_nss_rx_busy, RMNET_NSS_RX_BUSY,
+	       "Number of packets dropped because rmnet_data device was busy");
+RMNET_NSS_STAT(rmnet_nss_tx_slow, RMNET_NSS_TX_NO_CTX,
+	       "Number of packets sent over non-NSS-accelerated rmnet device");
+RMNET_NSS_STAT(rmnet_nss_tx_fast, RMNET_NSS_TX_SUCCESS,
+	       "Number of packets sent over NSS-accelerated rmnet device");
+RMNET_NSS_STAT(rmnet_nss_tx_fail, RMNET_NSS_TX_FAIL,
+	       "Number of packets that NSS could not transmit");
+RMNET_NSS_STAT(rmnet_nss_tx_nonlinear, RMNET_NSS_TX_NONLINEAR,
+	       "Number of non-linear packets sent over NSS-accelerated rmnet device");
+RMNET_NSS_STAT(rmnet_nss_tx_invalid_ip, RMNET_NSS_TX_BAD_IP,
+	       "Number of ingress packets with invalid IP headers");
+RMNET_NSS_STAT(rmnet_nss_tx_invalid_frags, RMNET_NSS_TX_BAD_FRAGS,
+	       "Number of ingress packets with invalid frag format");
+RMNET_NSS_STAT(rmnet_nss_tx_linearize_fail, RMNET_NSS_TX_LINEARIZE_FAILS,
+	       "Number of ingress packets where linearize in tx fails");
+RMNET_NSS_STAT(rmnet_nss_tx_exceptions, RMNET_NSS_EXCEPTIONS,
+	       "Number of times our DL exception handler was invoked");
+RMNET_NSS_STAT(rmnet_nss_exception_non_ethernet, RMNET_NSS_EX_BAD_HDR,
+	       "Number of non-Ethernet exception packets");
+RMNET_NSS_STAT(rmnet_nss_exception_invalid_ip, RMNET_NSS_EX_BAD_IP,
+	       "Number of exception packets with invalid IP headers");
+RMNET_NSS_STAT(rmnet_nss_exception_success, RMNET_NSS_EX_SUCCESS,
+	       "Number of exception packets handled successfully");
+RMNET_NSS_STAT(rmnet_nss_tx_non_zero_headlen_frags, RMNET_NSS_TX_NON_ZERO_HEADLEN_FRAGS,
+	       "Number of packets with non-zero headlen");
+RMNET_NSS_STAT(rmnet_nss_tx_busy_loop, RMNET_NSS_TX_BUSY_LOOP,
+	       "Number of times tx packets busy looped");
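+
+/* The counters above are exported read-only (mode 0444) as
+ * /sys/module/rmnet_nss/parameters/<name> via module_param_named().
+ */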
+
+static void rmnet_nss_inc_stat(enum __rmnet_nss_stat stat)
+{
+	if (stat >= 0 && stat < RMNET_NSS_NUM_STATS)
+		rmnet_nss_stats[stat]++;
+}
+
+static struct rmnet_nss_ctx *rmnet_nss_find_ctx(struct net_device *dev)
+{
+	struct rmnet_nss_ctx *ctx;
+	struct hlist_head *bucket;
+	u32 hash;
+
+	hash = hash_ptr(dev, HASH_BITS(rmnet_nss_ctx_hashtable));
+	bucket = &rmnet_nss_ctx_hashtable[hash];
+	hlist_for_each_entry(ctx, bucket, hnode) {
+		if (ctx->rmnet_dev == dev)
+			return ctx;
+	}
+
+	return NULL;
+}
+
+static void rmnet_nss_free_ctx(struct rmnet_nss_ctx *ctx)
+{
+	if (ctx) {
+		hash_del(&ctx->hnode);
+		nss_rmnet_rx_xmit_callback_unregister(ctx->nss_ctx);
+		nss_rmnet_rx_destroy_sync(ctx->nss_ctx);
+		kfree(ctx);
+	}
+}
+
+/* Pull off an ethernet header, if possible */
+static int rmnet_nss_ethhdr_pull(struct sk_buff *skb)
+{
+	if (!skb->protocol || skb->protocol == htons(ETH_P_802_3)) {
+		void *ret = skb_pull(skb, sizeof(struct ethhdr));
+
+		rmnet_nss_inc_stat((ret) ? RMNET_NSS_RX_ETH :
+				   RMNET_NSS_RX_FAIL);
+		return !ret;
+	}
+
+	rmnet_nss_inc_stat(RMNET_NSS_RX_NON_ETH);
+	return -1;
+}
+
+static int rmnet_nss_handle_non_zero_headlen(struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	u8 transport;
+
+	if (skb_headlen(skb) < sizeof(struct iphdr)) {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_IP);
+		return -EINVAL;
+	}
+
+	iph = (struct iphdr *)skb->data;
+
+	if (iph->version == 4) {
+		transport = iph->protocol;
+	} else if (iph->version == 6) {
+		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
+
+		transport = ip6h->nexthdr;
+	} else {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_IP);
+		return -EINVAL;
+	}
+
+	/* Assumption: for TCP/UDP with no IP extension headers, SFE has
+	 * already copied the IP and transport headers into the linear area.
+	 */
+	if (transport != IPPROTO_TCP && transport != IPPROTO_UDP) {
+		if (skb_linearize(skb)) {
+			rmnet_nss_inc_stat(RMNET_NSS_TX_LINEARIZE_FAILS);
+			return -EINVAL;
+		}
+	} else if ((transport == IPPROTO_TCP && skb_headlen(skb) < 40) ||
+		   (transport == IPPROTO_UDP && skb_headlen(skb) < 28)) {
+		/* 40 = minimal IPv4 + TCP headers, 28 = minimal IPv4 + UDP */
+		pr_err_ratelimited("rmnet_nss: error: Partial copy of headers\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Copy headers to linear section for non linear packets */
+static int rmnet_nss_adjust_header(struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	skb_frag_t *frag;
+	int bytes = 0;
+	u8 transport;
+
+	if (skb_shinfo(skb)->nr_frags != 1) {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_FRAGS);
+		return -EINVAL;
+	}
+
+	if (skb_headlen(skb)) {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_NON_ZERO_HEADLEN_FRAGS);
+		return rmnet_nss_handle_non_zero_headlen(skb);
+	}
+
+	frag = &skb_shinfo(skb)->frags[0];
+
+	iph = (struct iphdr *)(skb_frag_address(frag));
+
+	if (iph->version == 4) {
+		bytes = iph->ihl * 4;
+		transport = iph->protocol;
+	} else if (iph->version == 6) {
+		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
+
+		bytes = sizeof(struct ipv6hdr);
+		/* Don't have to account for extension headers yet */
+		transport = ip6h->nexthdr;
+	} else {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_IP);
+		return -EINVAL;
+	}
+
+	if (transport == IPPROTO_TCP) {
+		struct tcphdr *th;
+
+		th = (struct tcphdr *)((u8 *)iph + bytes);
+		bytes += th->doff * 4;
+	} else if (transport == IPPROTO_UDP) {
+		bytes += sizeof(struct udphdr);
+	} else {
+		/* can't do anything else here unfortunately, so linearize */
+		if (skb_linearize(skb)) {
+			rmnet_nss_inc_stat(RMNET_NSS_TX_LINEARIZE_FAILS);
+			return -EINVAL;
+		}
+		return 0;
+	}
+
+	if (bytes > skb_frag_size(frag)) {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_FRAGS);
+		return -EINVAL;
+	}
+
+	skb_push(skb, bytes);
+	memcpy(skb->data, iph, bytes);
+
+	/* subtract to account for skb_push */
+	skb->len -= bytes;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
+	frag->bv_offset += bytes;
+#else
+	frag->page_offset += bytes;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) */
+	skb_frag_size_sub(frag, bytes);
+
+	/* subtract to account for skb_frag_size_sub */
+	skb->data_len -= bytes;
+
+	return 0;
+}
+
+/* Called by NSS in the DL exception case.
+ * Since the packet cannot be sent over the accelerated path, we need to
+ * handle it. Remove the ethernet header and pass it onward to the stack
+ * if possible.
+ */
+void rmnet_nss_receive(struct net_device *dev, struct sk_buff *skb,
+		       struct napi_struct *napi)
+{
+	rmnet_nss_inc_stat(RMNET_NSS_EXCEPTIONS);
+
+	if (!skb)
+		return;
+
+	if (rmnet_nss_ethhdr_pull(skb)) {
+		rmnet_nss_inc_stat(RMNET_NSS_EX_BAD_HDR);
+		goto drop;
+	}
+
+	/* reset header pointers */
+	skb_reset_transport_header(skb);
+	skb_reset_network_header(skb);
+	skb_reset_mac_header(skb);
+
+	/* reset packet type */
+	skb->pkt_type = PACKET_HOST;
+
+	skb->dev = dev;
+
+	/* reset protocol type */
+	switch (skb->data[0] & 0xF0) {
+	case 0x40:
+		skb->protocol = htons(ETH_P_IP);
+		break;
+	case 0x60:
+		skb->protocol = htons(ETH_P_IPV6);
+		break;
+	default:
+		rmnet_nss_inc_stat(RMNET_NSS_EX_BAD_IP);
+		goto drop;
+	}
+
+	rmnet_nss_inc_stat(RMNET_NSS_EX_SUCCESS);
+
+	/* Set this so that we don't loop around netif_receive_skb */
+	skb->cb[0] = 1;
+
+	netif_receive_skb(skb);
+	return;
+
+drop:
+	kfree_skb(skb);
+}
+
+/* Main downlink handler.
+ * Looks up the NSS context associated with the device. If the context is
+ * found, we add a dummy ethernet header with the appropriate protocol field
+ * set, then pass the packet off to NSS for hardware acceleration.
+ */
+int rmnet_nss_tx(struct sk_buff *skb)
+{
+	struct ethhdr *eth;
+	struct rmnet_nss_ctx *ctx;
+	struct net_device *dev = skb->dev;
+	nss_tx_status_t rc;
+	unsigned int len;
+	u8 version;
+
+	if (skb_is_nonlinear(skb)) {
+		if (rmnet_nss_adjust_header(skb))
+			goto fail;
+		else
+			rmnet_nss_inc_stat(RMNET_NSS_TX_NONLINEAR);
+	}
+
+	version = ((struct iphdr *)skb->data)->version;
+
+	ctx = rmnet_nss_find_ctx(dev);
+	if (!ctx) {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_NO_CTX);
+		return -EINVAL;
+	}
+
+	eth = (struct ethhdr *)skb_push(skb, sizeof(*eth));
+	memset(&eth->h_dest, 0, ETH_ALEN * 2);
+	if (version == 4) {
+		eth->h_proto = htons(ETH_P_IP);
+	} else if (version == 6) {
+		eth->h_proto = htons(ETH_P_IPV6);
+	} else {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_BAD_IP);
+		goto fail;
+	}
+
+	skb->protocol = htons(ETH_P_802_3);
+	/* Get length including ethhdr */
+	len = skb->len;
+
+transmit:
+	rc = nss_rmnet_rx_tx_buf(ctx->nss_ctx, skb);
+	if (rc == NSS_TX_SUCCESS) {
+		/* Increment rmnet_data device stats.
+		 * Don't call rmnet_data_vnd_rx_fixup() to do this, as
+		 * there's no guarantee the skb pointer is still valid.
+		 */
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += len;
+		rmnet_nss_inc_stat(RMNET_NSS_TX_SUCCESS);
+		return 0;
+	} else if (rc == NSS_TX_FAILURE_QUEUE) {
+		rmnet_nss_inc_stat(RMNET_NSS_TX_BUSY_LOOP);
+		goto transmit;
+	} else if (rc == NSS_TX_FAILURE_NOT_ENABLED) {
+		/* New stats */
+		rmnet_nss_receive(dev, skb, NULL);
+		return 0;
+	}
+
+fail:
+	rmnet_nss_inc_stat(RMNET_NSS_TX_FAIL);
+	kfree_skb(skb);
+	return 1;
+}
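+
+/* Note: NSS_TX_FAILURE_QUEUE is retried via the transmit label above in a
+ * tight loop; RMNET_NSS_TX_BUSY_LOOP counts those retries.
+ */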
+ * We are guaranteed to have an ethernet packet here from the NSS hardware, + * We need to pull the header off and invoke our ndo_start_xmit function + * to handle transmitting the packet to the network stack. + */ +void rmnet_nss_xmit(struct net_device *dev, struct sk_buff *skb) +{ + int rc; + + skb_pull(skb, sizeof(struct ethhdr)); + rmnet_nss_inc_stat(RMNET_NSS_RX_ETH); + + /* Use top-half entry point for the netdev so that we enable QDisc support for RmNet redirect. */ + skb_reset_network_header(skb); + skb->dev = dev; + switch (skb->data[0] & 0xF0) { + case 0x40: + skb->protocol = htons(ETH_P_IP); + break; + case 0x60: + skb->protocol = htons(ETH_P_IPV6); + break; + default: + break; + } + if (rmnet_mark_skb) + rmnet_mark_skb(dev, skb); + + rc = dev_queue_xmit(skb); + if (unlikely(rc != 0)) { + rmnet_nss_inc_stat(RMNET_NSS_RX_BUSY); + } +} + +/* Create and register an NSS context for an rmnet_data device */ +int rmnet_nss_create_vnd(struct net_device *dev) +{ + struct rmnet_nss_ctx *ctx; + + ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC); + if (!ctx) + return -ENOMEM; + + ctx->rmnet_dev = dev; + ctx->nss_ctx = nss_rmnet_rx_create(dev); + if (!ctx->nss_ctx) { + kfree(ctx); + return -1; + } + + nss_rmnet_rx_register(ctx->nss_ctx, rmnet_nss_receive, dev); + nss_rmnet_rx_xmit_callback_register(ctx->nss_ctx, rmnet_nss_xmit); + hash_add_ptr(rmnet_nss_ctx_hashtable, &ctx->hnode, dev); + return 0; +} + +/* Unregister and destroy the NSS context for an rmnet_data device */ +int rmnet_nss_free_vnd(struct net_device *dev) +{ + struct rmnet_nss_ctx *ctx; + + ctx = rmnet_nss_find_ctx(dev); + rmnet_nss_free_ctx(ctx); + + return 0; +} + +static const struct rmnet_nss_cb rmnet_nss = { + .nss_create = rmnet_nss_create_vnd, + .nss_free = rmnet_nss_free_vnd, + .nss_tx = rmnet_nss_tx, +}; + +int __init rmnet_nss_init(void) +{ + pr_err("%s(): initializing rmnet_nss\n", __func__); + RCU_INIT_POINTER(rmnet_nss_callbacks, &rmnet_nss); + rmnet_mark_skb = symbol_get(qmi_rmnet_mark_skb); + return 0; +} + +void __exit rmnet_nss_exit(void) +{ + struct hlist_node *tmp; + struct rmnet_nss_ctx *ctx; + int bkt; + + pr_err("%s(): exiting rmnet_nss\n", __func__); + RCU_INIT_POINTER(rmnet_nss_callbacks, NULL); + if (rmnet_mark_skb) + symbol_put(qmi_rmnet_mark_skb); + + /* Tear down all NSS contexts */ + hash_for_each_safe(rmnet_nss_ctx_hashtable, bkt, tmp, ctx, hnode) + rmnet_nss_free_ctx(ctx); +} + +MODULE_LICENSE("GPL v2"); +module_init(rmnet_nss_init); +module_exit(rmnet_nss_exit);