[Arm-dev] [PATCH] net: thunderx: add nic driver updates from mainline

Tue Apr 11 11:03:59 UTC 2017
Vadim Lomovtsev <Vadim.Lomovtsev at caviumnetworks.com>

The following patches have been picked up from mainline (a brief regeneration sketch follows the list):

36fa35d net: thunderx: Allow IPv6 frames with zero UDP checksum
78aacb6 net: thunderx: Fix invalid mac addresses for node1 interfaces
18de7ba net: thunderx: Fix LMAC mode debug prints for QSGMII mode
075ad76 net: thunderx: Fix PHY autoneg for SGMII QLM mode
c73e442 net: thunderx: avoid dereferencing xcv when NULL
fff37fd net: thunderx: Leave serdes lane config on 81/83xx to firmware
fff4ffd net: thunderx: Support to configure queue sizes from ethtool
171d87a net: thunderx: Make hfunc variable const type in nicvf_set_rxfh()
60dce04 net: thunderx: Fix error return code in nicvf_open()
bd3ad7d net: thunderx: Fix transmit queue timeout issue
430da20 net: thunderx: Pause frame support
d5b2d7a net: thunderx: Configure RED and backpressure levels
1cc7025 net: thunderx: Add ethtool support for supported ports and link modes.
5271156 net: thunderx: 80xx BGX0 configuration changes
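
For reference, a minimal sketch of how this combined backport can be
regenerated from a local mainline clone; the clone path, branch name and
base tag below are assumptions for illustration, not the exact workflow used:

  cd ~/linux                                   # assumed location of a mainline clone
  git checkout -b thunderx-backport <base-tag> # <base-tag> = the distro kernel's base version
  # Cherry-pick the commits listed above, oldest first; -x records the
  # "(cherry picked from commit ...)" line, -s adds the backporter's Signed-off-by.
  git cherry-pick -x -s 5271156 1cc7025 d5b2d7a 430da20 bd3ad7d 60dce04 171d87a \
      fff4ffd fff37fd c73e442 075ad76 18de7ba 78aacb6 36fa35d
  git format-patch --stdout <base-tag>..HEAD > 0001-thunderx-nic-updates.patch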

Signed-off-by: Vadim Lomovtsev <Vadim.Lomovtsev at caviumnetworks.com>
---
 SOURCES/0001-thunderx-nic-updates.patch | 1796 +++++++++++++++++++++++++++++++
 SPECS/kernel-aarch64.spec               |    2 +
 2 files changed, 1798 insertions(+)
 create mode 100644 SOURCES/0001-thunderx-nic-updates.patch
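
A quick way to sanity-check the result before posting; the grep pattern and
the _topdir define are illustrative assumptions, adjust to the actual
dist-git layout:

  grep -n "0001-thunderx-nic-updates.patch" SPECS/kernel-aarch64.spec
  # Run %prep only: unpack the kernel sources and verify all patches still apply.
  rpmbuild -bp --nodeps --define "_topdir $(pwd)" SPECS/kernel-aarch64.spec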

diff --git a/SOURCES/0001-thunderx-nic-updates.patch b/SOURCES/0001-thunderx-nic-updates.patch
new file mode 100644
index 0000000..8f0e3e4
--- /dev/null
+++ b/SOURCES/0001-thunderx-nic-updates.patch
@@ -0,0 +1,1796 @@
+From 61c0e7db4f29bb556cdeed8c6d540a29bff32797 Mon Sep 17 00:00:00 2001
+From: Sunil Goutham <sgoutham at cavium.com>
+Date: Thu, 24 Nov 2016 14:48:00 +0530
+Subject: [PATCH 01/14] net: thunderx: 80xx BGX0 configuration changes
+
+On 80xx only one lane each of DLM0 and DLM1 (of BGX0) can be
+used, so even though the lmac count may be 2, LMAC1 should use
+the serdes lane of DLM1. Since it's not possible to distinguish
+80xx from 81xx, as the PCI devids are the same, this patch adds
+this config support by relying on what the firmware configures
+the lmacs with.
+
+Signed-off-by: Sunil Goutham <sgoutham at cavium.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+(cherry picked from commit 5271156b1a93eeb9a1a48d8a4be5074b3c31a5cd)
+Signed-off-by: Vadim Lomovtsev <Vadim.Lomovtsev at caviumnetworks.com>
+---
+ drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 20 +++++++++++++++++---
+ 1 file changed, 17 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+index a3f4f83..8e94d9c 100644
+--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+@@ -969,11 +969,25 @@ static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
+ 		lmac_set_training(bgx, lmac, lmac->lmacid);
+ 		lmac_set_lane2sds(bgx, lmac);
+ 
+-		/* Set LMAC type of other lmac on same DLM i.e LMAC 1/3 */
+ 		olmac = &bgx->lmac[idx + 1];
+-		olmac->lmac_type = lmac->lmac_type;
++		/*  Check if other LMAC on the same DLM is already configured by
++		 *  firmware, if so use the same config or else set as same, as
++		 *  that of LMAC 0/2.
++		 *  This check is needed as on 80xx only one lane of each of the
++		 *  DLM of BGX0 is used, so have to rely on firmware for
++		 *  distingushing 80xx from 81xx.
++		 */
++		cmr_cfg = bgx_reg_read(bgx, idx + 1, BGX_CMRX_CFG);
++		lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
++		lane_to_sds = (u8)(cmr_cfg & 0xFF);
++		if ((lmac_type == 0) && (lane_to_sds == 0xE4)) {
++			olmac->lmac_type = lmac->lmac_type;
++			lmac_set_lane2sds(bgx, olmac);
++		} else {
++			olmac->lmac_type = lmac_type;
++			olmac->lane_to_sds = lane_to_sds;
++		}
+ 		lmac_set_training(bgx, olmac, olmac->lmacid);
+-		lmac_set_lane2sds(bgx, olmac);
+ 	}
+ }
+ 
+-- 
+2.5.5
+
+
+From 06f5f7f7917c40710e394aaa846097162394d599 Mon Sep 17 00:00:00 2001
+From: Thanneeru Srinivasulu <tsrinivasulu at cavium.com>
+Date: Thu, 24 Nov 2016 14:48:01 +0530
+Subject: [PATCH 02/14] net: thunderx: Add ethtool support for supported ports
+ and link modes.
+
+Signed-off-by: Thanneeru Srinivasulu <tsrinivasulu at cavium.com>
+Signed-off-by: Sunil Goutham <sgoutham at cavium.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+(cherry picked from commit 1cc702591bae9f70b1aa2f9cb60241961bc1858a)
+Signed-off-by: Vadim Lomovtsev <Vadim.Lomovtsev at caviumnetworks.com>
+---
+ drivers/net/ethernet/cavium/thunder/nic.h          |  2 ++
+ drivers/net/ethernet/cavium/thunder/nic_main.c     |  1 +
+ .../net/ethernet/cavium/thunder/nicvf_ethtool.c    | 36 ++++++++++++++++++++--
+ drivers/net/ethernet/cavium/thunder/nicvf_main.c   |  1 +
+ drivers/net/ethernet/cavium/thunder/thunder_bgx.c  |  1 +
+ 5 files changed, 38 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
+index cec0a86..a32ce99 100644
+--- a/drivers/net/ethernet/cavium/thunder/nic.h
++++ b/drivers/net/ethernet/cavium/thunder/nic.h
+@@ -291,6 +291,7 @@ struct nicvf {
+ 	u8			node;
+ 	u8			cpi_alg;
+ 	bool			link_up;
++	u8			mac_type;
+ 	u8			duplex;
+ 	u32			speed;
+ 	bool			tns_mode;
+@@ -445,6 +446,7 @@ struct bgx_stats_msg {
+ /* Physical interface link status */
+ struct bgx_link_status {
+ 	u8    msg;
++	u8    mac_type;
+ 	u8    link_up;
+ 	u8    duplex;
+ 	u32   speed;
+diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
+index aa3563f..4338742 100644
+--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
++++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
+@@ -1392,6 +1392,7 @@ static void nic_poll_for_link(struct work_struct *work)
+ 			mbx.link_status.link_up = link.link_up;
+ 			mbx.link_status.duplex = link.duplex;
+ 			mbx.link_status.speed = link.speed;
++			mbx.link_status.mac_type = link.mac_type;
+ 			nic_send_msg_to_vf(nic, vf, &mbx);
+ 		}
+ 	}
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+index 432bf6b..d4d76a7 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+@@ -130,12 +130,42 @@ static int nicvf_get_settings(struct net_device *netdev,
+ 		return 0;
+ 	}
+ 
+-	if (nic->speed <= 1000) {
+-		cmd->port = PORT_MII;
++	switch (nic->speed) {
++	case SPEED_1000:
++		cmd->port = PORT_MII | PORT_TP;
+ 		cmd->autoneg = AUTONEG_ENABLE;
+-	} else {
++		cmd->supported |= SUPPORTED_MII | SUPPORTED_TP;
++		cmd->supported |= SUPPORTED_1000baseT_Full |
++				  SUPPORTED_1000baseT_Half |
++				  SUPPORTED_100baseT_Full  |
++				  SUPPORTED_100baseT_Half  |
++				  SUPPORTED_10baseT_Full   |
++				  SUPPORTED_10baseT_Half;
++		cmd->supported |= SUPPORTED_Autoneg;
++		cmd->advertising |= ADVERTISED_1000baseT_Full |
++				    ADVERTISED_1000baseT_Half |
++				    ADVERTISED_100baseT_Full  |
++				    ADVERTISED_100baseT_Half  |
++				    ADVERTISED_10baseT_Full   |
++				    ADVERTISED_10baseT_Half;
++		break;
++	case SPEED_10000:
++		if (nic->mac_type == BGX_MODE_RXAUI) {
++			cmd->port = PORT_TP;
++			cmd->supported |= SUPPORTED_TP;
++		} else {
++			cmd->port = PORT_FIBRE;
++			cmd->supported |= SUPPORTED_FIBRE;
++		}
++		cmd->autoneg = AUTONEG_DISABLE;
++		cmd->supported |= SUPPORTED_10000baseT_Full;
++		break;
++	case SPEED_40000:
+ 		cmd->port = PORT_FIBRE;
+ 		cmd->autoneg = AUTONEG_DISABLE;
++		cmd->supported |= SUPPORTED_FIBRE;
++		cmd->supported |= SUPPORTED_40000baseCR4_Full;
++		break;
+ 	}
+ 	cmd->duplex = nic->duplex;
+ 	ethtool_cmd_speed_set(cmd, nic->speed);
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+index 4f5c917..8ad63b7 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+@@ -221,6 +221,7 @@ static void  nicvf_handle_mbx_intr(struct nicvf *nic)
+ 		nic->link_up = mbx.link_status.link_up;
+ 		nic->duplex = mbx.link_status.duplex;
+ 		nic->speed = mbx.link_status.speed;
++		nic->mac_type = mbx.link_status.mac_type;
+ 		if (nic->link_up) {
+ 			netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
+ 				    nic->netdev->name, nic->speed,
+diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+index 8e94d9c..c8a1256 100644
+--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+@@ -162,6 +162,7 @@ void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
+ 		return;
+ 
+ 	lmac = &bgx->lmac[lmacid];
++	link->mac_type = lmac->lmac_type;
+ 	link->link_up = lmac->link_up;
+ 	link->duplex = lmac->last_duplex;
+ 	link->speed = lmac->last_speed;
+-- 
+2.5.5
+
+
+From 1885df686d94092a69c23add969ba6553fc2aa7e Mon Sep 17 00:00:00 2001
+From: Sunil Goutham <sgoutham at cavium.com>
+Date: Thu, 24 Nov 2016 14:48:02 +0530
+Subject: [PATCH 03/14] net: thunderx: Configure RED and backpressure levels
+
+This patch enables moving average calculation of Rx packet resources
+and configures RED and backpressure levels for both CQ and RBDR.
+It also initializes SQ's CQ_LIMIT properly.
+
+Signed-off-by: Sunil Goutham <sgoutham at cavium.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+(cherry picked from commit d5b2d7a7184062c436f9a2b237e77bdb8e06a936)
+Signed-off-by: Vadim Lomovtsev <Vadim.Lomovtsev at caviumnetworks.com>
+---
+ drivers/net/ethernet/cavium/thunder/nic_main.c     |  9 ++++++++
+ drivers/net/ethernet/cavium/thunder/nicvf_queues.c |  9 ++++++--
+ drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 24 +++++++++++++++++-----
+ drivers/net/ethernet/cavium/thunder/q_struct.h     |  8 ++++++--
+ 4 files changed, 41 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
+index 4338742..089d4b7 100644
+--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
++++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
+@@ -937,6 +937,15 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
+ 
+ 	bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);
+ 
++	/* Enable moving average calculation.
++	 * Keep the LVL/AVG delay to HW enforced minimum so that, not too many
++	 * packets sneek in between average calculations.
++	 */
++	nic_reg_write(nic, NIC_PF_CQ_AVG_CFG,
++		      (BIT_ULL(20) | 0x2ull << 14 | 0x1));
++	nic_reg_write(nic, NIC_PF_RRM_AVG_CFG,
++		      (BIT_ULL(20) | 0x3ull << 14 | 0x1));
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+index c17acf7..d5eda9d 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+@@ -544,14 +544,18 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
+ 	nicvf_send_msg_to_pf(nic, &mbx);
+ 
+ 	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
+-	mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
++	mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
++		     (RQ_PASS_RBDR_LVL << 16) | (RQ_PASS_CQ_LVL << 8) |
++		     (qs->vnic_id << 0);
+ 	nicvf_send_msg_to_pf(nic, &mbx);
+ 
+ 	/* RQ drop config
+ 	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
+ 	 */
+ 	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
+-	mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
++	mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
++		     (RQ_PASS_RBDR_LVL << 40) | (RQ_DROP_RBDR_LVL << 32) |
++		     (RQ_PASS_CQ_LVL << 16) | (RQ_DROP_CQ_LVL << 8);
+ 	nicvf_send_msg_to_pf(nic, &mbx);
+ 
+ 	if (!nic->sqs_mode && (qidx == 0)) {
+@@ -647,6 +651,7 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
+ 	sq_cfg.ldwb = 0;
+ 	sq_cfg.qsize = SND_QSIZE;
+ 	sq_cfg.tstmp_bgx_intf = 0;
++	sq_cfg.cq_limit = 0;
+ 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
+ 
+ 	/* Set threshold value for interrupt generation */
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+index 2e3c940..20511f2 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+@@ -85,12 +85,26 @@
+ 
+ #define MAX_CQES_FOR_TX		((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
+ 				 MAX_CQE_PER_PKT_XMIT)
+-/* Calculate number of CQEs to reserve for all SQEs.
+- * Its 1/256th level of CQ size.
+- * '+ 1' to account for pipelining
++
++/* RED and Backpressure levels of CQ for pkt reception
++ * For CQ, level is a measure of emptiness i.e 0x0 means full
++ * eg: For CQ of size 4K, and for pass/drop levels of 128/96
++ * HW accepts pkt if unused CQE >= 2048
++ * RED accepts pkt if unused CQE < 2048 & >= 1536
++ * DROPs pkts if unused CQE < 1536
++ */
++#define RQ_PASS_CQ_LVL		128ULL
++#define RQ_DROP_CQ_LVL		96ULL
++
++/* RED and Backpressure levels of RBDR for pkt reception
++ * For RBDR, level is a measure of fullness i.e 0x0 means empty
++ * eg: For RBDR of size 8K, and for pass/drop levels of 4/0
++ * HW accepts pkt if unused RBs >= 256
++ * RED accepts pkt if unused RBs < 256 & >= 0
++ * DROPs pkts if unused RBs < 0
+  */
+-#define RQ_CQ_DROP		((256 / (CMP_QUEUE_LEN / \
+-				 (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
++#define RQ_PASS_RBDR_LVL	8ULL
++#define RQ_DROP_RBDR_LVL	0ULL
+ 
+ /* Descriptor size in bytes */
+ #define SND_QUEUE_DESC_SIZE	16
+diff --git a/drivers/net/ethernet/cavium/thunder/q_struct.h b/drivers/net/ethernet/cavium/thunder/q_struct.h
+index 9e6d987..f363472 100644
+--- a/drivers/net/ethernet/cavium/thunder/q_struct.h
++++ b/drivers/net/ethernet/cavium/thunder/q_struct.h
+@@ -624,7 +624,9 @@ struct cq_cfg {
+ 
+ struct sq_cfg {
+ #if defined(__BIG_ENDIAN_BITFIELD)
+-	u64 reserved_20_63:44;
++	u64 reserved_32_63:32;
++	u64 cq_limit:8;
++	u64 reserved_20_23:4;
+ 	u64 ena:1;
+ 	u64 reserved_18_18:1;
+ 	u64 reset:1;
+@@ -642,7 +644,9 @@ struct sq_cfg {
+ 	u64 reset:1;
+ 	u64 reserved_18_18:1;
+ 	u64 ena:1;
+-	u64 reserved_20_63:44;
++	u64 reserved_20_23:4;
++	u64 cq_limit:8;
++	u64 reserved_32_63:32;
+ #endif
+ };
+ 
+-- 
+2.5.5
+
+
+From 2336e358e5ced0f20cacb6a24e233c772ff88eae Mon Sep 17 00:00:00 2001
+From: Sunil Goutham <sgoutham at cavium.com>
+Date: Thu, 24 Nov 2016 14:48:03 +0530
+Subject: [PATCH 04/14] net: thunderx: Pause frame support
+
+Enable pause frames on both the Rx and Tx sides and configure the
+pause interval, etc. Support for enabling/disabling pause frames
+on Rx/Tx via ethtool has also been added.
+
+Signed-off-by: Sunil Goutham <sgoutham at cavium.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+(cherry picked from commit 430da208089ba74ff3d2992d80387c8ea5cabd0e)
+Signed-off-by: Vadim Lomovtsev <Vadim.Lomovtsev at caviumnetworks.com>
+---
+ drivers/net/ethernet/cavium/thunder/nic.h          | 17 +++++++
+ drivers/net/ethernet/cavium/thunder/nic_main.c     | 27 +++++++++++
+ .../net/ethernet/cavium/thunder/nicvf_ethtool.c    | 51 +++++++++++++++++++++
+ drivers/net/ethernet/cavium/thunder/nicvf_main.c   |  6 +++
+ drivers/net/ethernet/cavium/thunder/thunder_bgx.c  | 53 ++++++++++++++++++++++
+ drivers/net/ethernet/cavium/thunder/thunder_bgx.h  | 12 +++++
+ 6 files changed, 166 insertions(+)
+
+diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
+index a32ce99..9ec29f9 100644
+--- a/drivers/net/ethernet/cavium/thunder/nic.h
++++ b/drivers/net/ethernet/cavium/thunder/nic.h
+@@ -148,6 +148,12 @@ struct nicvf_rss_info {
+ 	u64 key[RSS_HASH_KEY_SIZE];
+ } ____cacheline_aligned_in_smp;
+ 
++struct nicvf_pfc {
++	u8    autoneg;
++	u8    fc_rx;
++	u8    fc_tx;
++};
++
+ enum rx_stats_reg_offset {
+ 	RX_OCTS = 0x0,
+ 	RX_UCAST = 0x1,
+@@ -297,6 +303,7 @@ struct nicvf {
+ 	bool			tns_mode;
+ 	bool			loopback_supported;
+ 	struct nicvf_rss_info	rss_info;
++	struct nicvf_pfc	pfc;
+ 	struct tasklet_struct	qs_err_task;
+ 	struct work_struct	reset_task;
+ 
+@@ -357,6 +364,7 @@ struct nicvf {
+ #define	NIC_MBOX_MSG_SNICVF_PTR		0x15	/* Send sqet nicvf ptr to PVF */
+ #define	NIC_MBOX_MSG_LOOPBACK		0x16	/* Set interface in loopback */
+ #define	NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17	/* Reset statistics counters */
++#define	NIC_MBOX_MSG_PFC		0x18	/* Pause frame control */
+ #define	NIC_MBOX_MSG_CFG_DONE		0xF0	/* VF configuration done */
+ #define	NIC_MBOX_MSG_SHUTDOWN		0xF1	/* VF is being shutdown */
+ 
+@@ -500,6 +508,14 @@ struct reset_stat_cfg {
+ 	u16   sq_stat_mask;
+ };
+ 
++struct pfc {
++	u8    msg;
++	u8    get; /* Get or set PFC settings */
++	u8    autoneg;
++	u8    fc_rx;
++	u8    fc_tx;
++};
++
+ /* 128 bit shared memory between PF and each VF */
+ union nic_mbx {
+ 	struct { u8 msg; }	msg;
+@@ -518,6 +534,7 @@ union nic_mbx {
+ 	struct nicvf_ptr	nicvf;
+ 	struct set_loopback	lbk;
+ 	struct reset_stat_cfg	reset_stat;
++	struct pfc		pfc;
+ };
+ 
+ #define NIC_NODE_ID_MASK	0x03
+diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
+index 089d4b7..9800a5d 100644
+--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
++++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
+@@ -1026,6 +1026,30 @@ static void nic_enable_vf(struct nicpf *nic, int vf, bool enable)
+ 	bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable);
+ }
+ 
++static void nic_pause_frame(struct nicpf *nic, int vf, struct pfc *cfg)
++{
++	int bgx, lmac;
++	struct pfc pfc;
++	union nic_mbx mbx = {};
++
++	if (vf >= nic->num_vf_en)
++		return;
++	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
++	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
++
++	if (cfg->get) {
++		bgx_lmac_get_pfc(nic->node, bgx, lmac, &pfc);
++		mbx.pfc.msg = NIC_MBOX_MSG_PFC;
++		mbx.pfc.autoneg = pfc.autoneg;
++		mbx.pfc.fc_rx = pfc.fc_rx;
++		mbx.pfc.fc_tx = pfc.fc_tx;
++		nic_send_msg_to_vf(nic, vf, &mbx);
++	} else {
++		bgx_lmac_set_pfc(nic->node, bgx, lmac, cfg);
++		nic_mbx_send_ack(nic, vf);
++	}
++}
++
+ /* Interrupt handler to handle mailbox messages from VFs */
+ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
+ {
+@@ -1167,6 +1191,9 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
+ 	case NIC_MBOX_MSG_RESET_STAT_COUNTER:
+ 		ret = nic_reset_stat_counters(nic, vf, &mbx.reset_stat);
+ 		break;
++	case NIC_MBOX_MSG_PFC:
++		nic_pause_frame(nic, vf, &mbx.pfc);
++		goto unlock;
+ 	default:
+ 		dev_err(&nic->pdev->dev,
+ 			"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+index d4d76a7..b048241 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+@@ -720,6 +720,55 @@ static int nicvf_set_channels(struct net_device *dev,
+ 	return err;
+ }
+ 
++static void nicvf_get_pauseparam(struct net_device *dev,
++				 struct ethtool_pauseparam *pause)
++{
++	struct nicvf *nic = netdev_priv(dev);
++	union nic_mbx mbx = {};
++
++	/* Supported only for 10G/40G interfaces */
++	if ((nic->mac_type == BGX_MODE_SGMII) ||
++	    (nic->mac_type == BGX_MODE_QSGMII) ||
++	    (nic->mac_type == BGX_MODE_RGMII))
++		return;
++
++	mbx.pfc.msg = NIC_MBOX_MSG_PFC;
++	mbx.pfc.get = 1;
++	if (!nicvf_send_msg_to_pf(nic, &mbx)) {
++		pause->autoneg = nic->pfc.autoneg;
++		pause->rx_pause = nic->pfc.fc_rx;
++		pause->tx_pause = nic->pfc.fc_tx;
++	}
++}
++
++static int nicvf_set_pauseparam(struct net_device *dev,
++				struct ethtool_pauseparam *pause)
++{
++	struct nicvf *nic = netdev_priv(dev);
++	union nic_mbx mbx = {};
++
++	/* Supported only for 10G/40G interfaces */
++	if ((nic->mac_type == BGX_MODE_SGMII) ||
++	    (nic->mac_type == BGX_MODE_QSGMII) ||
++	    (nic->mac_type == BGX_MODE_RGMII))
++		return -EOPNOTSUPP;
++
++	if (pause->autoneg)
++		return -EOPNOTSUPP;
++
++	mbx.pfc.msg = NIC_MBOX_MSG_PFC;
++	mbx.pfc.get = 0;
++	mbx.pfc.fc_rx = pause->rx_pause;
++	mbx.pfc.fc_tx = pause->tx_pause;
++	if (nicvf_send_msg_to_pf(nic, &mbx))
++		return -EAGAIN;
++
++	nic->pfc.fc_rx = pause->rx_pause;
++	nic->pfc.fc_tx = pause->tx_pause;
++
++	return 0;
++}
++
+ static const struct ethtool_ops nicvf_ethtool_ops = {
+ 	.get_settings		= nicvf_get_settings,
+ 	.get_link		= nicvf_get_link,
+@@ -741,6 +790,8 @@ static const struct ethtool_ops nicvf_ethtool_ops = {
+ 	.set_rxfh		= nicvf_set_rxfh,
+ 	.get_channels		= nicvf_get_channels,
+ 	.set_channels		= nicvf_set_channels,
++	.get_pauseparam         = nicvf_get_pauseparam,
++	.set_pauseparam         = nicvf_set_pauseparam,
+ 	.get_ts_info		= ethtool_op_get_ts_info,
+ };
+ 
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+index 8ad63b7..3761e44 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+@@ -256,6 +256,12 @@ static void  nicvf_handle_mbx_intr(struct nicvf *nic)
+ 		nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf;
+ 		nic->pf_acked = true;
+ 		break;
++	case NIC_MBOX_MSG_PFC:
++		nic->pfc.autoneg = mbx.pfc.autoneg;
++		nic->pfc.fc_rx = mbx.pfc.fc_rx;
++		nic->pfc.fc_tx = mbx.pfc.fc_tx;
++		nic->pf_acked = true;
++		break;
+ 	default:
+ 		netdev_err(nic->netdev,
+ 			   "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
+diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+index c8a1256..1744b4fc 100644
+--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+@@ -213,6 +213,47 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
+ }
+ EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
+ 
++void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
++{
++	struct pfc *pfc = (struct pfc *)pause;
++	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
++	struct lmac *lmac;
++	u64 cfg;
++
++	if (!bgx)
++		return;
++	lmac = &bgx->lmac[lmacid];
++	if (lmac->is_sgmii)
++		return;
++
++	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
++	pfc->fc_rx = cfg & RX_EN;
++	pfc->fc_tx = cfg & TX_EN;
++	pfc->autoneg = 0;
++}
++EXPORT_SYMBOL(bgx_lmac_get_pfc);
++
++void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause)
++{
++	struct pfc *pfc = (struct pfc *)pause;
++	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
++	struct lmac *lmac;
++	u64 cfg;
++
++	if (!bgx)
++		return;
++	lmac = &bgx->lmac[lmacid];
++	if (lmac->is_sgmii)
++		return;
++
++	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
++	cfg &= ~(RX_EN | TX_EN);
++	cfg |= (pfc->fc_rx ? RX_EN : 0x00);
++	cfg |= (pfc->fc_tx ? TX_EN : 0x00);
++	bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, cfg);
++}
++EXPORT_SYMBOL(bgx_lmac_set_pfc);
++
+ static void bgx_sgmii_change_link_state(struct lmac *lmac)
+ {
+ 	struct bgx *bgx = lmac->bgx;
+@@ -524,6 +565,18 @@ static int bgx_lmac_xaui_init(struct bgx *bgx, struct lmac *lmac)
+ 	cfg |= SMU_TX_CTL_DIC_EN;
+ 	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);
+ 
++	/* Enable receive and transmission of pause frames */
++	bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, ((0xffffULL << 32) |
++		      BCK_EN | DRP_EN | TX_EN | RX_EN));
++	/* Configure pause time and interval */
++	bgx_reg_write(bgx, lmacid,
++		      BGX_SMUX_TX_PAUSE_PKT_TIME, DEFAULT_PAUSE_TIME);
++	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL);
++	cfg &= ~0xFFFFull;
++	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL,
++		      cfg | (DEFAULT_PAUSE_TIME - 0x1000));
++	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_ZERO, 0x01);
++
+ 	/* take lmac_count into account */
+ 	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
+ 	/* max packet size */
+diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+index 01cc7c8..c18ebfe 100644
+--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+@@ -27,6 +27,7 @@
+ #define    MAX_BGX_CHANS_PER_LMAC		16
+ #define    MAX_DMAC_PER_LMAC			8
+ #define    MAX_FRAME_SIZE			9216
++#define    DEFAULT_PAUSE_TIME			0xFFFF
+ 
+ #define	   BGX_ID_MASK				0x3
+ 
+@@ -126,7 +127,10 @@
+ #define  SMU_RX_CTL_STATUS			(3ull << 0)
+ #define BGX_SMUX_TX_APPEND		0x20100
+ #define  SMU_TX_APPEND_FCS_D			BIT_ULL(2)
++#define BGX_SMUX_TX_PAUSE_PKT_TIME	0x20110
+ #define BGX_SMUX_TX_MIN_PKT		0x20118
++#define BGX_SMUX_TX_PAUSE_PKT_INTERVAL	0x20120
++#define BGX_SMUX_TX_PAUSE_ZERO		0x20138
+ #define BGX_SMUX_TX_INT			0x20140
+ #define BGX_SMUX_TX_CTL			0x20178
+ #define  SMU_TX_CTL_DIC_EN			BIT_ULL(0)
+@@ -136,6 +140,11 @@
+ #define BGX_SMUX_CTL			0x20200
+ #define  SMU_CTL_RX_IDLE			BIT_ULL(0)
+ #define  SMU_CTL_TX_IDLE			BIT_ULL(1)
++#define	BGX_SMUX_CBFC_CTL		0x20218
++#define	RX_EN					BIT_ULL(0)
++#define	TX_EN					BIT_ULL(1)
++#define	BCK_EN					BIT_ULL(2)
++#define	DRP_EN					BIT_ULL(3)
+ 
+ #define BGX_GMP_PCS_MRX_CTL		0x30000
+ #define	 PCS_MRX_CTL_RST_AN			BIT_ULL(9)
+@@ -207,6 +216,9 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
+ void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
+ void bgx_lmac_internal_loopback(int node, int bgx_idx,
+ 				int lmac_idx, bool enable);
++void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause);
++void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause);
++
+ void xcv_init_hw(void);
+ void xcv_setup_link(bool link_up, int link_speed);
+ 
+-- 
+2.5.5
+
+
+From 4dcce09dc780208de07a8d955ee25affbb75b041 Mon Sep 17 00:00:00 2001
+From: Sunil Goutham <sgoutham at cavium.com>
+Date: Thu, 1 Dec 2016 18:24:28 +0530
+Subject: [PATCH 05/14] net: thunderx: Fix transmit queue timeout issue
+
+The transmit queue timeout issue is seen in two cases:
+- Due to a race condition between setting stop_queue at xmit()
+  and checking for stopped_queue in the NAPI poll routine, at times
+  transmission from an SQ comes to a halt. This is fixed
+  by using barriers and by adding a check for SQ free descriptors,
+  in case the SQ is stopped and there are only CQE_RX i.e. no CQE_TX.
+- Contrary to an assumption, a HW erratum where HW doesn't stop
+  transmission even though there are not enough CQEs available for a
+  CQE_TX is not fixed in T88 pass 2.x. This results in a Qset error
+  with 'CQ_WR_FULL' stalling transmission. This is fixed by adjusting
+  the RXQ's RED levels for the CQ level such that there is always
+  enough space left for CQE_TXs.
+
+Signed-off-by: Sunil Goutham <sgoutham at cavium.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+(cherry picked from commit bd3ad7d3a14b07aeeb4f92abc757672719e2a0eb)
+Signed-off-by: Vadim Lomovtsev <Vadim.Lomovtsev at caviumnetworks.com>
+---
+ drivers/net/ethernet/cavium/thunder/nicvf_main.c   | 52 ++++++++++++++++++----
+ drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 24 ++--------
+ drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 15 ++++---
+ 3 files changed, 54 insertions(+), 37 deletions(-)
+
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+index 3761e44..06aba60 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+@@ -643,6 +643,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
+ 	struct cmp_queue *cq = &qs->cq[cq_idx];
+ 	struct cqe_rx_t *cq_desc;
+ 	struct netdev_queue *txq;
++	struct snd_queue *sq;
+ 	unsigned int tx_pkts = 0, tx_bytes = 0;
+ 
+ 	spin_lock_bh(&cq->lock);
+@@ -708,16 +709,20 @@ loop:
+ 
+ done:
+ 	/* Wakeup TXQ if its stopped earlier due to SQ full */
+-	if (tx_done) {
++	sq = &nic->qs->sq[cq_idx];
++	if (tx_done ||
++	    (atomic_read(&sq->free_cnt) >= MIN_SQ_DESC_PER_PKT_XMIT)) {
+ 		netdev = nic->pnicvf->netdev;
+ 		txq = netdev_get_tx_queue(netdev,
+ 					  nicvf_netdev_qidx(nic, cq_idx));
+ 		if (tx_pkts)
+ 			netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
+ 
+-		nic = nic->pnicvf;
++		/* To read updated queue and carrier status */
++		smp_mb();
+ 		if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
+-			netif_tx_start_queue(txq);
++			netif_tx_wake_queue(txq);
++			nic = nic->pnicvf;
+ 			this_cpu_inc(nic->drv_stats->txq_wake);
+ 			if (netif_msg_tx_err(nic))
+ 				netdev_warn(netdev,
+@@ -1053,6 +1058,9 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	struct nicvf *nic = netdev_priv(netdev);
+ 	int qid = skb_get_queue_mapping(skb);
+ 	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);
++	struct nicvf *snic;
++	struct snd_queue *sq;
++	int tmp;
+ 
+ 	/* Check for minimum packet length */
+ 	if (skb->len <= ETH_HLEN) {
+@@ -1060,13 +1068,39 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 		return NETDEV_TX_OK;
+ 	}
+ 
+-	if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
++	snic = nic;
++	/* Get secondary Qset's SQ structure */
++	if (qid >= MAX_SND_QUEUES_PER_QS) {
++		tmp = qid / MAX_SND_QUEUES_PER_QS;
++		snic = (struct nicvf *)nic->snicvf[tmp - 1];
++		if (!snic) {
++			netdev_warn(nic->netdev,
++				    "Secondary Qset#%d's ptr not initialized\n",
++				    tmp - 1);
++			dev_kfree_skb(skb);
++			return NETDEV_TX_OK;
++		}
++		qid = qid % MAX_SND_QUEUES_PER_QS;
++	}
++
++	sq = &snic->qs->sq[qid];
++	if (!netif_tx_queue_stopped(txq) &&
++	    !nicvf_sq_append_skb(snic, sq, skb, qid)) {
+ 		netif_tx_stop_queue(txq);
+-		this_cpu_inc(nic->drv_stats->txq_stop);
+-		if (netif_msg_tx_err(nic))
+-			netdev_warn(netdev,
+-				    "%s: Transmit ring full, stopping SQ%d\n",
+-				    netdev->name, qid);
++
++		/* Barrier, so that stop_queue visible to other cpus */
++		smp_mb();
++
++		/* Check again, incase another cpu freed descriptors */
++		if (atomic_read(&sq->free_cnt) > MIN_SQ_DESC_PER_PKT_XMIT) {
++			netif_tx_wake_queue(txq);
++		} else {
++			this_cpu_inc(nic->drv_stats->txq_stop);
++			if (netif_msg_tx_err(nic))
++				netdev_warn(netdev,
++					    "%s: Transmit ring full, stopping SQ%d\n",
++					    netdev->name, qid);
++		}
+ 		return NETDEV_TX_BUSY;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+index d5eda9d..8071864 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+@@ -1187,30 +1187,12 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
+ }
+ 
+ /* Append an skb to a SQ for packet transfer. */
+-int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
++int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
++			struct sk_buff *skb, u8 sq_num)
+ {
+ 	int i, size;
+ 	int subdesc_cnt, tso_sqe = 0;
+-	int sq_num, qentry;
+-	struct queue_set *qs;
+-	struct snd_queue *sq;
+-
+-	sq_num = skb_get_queue_mapping(skb);
+-	if (sq_num >= MAX_SND_QUEUES_PER_QS) {
+-		/* Get secondary Qset's SQ structure */
+-		i = sq_num / MAX_SND_QUEUES_PER_QS;
+-		if (!nic->snicvf[i - 1]) {
+-			netdev_warn(nic->netdev,
+-				    "Secondary Qset#%d's ptr not initialized\n",
+-				    i - 1);
+-			return 1;
+-		}
+-		nic = (struct nicvf *)nic->snicvf[i - 1];
+-		sq_num = sq_num % MAX_SND_QUEUES_PER_QS;
+-	}
+-
+-	qs = nic->qs;
+-	sq = &qs->sq[sq_num];
++	int qentry;
+ 
+ 	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
+ 	if (subdesc_cnt > atomic_read(&sq->free_cnt))
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+index 20511f2..9e21046 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+@@ -88,13 +88,13 @@
+ 
+ /* RED and Backpressure levels of CQ for pkt reception
+  * For CQ, level is a measure of emptiness i.e 0x0 means full
+- * eg: For CQ of size 4K, and for pass/drop levels of 128/96
+- * HW accepts pkt if unused CQE >= 2048
+- * RED accepts pkt if unused CQE < 2048 & >= 1536
+- * DROPs pkts if unused CQE < 1536
++ * eg: For CQ of size 4K, and for pass/drop levels of 160/144
++ * HW accepts pkt if unused CQE >= 2560
++ * RED accepts pkt if unused CQE < 2304 & >= 2560
++ * DROPs pkts if unused CQE < 2304
+  */
+-#define RQ_PASS_CQ_LVL		128ULL
+-#define RQ_DROP_CQ_LVL		96ULL
++#define RQ_PASS_CQ_LVL		160ULL
++#define RQ_DROP_CQ_LVL		144ULL
+ 
+ /* RED and Backpressure levels of RBDR for pkt reception
+  * For RBDR, level is a measure of fullness i.e 0x0 means empty
+@@ -306,7 +306,8 @@ void nicvf_sq_disable(struct nicvf *nic, int qidx);
+ void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
+ void nicvf_sq_free_used_descs(struct net_device *netdev,
+ 			      struct snd_queue *sq, int qidx);
+-int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb);
++int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
++			struct sk_buff *skb, u8 sq_num);
+ 
+ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
+ void nicvf_rbdr_task(unsigned long data);
+-- 
+2.5.5
+
+
+From ff39248cdad9577f0a344a00c2c83b4fa5b7b69e Mon Sep 17 00:00:00 2001
+From: Wei Yongjun <weiyongjun1 at huawei.com>
+Date: Wed, 11 Jan 2017 16:32:51 +0000
+Subject: [PATCH 06/14] net: thunderx: Fix error return code in nicvf_open()
+
+Fix to return a negative error code from the error handling
+case instead of 0, as done elsewhere in this function.
+
+Fixes: 712c31853440 ("net: thunderx: Program LMAC credits based on MTU")
+Signed-off-by: Wei Yongjun <weiyongjun1 at huawei.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+(cherry picked from commit 60dce04b81424940a3183c8e7e81e1234a27e906)
+Signed-off-by: Vadim Lomovtsev <Vadim.Lomovtsev at caviumnetworks.com>
+---
+ drivers/net/ethernet/cavium/thunder/nicvf_main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+index 06aba60..7a0bc52 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+@@ -1273,7 +1273,8 @@ int nicvf_open(struct net_device *netdev)
+ 	/* Configure receive side scaling and MTU */
+ 	if (!nic->sqs_mode) {
+ 		nicvf_rss_init(nic);
+-		if (nicvf_update_hw_max_frs(nic, netdev->mtu))
++		err = nicvf_update_hw_max_frs(nic, netdev->mtu);
++		if (err)
+ 			goto cleanup;
+ 
+ 		/* Clear percpu stats */
+-- 
+2.5.5
+
+
+From 3df3815a3942d7a6821ed7098f7c463da3001fd9 Mon Sep 17 00:00:00 2001
+From: Robert Richter <rrichter at cavium.com>
+Date: Wed, 11 Jan 2017 18:04:32 +0100
+Subject: [PATCH 07/14] net: thunderx: Make hfunc variable const type in
+ nicvf_set_rxfh()
+
+>From struct ethtool_ops:
+
+        int     (*set_rxfh)(struct net_device *, const u32 *indir,
+                            const u8 *key, const u8 hfunc);
+
+Change function arg of hfunc to const type.
+
+V2: Fixed indentation.
+
+Signed-off-by: Robert Richter <rrichter at cavium.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+(cherry picked from commit 171d87aca0da1fab6a15b96ad8e298216a5951b0)
+Signed-off-by: Vadim Lomovtsev <Vadim.Lomovtsev at caviumnetworks.com>
+---
+ drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+index b048241..72c7d1f 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+@@ -629,7 +629,7 @@ static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
+ }
+ 
+ static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
+-			  const u8 *hkey, u8 hfunc)
++			  const u8 *hkey, const u8 hfunc)
+ {
+ 	struct nicvf *nic = netdev_priv(dev);
+ 	struct nicvf_rss_info *rss = &nic->rss_info;
+-- 
+2.5.5
+
+
+From 6c10f2f37ca625a957ad893ee713d970306b9aef Mon Sep 17 00:00:00 2001
+From: Sunil Goutham <sgoutham at cavium.com>
+Date: Wed, 25 Jan 2017 17:36:23 +0530
+Subject: [PATCH 08/14] net: thunderx: Support to configure queue sizes from
+ ethtool
+
+Adds support to set Rx/Tx queue sizes from ethtool. Fixes
+an issue with retrieving the queue size. Also sets SQ's CQ_LIMIT
+based on the configured Tx queue size such that HW doesn't process
+SQEs when there is insufficient space in the CQ.
+
+Signed-off-by: Sunil Goutham <sgoutham at cavium.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+(cherry picked from commit fff4ffdde175bfa4516394db95ae56153224664b)
+Signed-off-by: Vadim Lomovtsev <Vadim.Lomovtsev at caviumnetworks.com>
+---
+ .../net/ethernet/cavium/thunder/nicvf_ethtool.c    | 39 ++++++++++++++++++++--
+ drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 19 +++++++++--
+ drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 16 ++++++---
+ 3 files changed, 65 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+index 72c7d1f..1c82700 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+@@ -465,12 +465,46 @@ static void nicvf_get_ringparam(struct net_device *netdev,
+ 	struct nicvf *nic = netdev_priv(netdev);
+ 	struct queue_set *qs = nic->qs;
+ 
+-	ring->rx_max_pending = MAX_RCV_BUF_COUNT;
+-	ring->rx_pending = qs->rbdr_len;
++	ring->rx_max_pending = MAX_CMP_QUEUE_LEN;
++	ring->rx_pending = qs->cq_len;
+ 	ring->tx_max_pending = MAX_SND_QUEUE_LEN;
+ 	ring->tx_pending = qs->sq_len;
+ }
+ 
++static int nicvf_set_ringparam(struct net_device *netdev,
++			       struct ethtool_ringparam *ring)
++{
++	struct nicvf *nic = netdev_priv(netdev);
++	struct queue_set *qs = nic->qs;
++	u32 rx_count, tx_count;
++
++	/* Due to HW errata this is not supported on T88 pass 1.x silicon */
++	if (pass1_silicon(nic->pdev))
++		return -EINVAL;
++
++	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
++		return -EINVAL;
++
++	tx_count = clamp_t(u32, ring->tx_pending,
++			   MIN_SND_QUEUE_LEN, MAX_SND_QUEUE_LEN);
++	rx_count = clamp_t(u32, ring->rx_pending,
++			   MIN_CMP_QUEUE_LEN, MAX_CMP_QUEUE_LEN);
++
++	if ((tx_count == qs->sq_len) && (rx_count == qs->cq_len))
++		return 0;
++
++	/* Permitted lengths are 1K, 2K, 4K, 8K, 16K, 32K, 64K */
++	qs->sq_len = rounddown_pow_of_two(tx_count);
++	qs->cq_len = rounddown_pow_of_two(rx_count);
++
++	if (netif_running(netdev)) {
++		nicvf_stop(netdev);
++		nicvf_open(netdev);
++	}
++
++	return 0;
++}
++
+ static int nicvf_get_rss_hash_opts(struct nicvf *nic,
+ 				   struct ethtool_rxnfc *info)
+ {
+@@ -782,6 +816,7 @@ static const struct ethtool_ops nicvf_ethtool_ops = {
+ 	.get_regs		= nicvf_get_regs,
+ 	.get_coalesce		= nicvf_get_coalesce,
+ 	.get_ringparam		= nicvf_get_ringparam,
++	.set_ringparam		= nicvf_set_ringparam,
+ 	.get_rxnfc		= nicvf_get_rxnfc,
+ 	.set_rxnfc		= nicvf_set_rxnfc,
+ 	.get_rxfh_key_size	= nicvf_get_rxfh_key_size,
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+index 8071864..142c045 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+@@ -601,7 +601,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
+ 	cq_cfg.ena = 1;
+ 	cq_cfg.reset = 0;
+ 	cq_cfg.caching = 0;
+-	cq_cfg.qsize = CMP_QSIZE;
++	cq_cfg.qsize = ilog2(qs->cq_len >> 10);
+ 	cq_cfg.avg_con = 0;
+ 	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
+ 
+@@ -649,9 +649,12 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
+ 	sq_cfg.ena = 1;
+ 	sq_cfg.reset = 0;
+ 	sq_cfg.ldwb = 0;
+-	sq_cfg.qsize = SND_QSIZE;
++	sq_cfg.qsize = ilog2(qs->sq_len >> 10);
+ 	sq_cfg.tstmp_bgx_intf = 0;
+-	sq_cfg.cq_limit = 0;
++	/* CQ's level at which HW will stop processing SQEs to avoid
++	 * transmitting a pkt with no space in CQ to post CQE_TX.
++	 */
++	sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len;
+ 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
+ 
+ 	/* Set threshold value for interrupt generation */
+@@ -812,11 +815,21 @@ int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
+ {
+ 	bool disable = false;
+ 	struct queue_set *qs = nic->qs;
++	struct queue_set *pqs = nic->pnicvf->qs;
+ 	int qidx;
+ 
+ 	if (!qs)
+ 		return 0;
+ 
++	/* Take primary VF's queue lengths.
++	 * This is needed to take queue lengths set from ethtool
++	 * into consideration.
++	 */
++	if (nic->sqs_mode && pqs) {
++		qs->cq_len = pqs->cq_len;
++		qs->sq_len = pqs->sq_len;
++	}
++
+ 	if (enable) {
+ 		if (nicvf_alloc_resources(nic))
+ 			return -ENOMEM;
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+index 9e21046..5cb84da 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+@@ -59,8 +59,9 @@
+ /* Default queue count per QS, its lengths and threshold values */
+ #define DEFAULT_RBDR_CNT	1
+ 
+-#define SND_QSIZE		SND_QUEUE_SIZE2
++#define SND_QSIZE		SND_QUEUE_SIZE0
+ #define SND_QUEUE_LEN		(1ULL << (SND_QSIZE + 10))
++#define MIN_SND_QUEUE_LEN	(1ULL << (SND_QUEUE_SIZE0 + 10))
+ #define MAX_SND_QUEUE_LEN	(1ULL << (SND_QUEUE_SIZE6 + 10))
+ #define SND_QUEUE_THRESH	2ULL
+ #define MIN_SQ_DESC_PER_PKT_XMIT	2
+@@ -70,11 +71,18 @@
+ /* Keep CQ and SQ sizes same, if timestamping
+  * is enabled this equation will change.
+  */
+-#define CMP_QSIZE		CMP_QUEUE_SIZE2
++#define CMP_QSIZE		CMP_QUEUE_SIZE0
+ #define CMP_QUEUE_LEN		(1ULL << (CMP_QSIZE + 10))
++#define MIN_CMP_QUEUE_LEN	(1ULL << (CMP_QUEUE_SIZE0 + 10))
++#define MAX_CMP_QUEUE_LEN	(1ULL << (CMP_QUEUE_SIZE6 + 10))
+ #define CMP_QUEUE_CQE_THRESH	(NAPI_POLL_WEIGHT / 2)
+ #define CMP_QUEUE_TIMER_THRESH	80 /* ~2usec */
+ 
++/* No of CQEs that might anyway gets used by HW due to pipelining
++ * effects irrespective of PASS/DROP/LEVELS being configured
++ */
++#define CMP_QUEUE_PIPELINE_RSVD 544
++
+ #define RBDR_SIZE		RBDR_SIZE0
+ #define RCV_BUF_COUNT		(1ULL << (RBDR_SIZE + 13))
+ #define MAX_RCV_BUF_COUNT	(1ULL << (RBDR_SIZE6 + 13))
+@@ -93,8 +101,8 @@
+  * RED accepts pkt if unused CQE < 2304 & >= 2560
+  * DROPs pkts if unused CQE < 2304
+  */
+-#define RQ_PASS_CQ_LVL		160ULL
+-#define RQ_DROP_CQ_LVL		144ULL
++#define RQ_PASS_CQ_LVL         192ULL
++#define RQ_DROP_CQ_LVL         184ULL
+ 
+ /* RED and Backpressure levels of RBDR for pkt reception
+  * For RBDR, level is a measure of fullness i.e 0x0 means empty
+-- 
+2.5.5
+
+
+From 1108842a9601ca29806c04b0b9652f8afc63db84 Mon Sep 17 00:00:00 2001
+From: Sunil Goutham <sgoutham at cavium.com>
+Date: Wed, 25 Jan 2017 17:36:24 +0530
+Subject: [PATCH 09/14] net: thunderx: Leave serdes lane config on 81/83xx to
+ firmware
+
+For DLMs and SLMs on 80/81/83xx, many lane configurations
+across different boards are coming up. The kernel doesn't have
+any way to identify the board type/info, but the firmware does,
+so get rid of figuring out the lane-to-serdes config and take
+whatever has been programmed by the low-level firmware.
+
+Signed-off-by: Sunil Goutham <sgoutham at cavium.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+(cherry picked from commit fff37fdad9df3b214294ef83943d92aa9e1c7ecc)
+Signed-off-by: Vadim Lomovtsev <Vadim.Lomovtsev at caviumnetworks.com>
+---
+ drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 95 +++++------------------
+ 1 file changed, 18 insertions(+), 77 deletions(-)
+
+diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+index 1744b4fc..aa5836c 100644
+--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+@@ -892,17 +892,15 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
+ 	struct device *dev = &bgx->pdev->dev;
+ 	struct lmac *lmac;
+ 	char str[20];
+-	u8 dlm;
+ 
+-	if (lmacid > bgx->max_lmac)
++	if (!bgx->is_dlm && lmacid)
+ 		return;
+ 
+ 	lmac = &bgx->lmac[lmacid];
+-	dlm = (lmacid / 2) + (bgx->bgx_id * 2);
+ 	if (!bgx->is_dlm)
+ 		sprintf(str, "BGX%d QLM mode", bgx->bgx_id);
+ 	else
+-		sprintf(str, "BGX%d DLM%d mode", bgx->bgx_id, dlm);
++		sprintf(str, "BGX%d LMAC%d mode", bgx->bgx_id, lmacid);
+ 
+ 	switch (lmac->lmac_type) {
+ 	case BGX_MODE_SGMII:
+@@ -988,7 +986,6 @@ static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
+ static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
+ {
+ 	struct lmac *lmac;
+-	struct lmac *olmac;
+ 	u64 cmr_cfg;
+ 	u8 lmac_type;
+ 	u8 lane_to_sds;
+@@ -1008,62 +1005,26 @@ static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
+ 		return;
+ 	}
+ 
+-	/* On 81xx BGX can be split across 2 DLMs
+-	 * firmware programs lmac_type of LMAC0 and LMAC2
++	/* For DLMs or SLMs on 80/81/83xx so many lane configurations
++	 * are possible and vary across boards. Also Kernel doesn't have
++	 * any way to identify board type/info and since firmware does,
++	 * just take lmac type and serdes lane config as is.
+ 	 */
+-	if ((idx == 0) || (idx == 2)) {
+-		cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
+-		lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
+-		lane_to_sds = (u8)(cmr_cfg & 0xFF);
+-		/* Check if config is not reset value */
+-		if ((lmac_type == 0) && (lane_to_sds == 0xE4))
+-			lmac->lmac_type = BGX_MODE_INVALID;
+-		else
+-			lmac->lmac_type = lmac_type;
+-		lmac_set_training(bgx, lmac, lmac->lmacid);
+-		lmac_set_lane2sds(bgx, lmac);
+-
+-		olmac = &bgx->lmac[idx + 1];
+-		/*  Check if other LMAC on the same DLM is already configured by
+-		 *  firmware, if so use the same config or else set as same, as
+-		 *  that of LMAC 0/2.
+-		 *  This check is needed as on 80xx only one lane of each of the
+-		 *  DLM of BGX0 is used, so have to rely on firmware for
+-		 *  distingushing 80xx from 81xx.
+-		 */
+-		cmr_cfg = bgx_reg_read(bgx, idx + 1, BGX_CMRX_CFG);
+-		lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
+-		lane_to_sds = (u8)(cmr_cfg & 0xFF);
+-		if ((lmac_type == 0) && (lane_to_sds == 0xE4)) {
+-			olmac->lmac_type = lmac->lmac_type;
+-			lmac_set_lane2sds(bgx, olmac);
+-		} else {
+-			olmac->lmac_type = lmac_type;
+-			olmac->lane_to_sds = lane_to_sds;
+-		}
+-		lmac_set_training(bgx, olmac, olmac->lmacid);
+-	}
+-}
+-
+-static bool is_dlm0_in_bgx_mode(struct bgx *bgx)
+-{
+-	struct lmac *lmac;
+-
+-	if (!bgx->is_dlm)
+-		return true;
+-
+-	lmac = &bgx->lmac[0];
+-	if (lmac->lmac_type == BGX_MODE_INVALID)
+-		return false;
+-
+-	return true;
++	cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
++	lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
++	lane_to_sds = (u8)(cmr_cfg & 0xFF);
++	/* Check if config is reset value */
++	if ((lmac_type == 0) && (lane_to_sds == 0xE4))
++		lmac->lmac_type = BGX_MODE_INVALID;
++	else
++		lmac->lmac_type = lmac_type;
++	lmac->lane_to_sds = lane_to_sds;
++	lmac_set_training(bgx, lmac, lmac->lmacid);
+ }
+ 
+ static void bgx_get_qlm_mode(struct bgx *bgx)
+ {
+ 	struct lmac *lmac;
+-	struct lmac *lmac01;
+-	struct lmac *lmac23;
+ 	u8  idx;
+ 
+ 	/* Init all LMAC's type to invalid */
+@@ -1079,29 +1040,9 @@ static void bgx_get_qlm_mode(struct bgx *bgx)
+ 	if (bgx->lmac_count > bgx->max_lmac)
+ 		bgx->lmac_count = bgx->max_lmac;
+ 
+-	for (idx = 0; idx < bgx->max_lmac; idx++)
+-		bgx_set_lmac_config(bgx, idx);
+-
+-	if (!bgx->is_dlm || bgx->is_rgx) {
+-		bgx_print_qlm_mode(bgx, 0);
+-		return;
+-	}
+-
+-	if (bgx->lmac_count) {
+-		bgx_print_qlm_mode(bgx, 0);
+-		bgx_print_qlm_mode(bgx, 2);
+-	}
+-
+-	/* If DLM0 is not in BGX mode then LMAC0/1 have
+-	 * to be configured with serdes lanes of DLM1
+-	 */
+-	if (is_dlm0_in_bgx_mode(bgx) || (bgx->lmac_count > 2))
+-		return;
+ 	for (idx = 0; idx < bgx->lmac_count; idx++) {
+-		lmac01 = &bgx->lmac[idx];
+-		lmac23 = &bgx->lmac[idx + 2];
+-		lmac01->lmac_type = lmac23->lmac_type;
+-		lmac01->lane_to_sds = lmac23->lane_to_sds;
++		bgx_set_lmac_config(bgx, idx);
++		bgx_print_qlm_mode(bgx, idx);
+ 	}
+ }
+ 
+-- 
+2.5.5
+
+
+From 4d3d951bfbbcb4938fa29d1756e8ae8e011802e2 Mon Sep 17 00:00:00 2001
+From: Vincent <vincent.stehle at laposte.net>
+Date: Mon, 30 Jan 2017 15:06:43 +0100
+Subject: [PATCH 10/14] net: thunderx: avoid dereferencing xcv when NULL
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This fixes the following smatch and coccinelle warnings:
+
+  drivers/net/ethernet/cavium/thunder/thunder_xcv.c:119 xcv_setup_link() error: we previously assumed 'xcv' could be null (see line 118) [smatch]
+  drivers/net/ethernet/cavium/thunder/thunder_xcv.c:119:16-20: ERROR: xcv is NULL but dereferenced. [coccinelle]
+
+Fixes: 6465859aba1e66a5 ("net: thunderx: Add RGMII interface type support")
+Signed-off-by: Vincent Stehlé <vincent.stehle at laposte.net>
+Cc: Sunil Goutham <sgoutham at cavium.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+(cherry picked from commit c73e44269369e936165f0f9b61f1f09a11dae01c)
+Signed-off-by: Vadim Lomovtsev <Vadim.Lomovtsev at caviumnetworks.com>
+---
+ drivers/net/ethernet/cavium/thunder/thunder_xcv.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
+index 67befed..578c7f8 100644
+--- a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
++++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
+@@ -116,8 +116,7 @@ void xcv_setup_link(bool link_up, int link_speed)
+ 	int speed = 2;
+ 
+ 	if (!xcv) {
+-		dev_err(&xcv->pdev->dev,
+-			"XCV init not done, probe may have failed\n");
++		pr_err("XCV init not done, probe may have failed\n");
+ 		return;
+ 	}
+ 
+-- 
+2.5.5
+
+
+From b26339185447e850da6e19db1df0d9d0cdc09419 Mon Sep 17 00:00:00 2001
+From: Thanneeru Srinivasulu <tsrinivasulu at cavium.com>
+Date: Wed, 8 Feb 2017 18:09:00 +0530
+Subject: [PATCH 11/14] net: thunderx: Fix PHY autoneg for SGMII QLM mode
+
+This patch fixes the case where there is no phydev attached
+to an LMAC in DT, due to the non-existence of a PHY driver or
+the use of a non-standard PHY which doesn't support autoneg.
+The changes depend on the firmware to send correct info w.r.t.
+the PHY and autoneg capability.
+
+This patch also covers the case where a 10G/40G interface is used
+as 1G with converters, with a Cortina PHY in between.
+
+Signed-off-by: Thanneeru Srinivasulu <tsrinivasulu at cavium.com>
+Signed-off-by: Sunil Goutham <sgoutham at cavium.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+(cherry picked from commit 075ad765ef7541b2860de8408c165a92b78aefa3)
+Signed-off-by: Vadim Lomovtsev <Vadim.Lomovtsev at caviumnetworks.com>
+---
+ drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 108 +++++++++++++++++++---
+ drivers/net/ethernet/cavium/thunder/thunder_bgx.h |   5 +
+ 2 files changed, 101 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+index aa5836c..6eadbd7 100644
+--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+@@ -31,6 +31,7 @@ struct lmac {
+ 	u8                      lmac_type;
+ 	u8                      lane_to_sds;
+ 	bool                    use_training;
++	bool                    autoneg;
+ 	bool			link_up;
+ 	int			lmacid; /* ID within BGX */
+ 	int			lmacid_bd; /* ID on board */
+@@ -459,7 +460,17 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
+ 	/* power down, reset autoneg, autoneg enable */
+ 	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
+ 	cfg &= ~PCS_MRX_CTL_PWR_DN;
+-	cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
++	cfg |= PCS_MRX_CTL_RST_AN;
++	if (lmac->phydev) {
++		cfg |= PCS_MRX_CTL_AN_EN;
++	} else {
++		/* In scenarios where PHY driver is not present or it's a
++		 * non-standard PHY, FW sets AN_EN to inform Linux driver
++		 * to do auto-neg and link polling or not.
++		 */
++		if (cfg & PCS_MRX_CTL_AN_EN)
++			lmac->autoneg = true;
++	}
+ 	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
+ 
+ 	if (lmac->lmac_type == BGX_MODE_QSGMII) {
+@@ -470,7 +481,7 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
+ 		return 0;
+ 	}
+ 
+-	if (lmac->lmac_type == BGX_MODE_SGMII) {
++	if ((lmac->lmac_type == BGX_MODE_SGMII) && lmac->phydev) {
+ 		if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
+ 				 PCS_MRX_STATUS_AN_CPT, false)) {
+ 			dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
+@@ -676,12 +687,71 @@ static int bgx_xaui_check_link(struct lmac *lmac)
+ 	return -1;
+ }
+ 
++static void bgx_poll_for_sgmii_link(struct lmac *lmac)
++{
++	u64 pcs_link, an_result;
++	u8 speed;
++
++	pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
++				BGX_GMP_PCS_MRX_STATUS);
++
++	/*Link state bit is sticky, read it again*/
++	if (!(pcs_link & PCS_MRX_STATUS_LINK))
++		pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
++					BGX_GMP_PCS_MRX_STATUS);
++
++	if (bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_GMP_PCS_MRX_STATUS,
++			 PCS_MRX_STATUS_AN_CPT, false)) {
++		lmac->link_up = false;
++		lmac->last_speed = SPEED_UNKNOWN;
++		lmac->last_duplex = DUPLEX_UNKNOWN;
++		goto next_poll;
++	}
++
++	lmac->link_up = ((pcs_link & PCS_MRX_STATUS_LINK) != 0) ? true : false;
++	an_result = bgx_reg_read(lmac->bgx, lmac->lmacid,
++				 BGX_GMP_PCS_ANX_AN_RESULTS);
++
++	speed = (an_result >> 3) & 0x3;
++	lmac->last_duplex = (an_result >> 1) & 0x1;
++	switch (speed) {
++	case 0:
++		lmac->last_speed = 10;
++		break;
++	case 1:
++		lmac->last_speed = 100;
++		break;
++	case 2:
++		lmac->last_speed = 1000;
++		break;
++	default:
++		lmac->link_up = false;
++		lmac->last_speed = SPEED_UNKNOWN;
++		lmac->last_duplex = DUPLEX_UNKNOWN;
++		break;
++	}
++
++next_poll:
++
++	if (lmac->last_link != lmac->link_up) {
++		if (lmac->link_up)
++			bgx_sgmii_change_link_state(lmac);
++		lmac->last_link = lmac->link_up;
++	}
++
++	queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 3);
++}
++
+ static void bgx_poll_for_link(struct work_struct *work)
+ {
+ 	struct lmac *lmac;
+ 	u64 spu_link, smu_link;
+ 
+ 	lmac = container_of(work, struct lmac, dwork.work);
++	if (lmac->is_sgmii) {
++		bgx_poll_for_sgmii_link(lmac);
++		return;
++	}
+ 
+ 	/* Receive link is latching low. Force it high and verify it */
+ 	bgx_reg_modify(lmac->bgx, lmac->lmacid,
+@@ -773,9 +843,21 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
+ 	    (lmac->lmac_type != BGX_MODE_XLAUI) &&
+ 	    (lmac->lmac_type != BGX_MODE_40G_KR) &&
+ 	    (lmac->lmac_type != BGX_MODE_10G_KR)) {
+-		if (!lmac->phydev)
+-			return -ENODEV;
+-
++		if (!lmac->phydev) {
++			if (lmac->autoneg) {
++				bgx_reg_write(bgx, lmacid,
++					      BGX_GMP_PCS_LINKX_TIMER,
++					      PCS_LINKX_TIMER_COUNT);
++				goto poll;
++			} else {
++				/* Default to below link speed and duplex */
++				lmac->link_up = true;
++				lmac->last_speed = 1000;
++				lmac->last_duplex = 1;
++				bgx_sgmii_change_link_state(lmac);
++				return 0;
++			}
++		}
+ 		lmac->phydev->dev_flags = 0;
+ 
+ 		if (phy_connect_direct(&lmac->netdev, lmac->phydev,
+@@ -784,15 +866,17 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
+ 			return -ENODEV;
+ 
+ 		phy_start_aneg(lmac->phydev);
+-	} else {
+-		lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
+-						   WQ_MEM_RECLAIM, 1);
+-		if (!lmac->check_link)
+-			return -ENOMEM;
+-		INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
+-		queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
++		return 0;
+ 	}
+ 
++poll:
++	lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
++					   WQ_MEM_RECLAIM, 1);
++	if (!lmac->check_link)
++		return -ENOMEM;
++	INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
++	queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+index c18ebfe..a60f189 100644
+--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+@@ -153,10 +153,15 @@
+ #define	 PCS_MRX_CTL_LOOPBACK1			BIT_ULL(14)
+ #define	 PCS_MRX_CTL_RESET			BIT_ULL(15)
+ #define BGX_GMP_PCS_MRX_STATUS		0x30008
++#define	 PCS_MRX_STATUS_LINK			BIT_ULL(2)
+ #define	 PCS_MRX_STATUS_AN_CPT			BIT_ULL(5)
++#define BGX_GMP_PCS_ANX_ADV		0x30010
+ #define BGX_GMP_PCS_ANX_AN_RESULTS	0x30020
++#define BGX_GMP_PCS_LINKX_TIMER		0x30040
++#define PCS_LINKX_TIMER_COUNT			0x1E84
+ #define BGX_GMP_PCS_SGM_AN_ADV		0x30068
+ #define BGX_GMP_PCS_MISCX_CTL		0x30078
++#define  PCS_MISC_CTL_MODE			BIT_ULL(8)
+ #define  PCS_MISC_CTL_DISP_EN			BIT_ULL(13)
+ #define  PCS_MISC_CTL_GMX_ENO			BIT_ULL(11)
+ #define  PCS_MISC_CTL_SAMP_PT_MASK	0x7Full
+-- 
+2.5.5
+
+
+From 6f831cdf29fa390612b2bcc48ebd002452959696 Mon Sep 17 00:00:00 2001
+From: Sunil Goutham <sgoutham at cavium.com>
+Date: Tue, 7 Mar 2017 18:09:09 +0530
+Subject: [PATCH 12/14] net: thunderx: Fix LMAC mode debug prints for QSGMII
+ mode
+
+When BGX/LMACs are in QSGMII mode, the mode info is not printed for
+some LMACs. This patch fixes that. Since the kernel driver no longer
+does any serdes lane-to-SDS mapping calculation, this skip logic can
+be removed.
+
+Signed-off-by: Sunil Goutham <sgoutham at cavium.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+(cherry picked from commit 18de7ba95f6e5ab150e482618123d92ee2240dc0)
+Signed-off-by: Vadim Lomovtsev <Vadim.Lomovtsev at caviumnetworks.com>
+---
+ drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 6 ------
+ 1 file changed, 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+index 6eadbd7..e3223fa 100644
+--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+@@ -1009,12 +1009,6 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
+ 			dev_info(dev, "%s: 40G_KR4\n", (char *)str);
+ 		break;
+ 	case BGX_MODE_QSGMII:
+-		if ((lmacid == 0) &&
+-		    (bgx_get_lane2sds_cfg(bgx, lmac) != lmacid))
+-			return;
+-		if ((lmacid == 2) &&
+-		    (bgx_get_lane2sds_cfg(bgx, lmac) == lmacid))
+-			return;
+ 		dev_info(dev, "%s: QSGMII\n", (char *)str);
+ 		break;
+ 	case BGX_MODE_RGMII:
+-- 
+2.5.5
+
+
+From bf375972be4b651de342b43cab0c908d0eb733be Mon Sep 17 00:00:00 2001
+From: Sunil Goutham <sgoutham at cavium.com>
+Date: Tue, 7 Mar 2017 18:09:10 +0530
+Subject: [PATCH 13/14] net: thunderx: Fix invalid mac addresses for node1
+ interfaces
+
+When booted with ACPI, random MAC addresses are assigned to node1
+interfaces because the bgx_id computed by the BGX driver does not
+match the ACPI tables.
+
+This patch fixes the issue by deriving the maximum number of BGX
+devices per node from the platform/SoC instead of a fixed macro,
+so the bgx_id is set correctly.
+
+Signed-off-by: Sunil Goutham <sgoutham at cavium.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+(cherry picked from commit 78aacb6f6eeea3c581a29b4a50438d0bdf85ad0b)
+Signed-off-by: Vadim Lomovtsev <Vadim.Lomovtsev at caviumnetworks.com>
+---
+ drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 58 ++++++++++++++++++-----
+ drivers/net/ethernet/cavium/thunder/thunder_bgx.h |  1 -
+ 2 files changed, 45 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+index e3223fa..898a684 100644
+--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+@@ -123,14 +123,44 @@ static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
+ 	return 1;
+ }
+ 
++static int max_bgx_per_node;
++static void set_max_bgx_per_node(struct pci_dev *pdev)
++{
++	u16 sdevid;
++
++	if (max_bgx_per_node)
++		return;
++
++	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
++	switch (sdevid) {
++	case PCI_SUBSYS_DEVID_81XX_BGX:
++		max_bgx_per_node = MAX_BGX_PER_CN81XX;
++		break;
++	case PCI_SUBSYS_DEVID_83XX_BGX:
++		max_bgx_per_node = MAX_BGX_PER_CN83XX;
++		break;
++	case PCI_SUBSYS_DEVID_88XX_BGX:
++	default:
++		max_bgx_per_node = MAX_BGX_PER_CN88XX;
++		break;
++	}
++}
++
++static struct bgx *get_bgx(int node, int bgx_idx)
++{
++	int idx = (node * max_bgx_per_node) + bgx_idx;
++
++	return bgx_vnic[idx];
++}
++
+ /* Return number of BGX present in HW */
+ unsigned bgx_get_map(int node)
+ {
+ 	int i;
+ 	unsigned map = 0;
+ 
+-	for (i = 0; i < MAX_BGX_PER_NODE; i++) {
+-		if (bgx_vnic[(node * MAX_BGX_PER_NODE) + i])
++	for (i = 0; i < max_bgx_per_node; i++) {
++		if (bgx_vnic[(node * max_bgx_per_node) + i])
+ 			map |= (1 << i);
+ 	}
+ 
+@@ -143,7 +173,7 @@ int bgx_get_lmac_count(int node, int bgx_idx)
+ {
+ 	struct bgx *bgx;
+ 
+-	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
++	bgx = get_bgx(node, bgx_idx);
+ 	if (bgx)
+ 		return bgx->lmac_count;
+ 
+@@ -158,7 +188,7 @@ void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
+ 	struct bgx *bgx;
+ 	struct lmac *lmac;
+ 
+-	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
++	bgx = get_bgx(node, bgx_idx);
+ 	if (!bgx)
+ 		return;
+ 
+@@ -172,7 +202,7 @@ EXPORT_SYMBOL(bgx_get_lmac_link_state);
+ 
+ const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
+ {
+-	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
++	struct bgx *bgx = get_bgx(node, bgx_idx);
+ 
+ 	if (bgx)
+ 		return bgx->lmac[lmacid].mac;
+@@ -183,7 +213,7 @@ EXPORT_SYMBOL(bgx_get_lmac_mac);
+ 
+ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
+ {
+-	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
++	struct bgx *bgx = get_bgx(node, bgx_idx);
+ 
+ 	if (!bgx)
+ 		return;
+@@ -194,7 +224,7 @@ EXPORT_SYMBOL(bgx_set_lmac_mac);
+ 
+ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
+ {
+-	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
++	struct bgx *bgx = get_bgx(node, bgx_idx);
+ 	struct lmac *lmac;
+ 	u64 cfg;
+ 
+@@ -217,7 +247,7 @@ EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
+ void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
+ {
+ 	struct pfc *pfc = (struct pfc *)pause;
+-	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
++	struct bgx *bgx = get_bgx(node, bgx_idx);
+ 	struct lmac *lmac;
+ 	u64 cfg;
+ 
+@@ -237,7 +267,7 @@ EXPORT_SYMBOL(bgx_lmac_get_pfc);
+ void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause)
+ {
+ 	struct pfc *pfc = (struct pfc *)pause;
+-	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
++	struct bgx *bgx = get_bgx(node, bgx_idx);
+ 	struct lmac *lmac;
+ 	u64 cfg;
+ 
+@@ -367,7 +397,7 @@ u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
+ {
+ 	struct bgx *bgx;
+ 
+-	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
++	bgx = get_bgx(node, bgx_idx);
+ 	if (!bgx)
+ 		return 0;
+ 
+@@ -381,7 +411,7 @@ u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
+ {
+ 	struct bgx *bgx;
+ 
+-	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
++	bgx = get_bgx(node, bgx_idx);
+ 	if (!bgx)
+ 		return 0;
+ 
+@@ -409,7 +439,7 @@ void bgx_lmac_internal_loopback(int node, int bgx_idx,
+ 	struct lmac *lmac;
+ 	u64    cfg;
+ 
+-	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
++	bgx = get_bgx(node, bgx_idx);
+ 	if (!bgx)
+ 		return;
+ 
+@@ -1326,11 +1356,13 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 		goto err_release_regions;
+ 	}
+ 
++	set_max_bgx_per_node(pdev);
++
+ 	pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
+ 	if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
+ 		bgx->bgx_id = (pci_resource_start(pdev,
+ 			PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
+-		bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE;
++		bgx->bgx_id += nic_get_node_id(pdev) * max_bgx_per_node;
+ 		bgx->max_lmac = MAX_LMAC_PER_BGX;
+ 		bgx_vnic[bgx->bgx_id] = bgx;
+ 	} else {
+diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+index a60f189..c5080f2c 100644
+--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
++++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+@@ -22,7 +22,6 @@
+ #define    MAX_BGX_PER_CN88XX			2
+ #define    MAX_BGX_PER_CN81XX			3 /* 2 BGXs + 1 RGX */
+ #define    MAX_BGX_PER_CN83XX			4
+-#define    MAX_BGX_PER_NODE			4
+ #define    MAX_LMAC_PER_BGX			4
+ #define    MAX_BGX_CHANS_PER_LMAC		16
+ #define    MAX_DMAC_PER_LMAC			8
+-- 
+2.5.5
+
+
+From 52401dec7de73bf432a335148082db3aa7970bbd Mon Sep 17 00:00:00 2001
+From: Thanneeru Srinivasulu <tsrinivasulu at cavium.com>
+Date: Tue, 7 Mar 2017 18:09:11 +0530
+Subject: [PATCH 14/14] net: thunderx: Allow IPv6 frames with zero UDP checksum
+
+Do not treat IPv6 frames with a zero UDP checksum as frames with a
+bad checksum, and do not drop them.
+
+Signed-off-by: Thanneeru Srinivasulu <tsrinivasulu at cavium.com>
+Signed-off-by: Sunil Goutham <sgoutham at cavium.com>
+Signed-off-by: David S. Miller <davem at davemloft.net>
+(cherry picked from commit 36fa35d22bffc78c85b5e68adbdd99e914bec764)
+Signed-off-by: Vadim Lomovtsev <Vadim.Lomovtsev at caviumnetworks.com>
+---
+ drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+index 142c045..b44aadd 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+@@ -559,9 +559,11 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
+ 	nicvf_send_msg_to_pf(nic, &mbx);
+ 
+ 	if (!nic->sqs_mode && (qidx == 0)) {
+-		/* Enable checking L3/L4 length and TCP/UDP checksums */
++		/* Enable checking L3/L4 length and TCP/UDP checksums
++		 * Also allow IPv6 pkts with zero UDP checksum.
++		 */
+ 		nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
+-				      (BIT(24) | BIT(23) | BIT(21)));
++				      (BIT(24) | BIT(23) | BIT(21) | BIT(20)));
+ 		nicvf_config_vlan_stripping(nic, nic->netdev->features);
+ 	}
+ 
+-- 
+2.5.5
+
diff --git a/SPECS/kernel-aarch64.spec b/SPECS/kernel-aarch64.spec
index b6d58e5..c197862 100644
--- a/SPECS/kernel-aarch64.spec
+++ b/SPECS/kernel-aarch64.spec
@@ -360,6 +360,7 @@ Patch1030: 1030-pca954x-pca955x-pca963x-ds1307-ACPI-support.patch
 Patch2000: ThunderX-7.3.1611.patch
 Patch2001: 0001-PCI-ASPM-Don-t-retrain-link-if-ASPM-not-possible.patch
 Patch2002: 0001-net-thunderx-acpi-fix-LMAC-initialization.patch
+Patch2003: 0001-thunderx-nic-updates.patch
 
 # QDF2400 Patches
 #Patch4000: 4000-arm64-Define-Qualcomm-Technologies-ARMv8-CPU.patch
@@ -730,6 +731,7 @@ git am %{PATCH1030}
 git am %{PATCH2000}
 git am %{PATCH2001}
 git am %{PATCH2002}
+git am %{PATCH2003}
 
 # NO LONGER NEEDED as of 4.5.0-15.2.1
 
-- 
2.5.5