diff --git a/Documentation/devicetree/bindings/serial/mvebu-uart.txt b/Documentation/devicetree/bindings/serial/mvebu-uart.txt
index 6087defd9f93..d37fabe17bd1 100644
--- a/Documentation/devicetree/bindings/serial/mvebu-uart.txt
+++ b/Documentation/devicetree/bindings/serial/mvebu-uart.txt
@@ -8,6 +8,6 @@ Required properties:
 Example:
        serial@12000 {
                compatible = "marvell,armada-3700-uart";
-               reg = <0x12000 0x400>;
+               reg = <0x12000 0x200>;
                interrupts = <43>;
        };
diff --git a/Makefile b/Makefile
index 57825473c031..a798f4777ae2 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 135
+SUBLEVEL = 136
 EXTRAVERSION =
 NAME = Petit Gorille
 
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index 8c0cf7efac65..b554cdaf5e53 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -134,7 +134,7 @@
 
                        uart0: serial@12000 {
                                compatible = "marvell,armada-3700-uart";
-                               reg = <0x12000 0x400>;
+                               reg = <0x12000 0x200>;
                                interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
                                status = "disabled";
                        };
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index a3c7f271ad4c..9ed290a9811c 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -234,6 +234,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 }
 
 #define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
+#define COMPAT_MINSIGSTKSZ     2048
 
 static inline void __user *arch_compat_alloc_user_space(long len)
 {
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index 6394b4f0a69b..f42feab25dcf 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -8,27 +8,19 @@ config SH_ALPHA_BOARD
        bool
 
 config SH_DEVICE_TREE
-       bool "Board Described by Device Tree"
+       bool
        select OF
        select OF_EARLY_FLATTREE
        select TIMER_OF
        select COMMON_CLK
        select GENERIC_CALIBRATE_DELAY
-       help
-         Select Board Described by Device Tree to build a kernel that
-         does not hard-code any board-specific knowledge but instead uses
-         a device tree blob provided by the boot-loader. You must enable
-         drivers for any hardware you want to use separately. At this
-         time, only boards based on the open-hardware J-Core processors
-         have sufficient driver coverage to use this option; do not
-         select it if you are using original SuperH hardware.
 
 config SH_JCORE_SOC
        bool "J-Core SoC"
-       depends on SH_DEVICE_TREE && (CPU_SH2 || CPU_J2)
+       select SH_DEVICE_TREE
        select CLKSRC_JCORE_PIT
        select JCORE_AIC
-       default y if CPU_J2
+       depends on CPU_J2
        help
          Select this option to include drivers core components of the
          J-Core SoC, including interrupt controllers and timers.
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index e694fd2c4ed0..05e75d18b4d9 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1903,8 +1903,18 @@ static struct binder_thread *binder_get_txn_from_and_acq_inner(
 
 static void binder_free_transaction(struct binder_transaction *t)
 {
-       if (t->buffer)
-               t->buffer->transaction = NULL;
+       struct binder_proc *target_proc = t->to_proc;
+
+       if (target_proc) {
+               binder_inner_proc_lock(target_proc);
+               if (t->buffer)
+                       t->buffer->transaction = NULL;
+               binder_inner_proc_unlock(target_proc);
+       }
+       /*
+        * If the transaction has no target_proc, then
+        * t->buffer->transaction has already been cleared.
+        */
        kfree(t);
        binder_stats_deleted(BINDER_STAT_TRANSACTION);
 }
@@ -3426,10 +3436,12 @@ static int binder_thread_write(struct binder_proc *proc,
                                     buffer->debug_id,
                                     buffer->transaction ? "active" : "finished");
 
+                       binder_inner_proc_lock(proc);
                        if (buffer->transaction) {
                                buffer->transaction->buffer = NULL;
                                buffer->transaction = NULL;
                        }
+                       binder_inner_proc_unlock(proc);
                        if (buffer->async_transaction && buffer->target_node) {
                                struct binder_node *buf_node;
                                struct binder_work *w;
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index 0ccf6bf01ed4..c50b68bbecdc 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -101,6 +101,9 @@ static int ath_open(struct hci_uart *hu)
 
        BT_DBG("hu %p", hu);
 
+       if (!hci_uart_has_flow_control(hu))
+               return -EOPNOTSUPP;
+
        ath = kzalloc(sizeof(*ath), GFP_KERNEL);
        if (!ath)
                return -ENOMEM;
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 32527bdf4b50..6d41b2023f09 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -305,6 +305,9 @@ static int bcm_open(struct hci_uart *hu)
 
        bt_dev_dbg(hu->hdev, "hu %p", hu);
 
+       if (!hci_uart_has_flow_control(hu))
+               return -EOPNOTSUPP;
+
        bcm = kzalloc(sizeof(*bcm), GFP_KERNEL);
        if (!bcm)
                return -ENOMEM;
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index aad07e40ea4f..c75311d4dd31 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -406,6 +406,9 @@ static int intel_open(struct hci_uart *hu)
 
        BT_DBG("hu %p", hu);
 
+       if (!hci_uart_has_flow_control(hu))
+               return -EOPNOTSUPP;
+
        intel = kzalloc(sizeof(*intel), GFP_KERNEL);
        if (!intel)
                return -ENOMEM;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 3b63a781f10f..43221def1d29 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -297,6 +297,19 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
        return 0;
 }
 
+/* Check the underlying device or tty has flow control support */
+bool hci_uart_has_flow_control(struct hci_uart *hu)
+{
+       /* serdev nodes check if the needed operations are present */
+       if (hu->serdev)
+               return true;
+
+       if (hu->tty->driver->ops->tiocmget && hu->tty->driver->ops->tiocmset)
+               return true;
+
+       return false;
+}
+
 /* Flow control or un-flow control the device */
 void hci_uart_set_flow_control(struct hci_uart *hu, bool enable)
 {
diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c
index ffb00669346f..23791df081ba 100644
--- a/drivers/bluetooth/hci_mrvl.c
+++ b/drivers/bluetooth/hci_mrvl.c
@@ -66,6 +66,9 @@ static int mrvl_open(struct hci_uart *hu)
 
        BT_DBG("hu %p", hu);
 
+       if (!hci_uart_has_flow_control(hu))
+               return -EOPNOTSUPP;
+
        mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL);
        if (!mrvl)
                return -ENOMEM;
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 66e8c68e4607..e5ec2cf1755b 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -117,6 +117,7 @@ void hci_uart_unregister_device(struct hci_uart *hu);
 int hci_uart_tx_wakeup(struct hci_uart *hu);
 int hci_uart_init_ready(struct hci_uart *hu);
 void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed);
+bool hci_uart_has_flow_control(struct hci_uart *hu);
 void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
 void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
                         unsigned int oper_speed);
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 08f8e0107642..8f6903ec7aec 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -844,6 +844,8 @@ static int qup_i2c_bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg,
        }
 
        if (ret || qup->bus_err || qup->qup_err) {
+               reinit_completion(&qup->xfer);
+
                if (qup_i2c_change_state(qup, QUP_RUN_STATE)) {
                        dev_err(qup->dev, "change to run state timed out");
                        goto desc_err;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index baa4c58e2736..523d0889c2a4 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3702,7 +3702,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
 
        freelist = domain_unmap(domain, start_pfn, last_pfn);
 
-       if (intel_iommu_strict) {
+       if (intel_iommu_strict || !has_iova_flush_queue(&domain->iovad)) {
                iommu_flush_iotlb_psi(iommu, domain, start_pfn,
                                      nrpages, !freelist, 0);
                /* free iova */
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 33edfa794ae9..9f35b9a0d6d8 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -58,9 +58,14 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 }
 EXPORT_SYMBOL_GPL(init_iova_domain);
 
+bool has_iova_flush_queue(struct iova_domain *iovad)
+{
+       return !!iovad->fq;
+}
+
 static void free_iova_flush_queue(struct iova_domain *iovad)
 {
-       if (!iovad->fq)
+       if (!has_iova_flush_queue(iovad))
                return;
 
        if (timer_pending(&iovad->fq_timer))
@@ -78,13 +83,14 @@ static void free_iova_flush_queue(struct iova_domain *iovad)
 int init_iova_flush_queue(struct iova_domain *iovad,
                          iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
 {
+       struct iova_fq __percpu *queue;
        int cpu;
 
        atomic64_set(&iovad->fq_flush_start_cnt,  0);
        atomic64_set(&iovad->fq_flush_finish_cnt, 0);
 
-       iovad->fq = alloc_percpu(struct iova_fq);
-       if (!iovad->fq)
+       queue = alloc_percpu(struct iova_fq);
+       if (!queue)
                return -ENOMEM;
 
        iovad->flush_cb   = flush_cb;
@@ -93,13 +99,17 @@ int init_iova_flush_queue(struct iova_domain *iovad,
        for_each_possible_cpu(cpu) {
                struct iova_fq *fq;
 
-               fq = per_cpu_ptr(iovad->fq, cpu);
+               fq = per_cpu_ptr(queue, cpu);
                fq->head = 0;
                fq->tail = 0;
 
                spin_lock_init(&fq->lock);
        }
 
+       smp_wmb();
+
+       iovad->fq = queue;
+
        setup_timer(&iovad->fq_timer, fq_flush_timeout, (unsigned long)iovad);
        atomic_set(&iovad->fq_timer_on, 0);
 
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 17cc879ad2bb..35983c7c3137 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -1963,6 +1963,9 @@ hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
 
                                /* get endpoint base */
                                idx = ((ep_addr & 0x7f) - 1) * 2;
+                               if (idx > 15)
+                                       return -EIO;
+
                                if (ep_addr & 0x80)
                                        idx++;
                                attr = ep->desc.bmAttributes;
diff --git a/drivers/media/radio/radio-raremono.c b/drivers/media/radio/radio-raremono.c
index 3c0a22a54113..932c32e56d73 100644
--- a/drivers/media/radio/radio-raremono.c
+++ b/drivers/media/radio/radio-raremono.c
@@ -283,6 +283,14 @@ static int vidioc_g_frequency(struct file *file, void *priv,
        return 0;
 }
 
+static void raremono_device_release(struct v4l2_device *v4l2_dev)
+{
+       struct raremono_device *radio = to_raremono_dev(v4l2_dev);
+
+       kfree(radio->buffer);
+       kfree(radio);
+}
+
 /* File system interface */
 static const struct v4l2_file_operations usb_raremono_fops = {
        .owner          = THIS_MODULE,
@@ -307,12 +315,14 @@ static int usb_raremono_probe(struct usb_interface *intf,
        struct raremono_device *radio;
        int retval = 0;
 
-       radio = devm_kzalloc(&intf->dev, sizeof(struct raremono_device), GFP_KERNEL);
-       if (radio)
-               radio->buffer = devm_kmalloc(&intf->dev, BUFFER_LENGTH, GFP_KERNEL);
-
-       if (!radio || !radio->buffer)
+       radio = kzalloc(sizeof(*radio), GFP_KERNEL);
+       if (!radio)
+               return -ENOMEM;
+       radio->buffer = kmalloc(BUFFER_LENGTH, GFP_KERNEL);
+       if (!radio->buffer) {
+               kfree(radio);
                return -ENOMEM;
+       }
 
        radio->usbdev = interface_to_usbdev(intf);
        radio->intf = intf;
@@ -336,7 +346,8 @@ static int usb_raremono_probe(struct usb_interface *intf,
        if (retval != 3 ||
            (get_unaligned_be16(&radio->buffer[1]) & 0xfff) == 0x0242) {
                dev_info(&intf->dev, "this is not Thanko's Raremono.\n");
-               return -ENODEV;
+               retval = -ENODEV;
+               goto free_mem;
        }
 
        dev_info(&intf->dev, "Thanko's Raremono connected: (%04X:%04X)\n",
@@ -345,7 +356,7 @@ static int usb_raremono_probe(struct usb_interface *intf,
        retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev);
        if (retval < 0) {
                dev_err(&intf->dev, "couldn't register v4l2_device\n");
-               return retval;
+               goto free_mem;
        }
 
        mutex_init(&radio->lock);
@@ -357,6 +368,7 @@ static int usb_raremono_probe(struct usb_interface *intf,
        radio->vdev.ioctl_ops = &usb_raremono_ioctl_ops;
        radio->vdev.lock = &radio->lock;
        radio->vdev.release = video_device_release_empty;
+       radio->v4l2_dev.release = raremono_device_release;
 
        usb_set_intfdata(intf, &radio->v4l2_dev);
 
@@ -372,6 +384,10 @@ static int usb_raremono_probe(struct usb_interface *intf,
        }
        dev_err(&intf->dev, "could not register video device\n");
        v4l2_device_unregister(&radio->v4l2_dev);
+
+free_mem:
+       kfree(radio->buffer);
+       kfree(radio);
        return retval;
 }
 
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index 257ae0d8cfe2..e3f63299f85c 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -623,6 +623,12 @@ static int au0828_usb_probe(struct usb_interface *interface,
        /* Setup */
        au0828_card_setup(dev);
 
+       /*
+        * Store the pointer to the au0828_dev so it can be accessed in
+        * au0828_usb_disconnect
+        */
+       usb_set_intfdata(interface, dev);
+
        /* Analog TV */
        retval = au0828_analog_register(dev, interface);
        if (retval) {
@@ -641,12 +647,6 @@ static int au0828_usb_probe(struct usb_interface *interface,
        /* Remote controller */
        au0828_rc_register(dev);
 
-       /*
-        * Store the pointer to the au0828_dev so it can be accessed in
-        * au0828_usb_disconnect
-        */
-       usb_set_intfdata(interface, dev);
-
        pr_info("Registered device AU0828 [%s]\n",
                dev->board.name == NULL ? "Unset" : dev->board.name);
 
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index 6089036049d9..0964ff556f4e 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -901,7 +901,6 @@ static void cpia2_usb_disconnect(struct usb_interface *intf)
        cpia2_unregister_camera(cam);
        v4l2_device_disconnect(&cam->v4l2_dev);
        mutex_unlock(&cam->v4l2_lock);
-       v4l2_device_put(&cam->v4l2_dev);
 
        if(cam->buffers) {
                DBG("Wakeup waiting processes\n");
@@ -913,6 +912,8 @@ static void cpia2_usb_disconnect(struct usb_interface *intf)
        DBG("Releasing interface\n");
        usb_driver_release_interface(&cpia2_driver, intf);
 
+       v4l2_device_put(&cam->v4l2_dev);
+
        LOG("CPiA2 camera disconnected.\n");
 }
 
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index ddededc4ced4..18db7aaafcd6 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -1680,7 +1680,7 @@ static int pvr2_decoder_enable(struct pvr2_hdw *hdw,int enablefl)
        }
        if (!hdw->flag_decoder_missed) {
                pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-                          "WARNING: No decoder present");
+                          "***WARNING*** No decoder present");
                hdw->flag_decoder_missed = !0;
                trace_stbit("flag_decoder_missed",
                            hdw->flag_decoder_missed);
@@ -2365,7 +2365,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
        if (hdw_desc->flag_is_experimental) {
                pvr2_trace(PVR2_TRACE_INFO, "**********");
                pvr2_trace(PVR2_TRACE_INFO,
-                          "WARNING: Support for this device (%s) is experimental.",
+                          "***WARNING*** Support for this device (%s) is experimental.",
                                                              
hdw_desc->description);
                pvr2_trace(PVR2_TRACE_INFO,
                           "Important functionality might not be entirely working.");
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
index ff7b4d1d385d..f57ddb382dbf 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
@@ -343,11 +343,11 @@ static int i2c_hack_cx25840(struct pvr2_hdw *hdw,
 
        if ((ret != 0) || (*rdata == 0x04) || (*rdata == 0x0a)) {
                pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-                          "WARNING: Detected a wedged cx25840 chip; the device will not work.");
+                          "***WARNING*** Detected a wedged cx25840 chip; the device will not work.");
                pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-                          "WARNING: Try power cycling the pvrusb2 device.");
+                          "***WARNING*** Try power cycling the pvrusb2 device.");
                pvr2_trace(PVR2_TRACE_ERROR_LEGS,
-                          "WARNING: Disabling further access to the device to prevent other foul-ups.");
+                          "***WARNING*** Disabling further access to the device to prevent other foul-ups.");
                // This blocks all further communication with the part.
                hdw->i2c_func[0x44] = NULL;
                pvr2_hdw_render_useless(hdw);
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-std.c b/drivers/media/usb/pvrusb2/pvrusb2-std.c
index 21bb20dba82c..243e2704ce3a 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-std.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-std.c
@@ -353,7 +353,7 @@ struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr,
                bcnt = pvr2_std_id_to_str(buf,sizeof(buf),fmsk);
                pvr2_trace(
                        PVR2_TRACE_ERROR_LEGS,
-                       "WARNING: Failed to classify the following standard(s): %.*s",
+                       "***WARNING*** Failed to classify the following standard(s): %.*s",
                        bcnt,buf);
        }
 
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
index d4803ff5a78a..f09a4ad2e9de 100644
--- a/drivers/net/wireless/ath/ath10k/usb.c
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -1025,7 +1025,7 @@ static int ath10k_usb_probe(struct usb_interface *interface,
        }
 
        /* TODO: remove this once USB support is fully implemented */
-       ath10k_warn(ar, "WARNING: ath10k USB support is incomplete, don't expect anything to work!\n");
+       ath10k_warn(ar, "Warning: ath10k USB support is incomplete, don't expect anything to work!\n");
 
        return 0;
 
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 6eb0db37dd88..574b08af0d98 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -166,6 +166,14 @@ static long pps_cdev_ioctl(struct file *file,
                        pps->params.mode |= PPS_CANWAIT;
                pps->params.api_version = PPS_API_VERS;
 
+               /*
+                * Clear unused fields of pps_kparams to avoid leaking
+                * uninitialized data of the PPS_SETPARAMS caller via
+                * PPS_GETPARAMS
+                */
+               pps->params.assert_off_tu.flags = 0;
+               pps->params.clear_off_tu.flags = 0;
+
                spin_unlock_irq(&pps->lock);
 
                break;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 92eb9c3052ee..238d24348a98 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1119,20 +1119,23 @@ static int send_cap_msg(struct cap_msg_args *arg)
 }
 
 /*
- * Queue cap releases when an inode is dropped from our cache.  Since
- * inode is about to be destroyed, there is no need for i_ceph_lock.
+ * Queue cap releases when an inode is dropped from our cache.
  */
 void ceph_queue_caps_release(struct inode *inode)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct rb_node *p;
 
+       /* lock i_ceph_lock, because ceph_d_revalidate(..., LOOKUP_RCU)
+        * may call __ceph_caps_issued_mask() on a freeing inode. */
+       spin_lock(&ci->i_ceph_lock);
        p = rb_first(&ci->i_caps);
        while (p) {
                struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
                p = rb_next(p);
                __ceph_remove_cap(cap, true);
        }
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
diff --git a/fs/exec.c b/fs/exec.c
index 0936b5a8199a..4623fc3ac86b 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1808,7 +1808,7 @@ static int do_execveat_common(int fd, struct filename *filename,
        current->in_execve = 0;
        membarrier_execve(current);
        acct_update_integrals(current);
-       task_numa_free(current);
+       task_numa_free(current, false);
        free_bprm(bprm);
        kfree(pathbuf);
        putname(filename);
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 0c7008fb6d5a..9e7d49fac4e3 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -416,10 +416,10 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
                clp = nfs_match_client(cl_init);
                if (clp) {
                        spin_unlock(&nn->nfs_client_lock);
-                       if (IS_ERR(clp))
-                               return clp;
                        if (new)
                                new->rpc_ops->free_client(new);
+                       if (IS_ERR(clp))
+                               return clp;
                        return nfs_found_client(cl_init, clp);
                }
                if (new) {
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index bf2c43635062..85a6fdd76e20 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1059,6 +1059,100 @@ int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
        return !nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU);
 }
 
+static int
+nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry,
+                          struct inode *inode, int error)
+{
+       switch (error) {
+       case 1:
+               dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is valid\n",
+                       __func__, dentry);
+               return 1;
+       case 0:
+               nfs_mark_for_revalidate(dir);
+               if (inode && S_ISDIR(inode->i_mode)) {
+                       /* Purge readdir caches. */
+                       nfs_zap_caches(inode);
+                       /*
+                        * We can't d_drop the root of a disconnected tree:
+                        * its d_hash is on the s_anon list and d_drop() would hide
+                        * it from shrink_dcache_for_unmount(), leading to busy
+                        * inodes on unmount and further oopses.
+                        */
+                       if (IS_ROOT(dentry))
+                               return 1;
+               }
+               dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
+                               __func__, dentry);
+               return 0;
+       }
+       dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) lookup returned error %d\n",
+                               __func__, dentry, error);
+       return error;
+}
+
+static int
+nfs_lookup_revalidate_negative(struct inode *dir, struct dentry *dentry,
+                              unsigned int flags)
+{
+       int ret = 1;
+       if (nfs_neg_need_reval(dir, dentry, flags)) {
+               if (flags & LOOKUP_RCU)
+                       return -ECHILD;
+               ret = 0;
+       }
+       return nfs_lookup_revalidate_done(dir, dentry, NULL, ret);
+}
+
+static int
+nfs_lookup_revalidate_delegated(struct inode *dir, struct dentry *dentry,
+                               struct inode *inode)
+{
+       nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+       return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
+}
+
+static int
+nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry,
+                            struct inode *inode)
+{
+       struct nfs_fh *fhandle;
+       struct nfs_fattr *fattr;
+       struct nfs4_label *label;
+       int ret;
+
+       ret = -ENOMEM;
+       fhandle = nfs_alloc_fhandle();
+       fattr = nfs_alloc_fattr();
+       label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
+       if (fhandle == NULL || fattr == NULL || IS_ERR(label))
+               goto out;
+
+       ret = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
+       if (ret < 0) {
+               if (ret == -ESTALE || ret == -ENOENT)
+                       ret = 0;
+               goto out;
+       }
+       ret = 0;
+       if (nfs_compare_fh(NFS_FH(inode), fhandle))
+               goto out;
+       if (nfs_refresh_inode(inode, fattr) < 0)
+               goto out;
+
+       nfs_setsecurity(inode, fattr, label);
+       nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+
+       /* set a readdirplus hint that we had a cache miss */
+       nfs_force_use_readdirplus(dir);
+       ret = 1;
+out:
+       nfs_free_fattr(fattr);
+       nfs_free_fhandle(fhandle);
+       nfs4_label_free(label);
+       return nfs_lookup_revalidate_done(dir, dentry, inode, ret);
+}
+
 /*
  * This is called every time the dcache has a lookup hit,
  * and we should check whether we can really trust that
@@ -1070,58 +1164,36 @@ int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
  * If the parent directory is seen to have changed, we throw out the
  * cached dentry and do a new lookup.
  */
-static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+static int
+nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
+                        unsigned int flags)
 {
-       struct inode *dir;
        struct inode *inode;
-       struct dentry *parent;
-       struct nfs_fh *fhandle = NULL;
-       struct nfs_fattr *fattr = NULL;
-       struct nfs4_label *label = NULL;
        int error;
 
-       if (flags & LOOKUP_RCU) {
-               parent = ACCESS_ONCE(dentry->d_parent);
-               dir = d_inode_rcu(parent);
-               if (!dir)
-                       return -ECHILD;
-       } else {
-               parent = dget_parent(dentry);
-               dir = d_inode(parent);
-       }
        nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE);
        inode = d_inode(dentry);
 
-       if (!inode) {
-               if (nfs_neg_need_reval(dir, dentry, flags)) {
-                       if (flags & LOOKUP_RCU)
-                               return -ECHILD;
-                       goto out_bad;
-               }
-               goto out_valid;
-       }
+       if (!inode)
+               return nfs_lookup_revalidate_negative(dir, dentry, flags);
 
        if (is_bad_inode(inode)) {
-               if (flags & LOOKUP_RCU)
-                       return -ECHILD;
                dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n",
                                __func__, dentry);
                goto out_bad;
        }
 
        if (NFS_PROTO(dir)->have_delegation(inode, FMODE_READ))
-               goto out_set_verifier;
+               return nfs_lookup_revalidate_delegated(dir, dentry, inode);
 
        /* Force a full look up iff the parent directory has changed */
        if (!nfs_is_exclusive_create(dir, flags) &&
            nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU)) {
                error = nfs_lookup_verify_inode(inode, flags);
                if (error) {
-                       if (flags & LOOKUP_RCU)
-                               return -ECHILD;
                        if (error == -ESTALE)
-                               goto out_zap_parent;
-                       goto out_error;
+                               nfs_zap_caches(dir);
+                       goto out_bad;
                }
                nfs_advise_use_readdirplus(dir);
                goto out_valid;
@@ -1133,81 +1205,45 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
        if (NFS_STALE(inode))
                goto out_bad;
 
-       error = -ENOMEM;
-       fhandle = nfs_alloc_fhandle();
-       fattr = nfs_alloc_fattr();
-       if (fhandle == NULL || fattr == NULL)
-               goto out_error;
-
-       label = nfs4_label_alloc(NFS_SERVER(inode), GFP_NOWAIT);
-       if (IS_ERR(label))
-               goto out_error;
-
        trace_nfs_lookup_revalidate_enter(dir, dentry, flags);
+       error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
+       error = nfs_lookup_revalidate_dentry(dir, dentry, inode);
        trace_nfs_lookup_revalidate_exit(dir, dentry, flags, error);
-       if (error == -ESTALE || error == -ENOENT)
-               goto out_bad;
-       if (error)
-               goto out_error;
-       if (nfs_compare_fh(NFS_FH(inode), fhandle))
-               goto out_bad;
-       if ((error = nfs_refresh_inode(inode, fattr)) != 0)
-               goto out_bad;
-
-       nfs_setsecurity(inode, fattr, label);
-
-       nfs_free_fattr(fattr);
-       nfs_free_fhandle(fhandle);
-       nfs4_label_free(label);
+       return error;
+out_valid:
+       return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
+out_bad:
+       if (flags & LOOKUP_RCU)
+               return -ECHILD;
+       return nfs_lookup_revalidate_done(dir, dentry, inode, 0);
+}
 
-       /* set a readdirplus hint that we had a cache miss */
-       nfs_force_use_readdirplus(dir);
+static int
+__nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags,
+                       int (*reval)(struct inode *, struct dentry *, unsigned int))
+{
+       struct dentry *parent;
+       struct inode *dir;
+       int ret;
 
-out_set_verifier:
-       nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
- out_valid:
        if (flags & LOOKUP_RCU) {
+               parent = ACCESS_ONCE(dentry->d_parent);
+               dir = d_inode_rcu(parent);
+               if (!dir)
+                       return -ECHILD;
+               ret = reval(dir, dentry, flags);
                if (parent != ACCESS_ONCE(dentry->d_parent))
                        return -ECHILD;
-       } else
+       } else {
+               parent = dget_parent(dentry);
+               ret = reval(d_inode(parent), dentry, flags);
                dput(parent);
-       dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is valid\n",
-                       __func__, dentry);
-       return 1;
-out_zap_parent:
-       nfs_zap_caches(dir);
- out_bad:
-       WARN_ON(flags & LOOKUP_RCU);
-       nfs_free_fattr(fattr);
-       nfs_free_fhandle(fhandle);
-       nfs4_label_free(label);
-       nfs_mark_for_revalidate(dir);
-       if (inode && S_ISDIR(inode->i_mode)) {
-               /* Purge readdir caches. */
-               nfs_zap_caches(inode);
-               /*
-                * We can't d_drop the root of a disconnected tree:
-                * its d_hash is on the s_anon list and d_drop() would hide
-                * it from shrink_dcache_for_unmount(), leading to busy
-                * inodes on unmount and further oopses.
-                */
-               if (IS_ROOT(dentry))
-                       goto out_valid;
        }
-       dput(parent);
-       dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
-                       __func__, dentry);
-       return 0;
-out_error:
-       WARN_ON(flags & LOOKUP_RCU);
-       nfs_free_fattr(fattr);
-       nfs_free_fhandle(fhandle);
-       nfs4_label_free(label);
-       dput(parent);
-       dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) lookup returned error %d\n",
-                       __func__, dentry, error);
-       return error;
+       return ret;
+}
+
+static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+{
+       return __nfs_lookup_revalidate(dentry, flags, nfs_do_lookup_revalidate);
 }
 
 /*
@@ -1560,62 +1596,55 @@ int nfs_atomic_open(struct inode *dir, struct dentry 
*dentry,
 }
 EXPORT_SYMBOL_GPL(nfs_atomic_open);
 
-static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+static int
+nfs4_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
+                         unsigned int flags)
 {
        struct inode *inode;
-       int ret = 0;
 
        if (!(flags & LOOKUP_OPEN) || (flags & LOOKUP_DIRECTORY))
-               goto no_open;
+               goto full_reval;
        if (d_mountpoint(dentry))
-               goto no_open;
-       if (NFS_SB(dentry->d_sb)->caps & NFS_CAP_ATOMIC_OPEN_V1)
-               goto no_open;
+               goto full_reval;
 
        inode = d_inode(dentry);
 
        /* We can't create new files in nfs_open_revalidate(), so we
         * optimize away revalidation of negative dentries.
         */
-       if (inode == NULL) {
-               struct dentry *parent;
-               struct inode *dir;
-
-               if (flags & LOOKUP_RCU) {
-                       parent = ACCESS_ONCE(dentry->d_parent);
-                       dir = d_inode_rcu(parent);
-                       if (!dir)
-                               return -ECHILD;
-               } else {
-                       parent = dget_parent(dentry);
-                       dir = d_inode(parent);
-               }
-               if (!nfs_neg_need_reval(dir, dentry, flags))
-                       ret = 1;
-               else if (flags & LOOKUP_RCU)
-                       ret = -ECHILD;
-               if (!(flags & LOOKUP_RCU))
-                       dput(parent);
-               else if (parent != ACCESS_ONCE(dentry->d_parent))
-                       return -ECHILD;
-               goto out;
-       }
+       if (inode == NULL)
+               goto full_reval;
+
+       if (NFS_PROTO(dir)->have_delegation(inode, FMODE_READ))
+               return nfs_lookup_revalidate_delegated(dir, dentry, inode);
 
        /* NFS only supports OPEN on regular files */
        if (!S_ISREG(inode->i_mode))
-               goto no_open;
+               goto full_reval;
+
        /* We cannot do exclusive creation on a positive dentry */
-       if (flags & LOOKUP_EXCL)
-               goto no_open;
+       if (flags & (LOOKUP_EXCL | LOOKUP_REVAL))
+               goto reval_dentry;
+
+       /* Check if the directory changed */
+       if (!nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU))
+               goto reval_dentry;
 
        /* Let f_op->open() actually open (and revalidate) the file */
-       ret = 1;
+       return 1;
+reval_dentry:
+       if (flags & LOOKUP_RCU)
+               return -ECHILD;
+       return nfs_lookup_revalidate_dentry(dir, dentry, inode);;
 
-out:
-       return ret;
+full_reval:
+       return nfs_do_lookup_revalidate(dir, dentry, flags);
+}
 
-no_open:
-       return nfs_lookup_revalidate(dentry, flags);
+static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+{
+       return __nfs_lookup_revalidate(dentry, flags,
+                       nfs4_do_lookup_revalidate);
 }
 
 #endif /* CONFIG_NFSV4 */
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 209a21ed5f97..27deee5c8fa8 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1317,12 +1317,20 @@ static bool nfs4_mode_match_open_stateid(struct 
nfs4_state *state,
        return false;
 }
 
-static int can_open_cached(struct nfs4_state *state, fmode_t mode, int 
open_mode)
+static int can_open_cached(struct nfs4_state *state, fmode_t mode,
+               int open_mode, enum open_claim_type4 claim)
 {
        int ret = 0;
 
        if (open_mode & (O_EXCL|O_TRUNC))
                goto out;
+       switch (claim) {
+       case NFS4_OPEN_CLAIM_NULL:
+       case NFS4_OPEN_CLAIM_FH:
+               goto out;
+       default:
+               break;
+       }
        switch (mode & (FMODE_READ|FMODE_WRITE)) {
                case FMODE_READ:
                        ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
@@ -1617,7 +1625,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct 
nfs4_opendata *opendata)
 
        for (;;) {
                spin_lock(&state->owner->so_lock);
-               if (can_open_cached(state, fmode, open_mode)) {
+               if (can_open_cached(state, fmode, open_mode, claim)) {
                        update_open_stateflags(state, fmode);
                        spin_unlock(&state->owner->so_lock);
                        goto out_return_state;
@@ -2141,7 +2149,8 @@ static void nfs4_open_prepare(struct rpc_task *task, void 
*calldata)
        if (data->state != NULL) {
                struct nfs_delegation *delegation;
 
-               if (can_open_cached(data->state, data->o_arg.fmode, 
data->o_arg.open_flags))
+               if (can_open_cached(data->state, data->o_arg.fmode,
+                                       data->o_arg.open_flags, claim))
                        goto out_no_action;
                rcu_read_lock();
                delegation = 
rcu_dereference(NFS_I(data->state->inode)->delegation);
diff --git a/include/linux/iova.h b/include/linux/iova.h
index d179b9bf7814..7d23bbb887f2 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -154,6 +154,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, 
unsigned long pfn_lo,
 void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
 void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
        unsigned long start_pfn, unsigned long pfn_32bit);
+bool has_iova_flush_queue(struct iova_domain *iovad);
 int init_iova_flush_queue(struct iova_domain *iovad,
                          iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
@@ -234,6 +235,11 @@ static inline void init_iova_domain(struct iova_domain 
*iovad,
 {
 }
 
+static inline bool has_iova_flush_queue(struct iova_domain *iovad)
+{
+       return false;
+}
+
 static inline int init_iova_flush_queue(struct iova_domain *iovad,
                                        iova_flush_cb flush_cb,
                                        iova_entry_dtor entry_dtor)
diff --git a/include/linux/sched/numa_balancing.h 
b/include/linux/sched/numa_balancing.h
index e7dd04a84ba8..3988762efe15 100644
--- a/include/linux/sched/numa_balancing.h
+++ b/include/linux/sched/numa_balancing.h
@@ -19,7 +19,7 @@
 extern void task_numa_fault(int last_node, int node, int pages, int flags);
 extern pid_t task_numa_group_id(struct task_struct *p);
 extern void set_numabalancing_state(bool enabled);
-extern void task_numa_free(struct task_struct *p);
+extern void task_numa_free(struct task_struct *p, bool final);
 extern bool should_numa_migrate_memory(struct task_struct *p, struct page 
*page,
                                        int src_nid, int dst_cpu);
 #else
@@ -34,7 +34,7 @@ static inline pid_t task_numa_group_id(struct task_struct *p)
 static inline void set_numabalancing_state(bool enabled)
 {
 }
-static inline void task_numa_free(struct task_struct *p)
+static inline void task_numa_free(struct task_struct *p, bool final)
 {
 }
 static inline bool should_numa_migrate_memory(struct task_struct *p,
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 5fb3f6361090..d3775b5379e4 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -22,9 +22,6 @@
 
 #include "vsock_addr.h"
 
-/* vsock-specific sock->sk_state constants */
-#define VSOCK_SS_LISTEN 255
-
 #define LAST_RESERVED_PORT 1023
 
 #define vsock_sk(__sk)    ((struct vsock_sock *)__sk)
diff --git a/kernel/fork.c b/kernel/fork.c
index a5bb8fad5475..919e7cd5cd23 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -415,7 +415,7 @@ void __put_task_struct(struct task_struct *tsk)
        WARN_ON(tsk == current);
 
        cgroup_free(tsk);
-       task_numa_free(tsk);
+       task_numa_free(tsk, true);
        security_task_free(tsk);
        exit_creds(tsk);
        delayacct_tsk_free(tsk);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index af7de1f9906c..0a4e882d4308 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2358,13 +2358,23 @@ static void task_numa_group(struct task_struct *p, int 
cpupid, int flags,
        return;
 }
 
-void task_numa_free(struct task_struct *p)
+/*
+ * Get rid of NUMA staticstics associated with a task (either current or dead).
+ * If @final is set, the task is dead and has reached refcount zero, so we can
+ * safely free all relevant data structures. Otherwise, there might be
+ * concurrent reads from places like load balancing and procfs, and we should
+ * reset the data back to default state without freeing ->numa_faults.
+ */
+void task_numa_free(struct task_struct *p, bool final)
 {
        struct numa_group *grp = p->numa_group;
-       void *numa_faults = p->numa_faults;
+       unsigned long *numa_faults = p->numa_faults;
        unsigned long flags;
        int i;
 
+       if (!numa_faults)
+               return;
+
        if (grp) {
                spin_lock_irqsave(&grp->lock, flags);
                for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
@@ -2377,8 +2387,14 @@ void task_numa_free(struct task_struct *p)
                put_numa_group(grp);
        }
 
-       p->numa_faults = NULL;
-       kfree(numa_faults);
+       if (final) {
+               p->numa_faults = NULL;
+               kfree(numa_faults);
+       } else {
+               p->total_numa_faults = 0;
+               for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
+                       numa_faults[i] = 0;
+       }
 }
 
 /*
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 423091727e15..2aaf7f8a3a96 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -89,9 +89,12 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, 
struct sk_buff *skb,
        __ip_select_ident(net, iph, skb_shinfo(skb)->gso_segs ?: 1);
 
        err = ip_local_out(net, sk, skb);
-       if (unlikely(net_xmit_eval(err)))
-               pkt_len = 0;
-       iptunnel_xmit_stats(dev, pkt_len);
+
+       if (dev) {
+               if (unlikely(net_xmit_eval(err)))
+                       pkt_len = 0;
+               iptunnel_xmit_stats(dev, pkt_len);
+       }
 }
 EXPORT_SYMBOL_GPL(iptunnel_xmit);
 
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index f2fd556c1233..b41170417316 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -36,7 +36,7 @@
  * not support simultaneous connects (two "client" sockets connecting).
  *
  * - "Server" sockets are referred to as listener sockets throughout this
- * implementation because they are in the VSOCK_SS_LISTEN state.  When a
+ * implementation because they are in the TCP_LISTEN state.  When a
  * connection request is received (the second kind of socket mentioned above),
  * we create a new socket and refer to it as a pending socket.  These pending
  * sockets are placed on the pending connection list of the listener socket.
@@ -82,6 +82,15 @@
  * argument, we must ensure the reference count is increased to ensure the
  * socket isn't freed before the function is run; the deferred function will
  * then drop the reference.
+ *
+ * - sk->sk_state uses the TCP state constants because they are widely used by
+ * other address families and exposed to userspace tools like ss(8):
+ *
+ *   TCP_CLOSE - unconnected
+ *   TCP_SYN_SENT - connecting
+ *   TCP_ESTABLISHED - connected
+ *   TCP_CLOSING - disconnecting
+ *   TCP_LISTEN - listening
  */
 
 #include <linux/types.h>
@@ -279,7 +288,8 @@ EXPORT_SYMBOL_GPL(vsock_insert_connected);
 void vsock_remove_bound(struct vsock_sock *vsk)
 {
        spin_lock_bh(&vsock_table_lock);
-       __vsock_remove_bound(vsk);
+       if (__vsock_in_bound_table(vsk))
+               __vsock_remove_bound(vsk);
        spin_unlock_bh(&vsock_table_lock);
 }
 EXPORT_SYMBOL_GPL(vsock_remove_bound);
@@ -287,7 +297,8 @@ EXPORT_SYMBOL_GPL(vsock_remove_bound);
 void vsock_remove_connected(struct vsock_sock *vsk)
 {
        spin_lock_bh(&vsock_table_lock);
-       __vsock_remove_connected(vsk);
+       if (__vsock_in_connected_table(vsk))
+               __vsock_remove_connected(vsk);
        spin_unlock_bh(&vsock_table_lock);
 }
 EXPORT_SYMBOL_GPL(vsock_remove_connected);
@@ -323,35 +334,10 @@ struct sock *vsock_find_connected_socket(struct 
sockaddr_vm *src,
 }
 EXPORT_SYMBOL_GPL(vsock_find_connected_socket);
 
-static bool vsock_in_bound_table(struct vsock_sock *vsk)
-{
-       bool ret;
-
-       spin_lock_bh(&vsock_table_lock);
-       ret = __vsock_in_bound_table(vsk);
-       spin_unlock_bh(&vsock_table_lock);
-
-       return ret;
-}
-
-static bool vsock_in_connected_table(struct vsock_sock *vsk)
-{
-       bool ret;
-
-       spin_lock_bh(&vsock_table_lock);
-       ret = __vsock_in_connected_table(vsk);
-       spin_unlock_bh(&vsock_table_lock);
-
-       return ret;
-}
-
 void vsock_remove_sock(struct vsock_sock *vsk)
 {
-       if (vsock_in_bound_table(vsk))
-               vsock_remove_bound(vsk);
-
-       if (vsock_in_connected_table(vsk))
-               vsock_remove_connected(vsk);
+       vsock_remove_bound(vsk);
+       vsock_remove_connected(vsk);
 }
 EXPORT_SYMBOL_GPL(vsock_remove_sock);
 
@@ -482,10 +468,9 @@ static void vsock_pending_work(struct work_struct *work)
         * incoming packets can't find this socket, and to reduce the reference
         * count.
         */
-       if (vsock_in_connected_table(vsk))
-               vsock_remove_connected(vsk);
+       vsock_remove_connected(vsk);
 
-       sk->sk_state = SS_FREE;
+       sk->sk_state = TCP_CLOSE;
 
 out:
        release_sock(sk);
@@ -626,7 +611,6 @@ struct sock *__vsock_create(struct net *net,
 
        sk->sk_destruct = vsock_sk_destruct;
        sk->sk_backlog_rcv = vsock_queue_rcv_skb;
-       sk->sk_state = 0;
        sock_reset_flag(sk, SOCK_DONE);
 
        INIT_LIST_HEAD(&vsk->bound_table);
@@ -902,7 +886,7 @@ static unsigned int vsock_poll(struct file *file, struct 
socket *sock,
                /* Listening sockets that have connections in their accept
                 * queue can be read.
                 */
-               if (sk->sk_state == VSOCK_SS_LISTEN
+               if (sk->sk_state == TCP_LISTEN
                    && !vsock_is_accept_queue_empty(sk))
                        mask |= POLLIN | POLLRDNORM;
 
@@ -931,7 +915,7 @@ static unsigned int vsock_poll(struct file *file, struct 
socket *sock,
                }
 
                /* Connected sockets that can produce data can be written. */
-               if (sk->sk_state == SS_CONNECTED) {
+               if (sk->sk_state == TCP_ESTABLISHED) {
                        if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
                                bool space_avail_now = false;
                                int ret = transport->notify_poll_out(
@@ -953,7 +937,7 @@ static unsigned int vsock_poll(struct file *file, struct 
socket *sock,
                 * POLLOUT|POLLWRNORM when peer is closed and nothing to read,
                 * but local send is not shutdown.
                 */
-               if (sk->sk_state == SS_UNCONNECTED) {
+               if (sk->sk_state == TCP_CLOSE) {
                        if (!(sk->sk_shutdown & SEND_SHUTDOWN))
                                mask |= POLLOUT | POLLWRNORM;
 
@@ -1123,9 +1107,9 @@ static void vsock_connect_timeout(struct work_struct 
*work)
        sk = sk_vsock(vsk);
 
        lock_sock(sk);
-       if (sk->sk_state == SS_CONNECTING &&
+       if (sk->sk_state == TCP_SYN_SENT &&
            (sk->sk_shutdown != SHUTDOWN_MASK)) {
-               sk->sk_state = SS_UNCONNECTED;
+               sk->sk_state = TCP_CLOSE;
                sk->sk_err = ETIMEDOUT;
                sk->sk_error_report(sk);
                cancel = 1;
@@ -1171,7 +1155,7 @@ static int vsock_stream_connect(struct socket *sock, 
struct sockaddr *addr,
                err = -EALREADY;
                break;
        default:
-               if ((sk->sk_state == VSOCK_SS_LISTEN) ||
+               if ((sk->sk_state == TCP_LISTEN) ||
                    vsock_addr_cast(addr, addr_len, &remote_addr) != 0) {
                        err = -EINVAL;
                        goto out;
@@ -1194,7 +1178,7 @@ static int vsock_stream_connect(struct socket *sock, 
struct sockaddr *addr,
                if (err)
                        goto out;
 
-               sk->sk_state = SS_CONNECTING;
+               sk->sk_state = TCP_SYN_SENT;
 
                err = transport->connect(vsk);
                if (err < 0)
@@ -1214,7 +1198,7 @@ static int vsock_stream_connect(struct socket *sock, 
struct sockaddr *addr,
        timeout = vsk->connect_timeout;
        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 
-       while (sk->sk_state != SS_CONNECTED && sk->sk_err == 0) {
+       while (sk->sk_state != TCP_ESTABLISHED && sk->sk_err == 0) {
                if (flags & O_NONBLOCK) {
                        /* If we're not going to block, we schedule a timeout
                         * function to generate a timeout on the connection
@@ -1235,13 +1219,13 @@ static int vsock_stream_connect(struct socket *sock, 
struct sockaddr *addr,
 
                if (signal_pending(current)) {
                        err = sock_intr_errno(timeout);
-                       sk->sk_state = SS_UNCONNECTED;
+                       sk->sk_state = TCP_CLOSE;
                        sock->state = SS_UNCONNECTED;
                        vsock_transport_cancel_pkt(vsk);
                        goto out_wait;
                } else if (timeout == 0) {
                        err = -ETIMEDOUT;
-                       sk->sk_state = SS_UNCONNECTED;
+                       sk->sk_state = TCP_CLOSE;
                        sock->state = SS_UNCONNECTED;
                        vsock_transport_cancel_pkt(vsk);
                        goto out_wait;
@@ -1252,7 +1236,7 @@ static int vsock_stream_connect(struct socket *sock, 
struct sockaddr *addr,
 
        if (sk->sk_err) {
                err = -sk->sk_err;
-               sk->sk_state = SS_UNCONNECTED;
+               sk->sk_state = TCP_CLOSE;
                sock->state = SS_UNCONNECTED;
        } else {
                err = 0;
@@ -1285,7 +1269,7 @@ static int vsock_accept(struct socket *sock, struct 
socket *newsock, int flags,
                goto out;
        }
 
-       if (listener->sk_state != VSOCK_SS_LISTEN) {
+       if (listener->sk_state != TCP_LISTEN) {
                err = -EINVAL;
                goto out;
        }
@@ -1375,7 +1359,7 @@ static int vsock_listen(struct socket *sock, int backlog)
        }
 
        sk->sk_max_ack_backlog = backlog;
-       sk->sk_state = VSOCK_SS_LISTEN;
+       sk->sk_state = TCP_LISTEN;
 
        err = 0;
 
@@ -1555,7 +1539,7 @@ static int vsock_stream_sendmsg(struct socket *sock, 
struct msghdr *msg,
 
        /* Callers should not provide a destination with stream sockets. */
        if (msg->msg_namelen) {
-               err = sk->sk_state == SS_CONNECTED ? -EISCONN : -EOPNOTSUPP;
+               err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
                goto out;
        }
 
@@ -1566,7 +1550,7 @@ static int vsock_stream_sendmsg(struct socket *sock, 
struct msghdr *msg,
                goto out;
        }
 
-       if (sk->sk_state != SS_CONNECTED ||
+       if (sk->sk_state != TCP_ESTABLISHED ||
            !vsock_addr_bound(&vsk->local_addr)) {
                err = -ENOTCONN;
                goto out;
@@ -1690,7 +1674,7 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr 
*msg, size_t len,
 
        lock_sock(sk);
 
-       if (sk->sk_state != SS_CONNECTED) {
+       if (sk->sk_state != TCP_ESTABLISHED) {
                /* Recvmsg is supposed to return 0 if a peer performs an
                 * orderly shutdown. Differentiate between that case and when a
                 * peer has not connected or a local shutdown occured with the
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index 3bee93bc5d11..52ac3e49c7ef 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -35,6 +35,9 @@
 /* The MTU is 16KB per the host side's design */
 #define HVS_MTU_SIZE           (1024 * 16)
 
+/* How long to wait for graceful shutdown of a connection */
+#define HVS_CLOSE_TIMEOUT (8 * HZ)
+
 struct vmpipe_proto_header {
        u32 pkt_type;
        u32 data_size;
@@ -290,19 +293,32 @@ static void hvs_channel_cb(void *ctx)
                sk->sk_write_space(sk);
 }
 
-static void hvs_close_connection(struct vmbus_channel *chan)
+static void hvs_do_close_lock_held(struct vsock_sock *vsk,
+                                  bool cancel_timeout)
 {
-       struct sock *sk = get_per_channel_state(chan);
-       struct vsock_sock *vsk = vsock_sk(sk);
-
-       lock_sock(sk);
+       struct sock *sk = sk_vsock(vsk);
 
-       sk->sk_state = SS_UNCONNECTED;
        sock_set_flag(sk, SOCK_DONE);
-       vsk->peer_shutdown |= SEND_SHUTDOWN | RCV_SHUTDOWN;
-
+       vsk->peer_shutdown = SHUTDOWN_MASK;
+       if (vsock_stream_has_data(vsk) <= 0)
+               sk->sk_state = TCP_CLOSING;
        sk->sk_state_change(sk);
+       if (vsk->close_work_scheduled &&
+           (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
+               vsk->close_work_scheduled = false;
+               vsock_remove_sock(vsk);
 
+               /* Release the reference taken while scheduling the timeout */
+               sock_put(sk);
+       }
+}
+
+static void hvs_close_connection(struct vmbus_channel *chan)
+{
+       struct sock *sk = get_per_channel_state(chan);
+
+       lock_sock(sk);
+       hvs_do_close_lock_held(vsock_sk(sk), true);
        release_sock(sk);
 }
 
@@ -336,8 +352,8 @@ static void hvs_open_connection(struct vmbus_channel *chan)
 
        lock_sock(sk);
 
-       if ((conn_from_host && sk->sk_state != VSOCK_SS_LISTEN) ||
-           (!conn_from_host && sk->sk_state != SS_CONNECTING))
+       if ((conn_from_host && sk->sk_state != TCP_LISTEN) ||
+           (!conn_from_host && sk->sk_state != TCP_SYN_SENT))
                goto out;
 
        if (conn_from_host) {
@@ -349,7 +365,7 @@ static void hvs_open_connection(struct vmbus_channel *chan)
                if (!new)
                        goto out;
 
-               new->sk_state = SS_CONNECTING;
+               new->sk_state = TCP_SYN_SENT;
                vnew = vsock_sk(new);
                hvs_new = vnew->trans;
                hvs_new->chan = chan;
@@ -383,7 +399,7 @@ static void hvs_open_connection(struct vmbus_channel *chan)
        hvs_set_channel_pending_send_size(chan);
 
        if (conn_from_host) {
-               new->sk_state = SS_CONNECTED;
+               new->sk_state = TCP_ESTABLISHED;
                sk->sk_ack_backlog++;
 
                hvs_addr_init(&vnew->local_addr, if_type);
@@ -396,7 +412,7 @@ static void hvs_open_connection(struct vmbus_channel *chan)
 
                vsock_enqueue_accept(sk, new);
        } else {
-               sk->sk_state = SS_CONNECTED;
+               sk->sk_state = TCP_ESTABLISHED;
                sk->sk_socket->state = SS_CONNECTED;
 
                vsock_insert_connected(vsock_sk(sk));
@@ -446,50 +462,80 @@ static int hvs_connect(struct vsock_sock *vsk)
        return vmbus_send_tl_connect_request(&h->vm_srv_id, &h->host_srv_id);
 }
 
+static void hvs_shutdown_lock_held(struct hvsock *hvs, int mode)
+{
+       struct vmpipe_proto_header hdr;
+
+       if (hvs->fin_sent || !hvs->chan)
+               return;
+
+       /* It can't fail: see hvs_channel_writable_bytes(). */
+       (void)hvs_send_data(hvs->chan, (struct hvs_send_buf *)&hdr, 0);
+       hvs->fin_sent = true;
+}
+
 static int hvs_shutdown(struct vsock_sock *vsk, int mode)
 {
        struct sock *sk = sk_vsock(vsk);
-       struct vmpipe_proto_header hdr;
-       struct hvs_send_buf *send_buf;
-       struct hvsock *hvs;
 
        if (!(mode & SEND_SHUTDOWN))
                return 0;
 
        lock_sock(sk);
+       hvs_shutdown_lock_held(vsk->trans, mode);
+       release_sock(sk);
+       return 0;
+}
 
-       hvs = vsk->trans;
-       if (hvs->fin_sent)
-               goto out;
-
-       send_buf = (struct hvs_send_buf *)&hdr;
+static void hvs_close_timeout(struct work_struct *work)
+{
+       struct vsock_sock *vsk =
+               container_of(work, struct vsock_sock, close_work.work);
+       struct sock *sk = sk_vsock(vsk);
 
-       /* It can't fail: see hvs_channel_writable_bytes(). */
-       (void)hvs_send_data(hvs->chan, send_buf, 0);
+       sock_hold(sk);
+       lock_sock(sk);
+       if (!sock_flag(sk, SOCK_DONE))
+               hvs_do_close_lock_held(vsk, false);
 
-       hvs->fin_sent = true;
-out:
+       vsk->close_work_scheduled = false;
        release_sock(sk);
-       return 0;
+       sock_put(sk);
 }
 
-static void hvs_release(struct vsock_sock *vsk)
+/* Returns true, if it is safe to remove socket; false otherwise */
+static bool hvs_close_lock_held(struct vsock_sock *vsk)
 {
        struct sock *sk = sk_vsock(vsk);
-       struct hvsock *hvs = vsk->trans;
-       struct vmbus_channel *chan;
 
-       lock_sock(sk);
+       if (!(sk->sk_state == TCP_ESTABLISHED ||
+             sk->sk_state == TCP_CLOSING))
+               return true;
 
-       sk->sk_state = TCP_CLOSING;
-       vsock_remove_sock(vsk);
+       if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
+               hvs_shutdown_lock_held(vsk->trans, SHUTDOWN_MASK);
 
-       release_sock(sk);
+       if (sock_flag(sk, SOCK_DONE))
+               return true;
 
-       chan = hvs->chan;
-       if (chan)
-               hvs_shutdown(vsk, RCV_SHUTDOWN | SEND_SHUTDOWN);
+       /* This reference will be dropped by the delayed close routine */
+       sock_hold(sk);
+       INIT_DELAYED_WORK(&vsk->close_work, hvs_close_timeout);
+       vsk->close_work_scheduled = true;
+       schedule_delayed_work(&vsk->close_work, HVS_CLOSE_TIMEOUT);
+       return false;
+}
 
+static void hvs_release(struct vsock_sock *vsk)
+{
+       struct sock *sk = sk_vsock(vsk);
+       bool remove_sock;
+
+       lock_sock(sk);
+       remove_sock = hvs_close_lock_held(vsk);
+       release_sock(sk);
+       if (remove_sock)
+               vsock_remove_sock(vsk);
 }
 
 static void hvs_destruct(struct vsock_sock *vsk)
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 5ebeef8ae3fa..96ab344f17bb 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -417,7 +417,7 @@ static void virtio_vsock_event_fill(struct virtio_vsock 
*vsock)
 static void virtio_vsock_reset_sock(struct sock *sk)
 {
        lock_sock(sk);
-       sk->sk_state = SS_UNCONNECTED;
+       sk->sk_state = TCP_CLOSE;
        sk->sk_err = ECONNRESET;
        sk->sk_error_report(sk);
        release_sock(sk);
diff --git a/net/vmw_vsock/virtio_transport_common.c 
b/net/vmw_vsock/virtio_transport_common.c
index 84d3c0aadd73..f3f3d06cb6d8 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -716,7 +716,7 @@ static void virtio_transport_do_close(struct vsock_sock 
*vsk,
        sock_set_flag(sk, SOCK_DONE);
        vsk->peer_shutdown = SHUTDOWN_MASK;
        if (vsock_stream_has_data(vsk) <= 0)
-               sk->sk_state = SS_DISCONNECTING;
+               sk->sk_state = TCP_CLOSING;
        sk->sk_state_change(sk);
 
        if (vsk->close_work_scheduled &&
@@ -756,8 +756,8 @@ static bool virtio_transport_close(struct vsock_sock *vsk)
 {
        struct sock *sk = &vsk->sk;
 
-       if (!(sk->sk_state == SS_CONNECTED ||
-             sk->sk_state == SS_DISCONNECTING))
+       if (!(sk->sk_state == TCP_ESTABLISHED ||
+             sk->sk_state == TCP_CLOSING))
                return true;
 
        /* Already received SHUTDOWN from peer, reply with RST */
@@ -816,7 +816,7 @@ virtio_transport_recv_connecting(struct sock *sk,
 
        switch (le16_to_cpu(pkt->hdr.op)) {
        case VIRTIO_VSOCK_OP_RESPONSE:
-               sk->sk_state = SS_CONNECTED;
+               sk->sk_state = TCP_ESTABLISHED;
                sk->sk_socket->state = SS_CONNECTED;
                vsock_insert_connected(vsk);
                sk->sk_state_change(sk);
@@ -836,7 +836,7 @@ virtio_transport_recv_connecting(struct sock *sk,
 
 destroy:
        virtio_transport_reset(vsk, pkt);
-       sk->sk_state = SS_UNCONNECTED;
+       sk->sk_state = TCP_CLOSE;
        sk->sk_err = skerr;
        sk->sk_error_report(sk);
        return err;
@@ -872,7 +872,7 @@ virtio_transport_recv_connected(struct sock *sk,
                        vsk->peer_shutdown |= SEND_SHUTDOWN;
                if (vsk->peer_shutdown == SHUTDOWN_MASK &&
                    vsock_stream_has_data(vsk) <= 0)
-                       sk->sk_state = SS_DISCONNECTING;
+                       sk->sk_state = TCP_CLOSING;
                if (le32_to_cpu(pkt->hdr.flags))
                        sk->sk_state_change(sk);
                break;
@@ -943,7 +943,7 @@ virtio_transport_recv_listen(struct sock *sk, struct 
virtio_vsock_pkt *pkt)
 
        lock_sock_nested(child, SINGLE_DEPTH_NESTING);
 
-       child->sk_state = SS_CONNECTED;
+       child->sk_state = TCP_ESTABLISHED;
 
        vchild = vsock_sk(child);
        vsock_addr_init(&vchild->local_addr, le64_to_cpu(pkt->hdr.dst_cid),
@@ -1031,18 +1031,18 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt 
*pkt)
                sk->sk_write_space(sk);
 
        switch (sk->sk_state) {
-       case VSOCK_SS_LISTEN:
+       case TCP_LISTEN:
                virtio_transport_recv_listen(sk, pkt);
                virtio_transport_free_pkt(pkt);
                break;
-       case SS_CONNECTING:
+       case TCP_SYN_SENT:
                virtio_transport_recv_connecting(sk, pkt);
                virtio_transport_free_pkt(pkt);
                break;
-       case SS_CONNECTED:
+       case TCP_ESTABLISHED:
                virtio_transport_recv_connected(sk, pkt);
                break;
-       case SS_DISCONNECTING:
+       case TCP_CLOSING:
                virtio_transport_recv_disconnecting(sk, pkt);
                virtio_transport_free_pkt(pkt);
                break;
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index ad3f47a714f3..ba4cb18c4b9a 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -776,7 +776,7 @@ static int vmci_transport_recv_stream_cb(void *data, struct 
vmci_datagram *dg)
                /* The local context ID may be out of date, update it. */
                vsk->local_addr.svm_cid = dst.svm_cid;
 
-               if (sk->sk_state == SS_CONNECTED)
+               if (sk->sk_state == TCP_ESTABLISHED)
                        vmci_trans(vsk)->notify_ops->handle_notify_pkt(
                                        sk, pkt, true, &dst, &src,
                                        &bh_process_pkt);
@@ -834,7 +834,9 @@ static void vmci_transport_handle_detach(struct sock *sk)
                 * left in our consume queue.
                 */
                if (vsock_stream_has_data(vsk) <= 0) {
-                       if (sk->sk_state == SS_CONNECTING) {
+                       sk->sk_state = TCP_CLOSE;
+
+                       if (sk->sk_state == TCP_SYN_SENT) {
                                /* The peer may detach from a queue pair while
                                 * we are still in the connecting state, i.e.,
                                 * if the peer VM is killed after attaching to
@@ -843,12 +845,10 @@ static void vmci_transport_handle_detach(struct sock *sk)
                                 * event like a reset.
                                 */
 
-                               sk->sk_state = SS_UNCONNECTED;
                                sk->sk_err = ECONNRESET;
                                sk->sk_error_report(sk);
                                return;
                        }
-                       sk->sk_state = SS_UNCONNECTED;
                }
                sk->sk_state_change(sk);
        }
@@ -916,17 +916,17 @@ static void vmci_transport_recv_pkt_work(struct work_struct *work)
        vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context;
 
        switch (sk->sk_state) {
-       case VSOCK_SS_LISTEN:
+       case TCP_LISTEN:
                vmci_transport_recv_listen(sk, pkt);
                break;
-       case SS_CONNECTING:
+       case TCP_SYN_SENT:
                /* Processing of pending connections for servers goes through
                 * the listening socket, so see vmci_transport_recv_listen()
                 * for that path.
                 */
                vmci_transport_recv_connecting_client(sk, pkt);
                break;
-       case SS_CONNECTED:
+       case TCP_ESTABLISHED:
                vmci_transport_recv_connected(sk, pkt);
                break;
        default:
@@ -975,7 +975,7 @@ static int vmci_transport_recv_listen(struct sock *sk,
                vsock_sk(pending)->local_addr.svm_cid = pkt->dg.dst.context;
 
                switch (pending->sk_state) {
-               case SS_CONNECTING:
+               case TCP_SYN_SENT:
                        err = vmci_transport_recv_connecting_server(sk,
                                                                    pending,
                                                                    pkt);
@@ -1105,7 +1105,7 @@ static int vmci_transport_recv_listen(struct sock *sk,
        vsock_add_pending(sk, pending);
        sk->sk_ack_backlog++;
 
-       pending->sk_state = SS_CONNECTING;
+       pending->sk_state = TCP_SYN_SENT;
        vmci_trans(vpending)->produce_size =
                vmci_trans(vpending)->consume_size = qp_size;
        vmci_trans(vpending)->queue_pair_size = qp_size;
@@ -1229,11 +1229,11 @@ vmci_transport_recv_connecting_server(struct sock *listener,
         * the socket will be valid until it is removed from the queue.
         *
         * If we fail sending the attach below, we remove the socket from the
-        * connected list and move the socket to SS_UNCONNECTED before
+        * connected list and move the socket to TCP_CLOSE before
         * releasing the lock, so a pending slow path processing of an incoming
         * packet will not see the socket in the connected state in that case.
         */
-       pending->sk_state = SS_CONNECTED;
+       pending->sk_state = TCP_ESTABLISHED;
 
        vsock_insert_connected(vpending);
 
@@ -1264,7 +1264,7 @@ vmci_transport_recv_connecting_server(struct sock *listener,
 
 destroy:
        pending->sk_err = skerr;
-       pending->sk_state = SS_UNCONNECTED;
+       pending->sk_state = TCP_CLOSE;
        /* As long as we drop our reference, all necessary cleanup will handle
         * when the cleanup function drops its reference and our destruct
         * implementation is called.  Note that since the listen handler will
@@ -1302,7 +1302,7 @@ vmci_transport_recv_connecting_client(struct sock *sk,
                 * accounting (it can already be found since it's in the bound
                 * table).
                 */
-               sk->sk_state = SS_CONNECTED;
+               sk->sk_state = TCP_ESTABLISHED;
                sk->sk_socket->state = SS_CONNECTED;
                vsock_insert_connected(vsk);
                sk->sk_state_change(sk);
@@ -1370,7 +1370,7 @@ vmci_transport_recv_connecting_client(struct sock *sk,
 destroy:
        vmci_transport_send_reset(sk, pkt);
 
-       sk->sk_state = SS_UNCONNECTED;
+       sk->sk_state = TCP_CLOSE;
        sk->sk_err = skerr;
        sk->sk_error_report(sk);
        return err;
@@ -1558,7 +1558,7 @@ static int vmci_transport_recv_connected(struct sock *sk,
                sock_set_flag(sk, SOCK_DONE);
                vsk->peer_shutdown = SHUTDOWN_MASK;
                if (vsock_stream_has_data(vsk) <= 0)
-                       sk->sk_state = SS_DISCONNECTING;
+                       sk->sk_state = TCP_CLOSING;
 
                sk->sk_state_change(sk);
                break;
@@ -1826,7 +1826,7 @@ static int vmci_transport_connect(struct vsock_sock *vsk)
                err = vmci_transport_send_conn_request(
                        sk, vmci_trans(vsk)->queue_pair_size);
                if (err < 0) {
-                       sk->sk_state = SS_UNCONNECTED;
+                       sk->sk_state = TCP_CLOSE;
                        return err;
                }
        } else {
@@ -1836,7 +1836,7 @@ static int vmci_transport_connect(struct vsock_sock *vsk)
                                sk, vmci_trans(vsk)->queue_pair_size,
                                supported_proto_versions);
                if (err < 0) {
-                       sk->sk_state = SS_UNCONNECTED;
+                       sk->sk_state = TCP_CLOSE;
                        return err;
                }
 
diff --git a/net/vmw_vsock/vmci_transport_notify.c b/net/vmw_vsock/vmci_transport_notify.c
index 1406db4d97d1..41fb427f150a 100644
--- a/net/vmw_vsock/vmci_transport_notify.c
+++ b/net/vmw_vsock/vmci_transport_notify.c
@@ -355,7 +355,7 @@ vmci_transport_notify_pkt_poll_in(struct sock *sk,
                 * queue. Ask for notifications when there is something to
                 * read.
                 */
-               if (sk->sk_state == SS_CONNECTED) {
+               if (sk->sk_state == TCP_ESTABLISHED) {
                        if (!send_waiting_read(sk, 1))
                                return -1;
 
diff --git a/net/vmw_vsock/vmci_transport_notify_qstate.c b/net/vmw_vsock/vmci_transport_notify_qstate.c
index f3a0afc46208..0cc84f2bb05e 100644
--- a/net/vmw_vsock/vmci_transport_notify_qstate.c
+++ b/net/vmw_vsock/vmci_transport_notify_qstate.c
@@ -176,7 +176,7 @@ vmci_transport_notify_pkt_poll_in(struct sock *sk,
                 * queue. Ask for notifications when there is something to
                 * read.
                 */
-               if (sk->sk_state == SS_CONNECTED)
+               if (sk->sk_state == TCP_ESTABLISHED)
                        vsock_block_update_write_window(sk);
                *data_ready_now = false;
        }

Reply via email to