Author: hselasky
Date: Thu Mar  8 09:47:09 2018
New Revision: 330644
URL: https://svnweb.freebsd.org/changeset/base/330644

Log:
  Updates for PCI and health monitor recovery in mlx5core.
  This patch accumulates the following Linux commits:
  
  mlx5_health.c
  - 78ccb25861d76a8fc5c678d762180e6918834200
    mlx5_core: Fix wrong name in struct
  - 171bb2c560f45c0427ca3776a4c8f4e26e559400
    mlx5_core: Update health syndromes
  - 0144a95e2ad53a40c62148f44fb0c1f9d2a0d1e9
    mlx5_core: Use accessor functions to read from device memory
  - ac6ea6e81a80172612e0c9ef93720f371b198918
    mlx5_core: Use private health thread for each device
  - fd76ee4da55abb21babfc69310d321b9cb9a32e0
    mlx5_core: Fix internal error detection conditions
  - 2241007b3d783cbdbaa78c30bdb1994278b6f9b9
    mlx5: Clear health sick bit when starting health poll
  - 712bfef60912d91033cb25739f7444d5b8d8c59f
    mlx5: Fix version printout in case of health issue
  - 89d44f0a6c732db23b219be708e2fe1e03ee4842
    mlx5_core: Add pci error handlers to mlx5_core driver
  
  mlx5_cmd.c
  - be87544de8df2b1eb34bcb5e32691287d96f9ec4
    mlx5_core: Fix async commands return code
  - a31208b1e11df334d443ec8cace7636150bb8ce2
    mlx5_core: New init and exit flow for mlx5_core
  - 020446e01eebc9dbe7eda038e570ab9c7ab13586
    mlx5_core: Prepare cmd interface to system errors handling
  - 89d44f0a6c732db23b219be708e2fe1e03ee4842
    mlx5_core: Add pci error handlers to mlx5_core driver
  - 0d834442cc247c7b3f3bd6019512ae03e96dd99a
    mlx5: Fix teardown errors that happen in pci error handler
  
  mlx5_main.c
  - 5fc7197d3a256d9c5de3134870304b24892a4908
    mlx5: Add pci shutdown callback
  
  Submitted by: Matthew Finlay <m...@mellanox.com>
  MFC after:    1 week
  Sponsored by: Mellanox Technologies

Modified:
  head/sys/dev/mlx5/device.h
  head/sys/dev/mlx5/driver.h
  head/sys/dev/mlx5/mlx5_core/mlx5_cmd.c
  head/sys/dev/mlx5/mlx5_core/mlx5_core.h
  head/sys/dev/mlx5/mlx5_core/mlx5_cq.c
  head/sys/dev/mlx5/mlx5_core/mlx5_eq.c
  head/sys/dev/mlx5/mlx5_core/mlx5_health.c
  head/sys/dev/mlx5/mlx5_core/mlx5_main.c
  head/sys/dev/mlx5/mlx5_core/mlx5_mr.c
  head/sys/dev/mlx5/mlx5_core/mlx5_qp.c
  head/sys/dev/mlx5/mlx5_core/mlx5_srq.c

Modified: head/sys/dev/mlx5/device.h
==============================================================================
--- head/sys/dev/mlx5/device.h  Thu Mar  8 08:04:32 2018        (r330643)
+++ head/sys/dev/mlx5/device.h  Thu Mar  8 09:47:09 2018        (r330644)
@@ -417,7 +417,7 @@ struct mlx5_health_buffer {
        __be32          rsvd2;
        u8              irisc_index;
        u8              synd;
-       __be16          ext_sync;
+       __be16          ext_synd;
 };
 
 struct mlx5_init_seg {

Modified: head/sys/dev/mlx5/driver.h
==============================================================================
--- head/sys/dev/mlx5/driver.h  Thu Mar  8 08:04:32 2018        (r330643)
+++ head/sys/dev/mlx5/driver.h  Thu Mar  8 09:47:09 2018        (r330644)
@@ -479,9 +479,11 @@ struct mlx5_core_health {
        struct mlx5_health_buffer __iomem       *health;
        __be32 __iomem                 *health_counter;
        struct timer_list               timer;
-       struct list_head                list;
        u32                             prev;
        int                             miss_counter;
+       bool                            sick;
+       struct workqueue_struct        *wq;
+       struct work_struct              work;
 };
 
 #define        MLX5_CQ_LINEAR_ARRAY_SIZE       1024
@@ -583,6 +585,17 @@ enum mlx5_device_state {
        MLX5_DEVICE_STATE_INTERNAL_ERROR,
 };
 
+enum mlx5_interface_state {
+       MLX5_INTERFACE_STATE_DOWN = BIT(0),
+       MLX5_INTERFACE_STATE_UP = BIT(1),
+       MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
+};
+
+enum mlx5_pci_status {
+       MLX5_PCI_STATUS_DISABLED,
+       MLX5_PCI_STATUS_ENABLED,
+};
+
 struct mlx5_special_contexts {
        int resd_lkey;
 };
@@ -590,6 +603,9 @@ struct mlx5_special_contexts {
 struct mlx5_flow_root_namespace;
 struct mlx5_core_dev {
        struct pci_dev         *pdev;
+       /* sync pci state */
+       struct mutex            pci_status_mutex;
+       enum mlx5_pci_status    pci_status;
        char                    board_id[MLX5_BOARD_ID_LEN];
        struct mlx5_cmd         cmd;
        struct mlx5_port_caps   port_caps[MLX5_MAX_PORTS];
@@ -598,6 +614,9 @@ struct mlx5_core_dev {
        phys_addr_t             iseg_base;
        struct mlx5_init_seg __iomem *iseg;
        enum mlx5_device_state  state;
+       /* sync interface state */
+       struct mutex            intf_state_mutex;
+       unsigned long           intf_state;
        void                    (*event) (struct mlx5_core_dev *dev,
                                          enum mlx5_dev_event event,
                                          unsigned long param);
@@ -849,8 +868,8 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct
 int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
 int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
 void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
-void mlx5_health_cleanup(void);
-void  __init mlx5_health_init(void);
+void mlx5_health_cleanup(struct mlx5_core_dev *dev);
+int mlx5_health_init(struct mlx5_core_dev *dev);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
 
@@ -908,7 +927,6 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn
 void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
 struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
 void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u32 vector);
-void mlx5_trigger_cmd_completions(struct mlx5_core_dev *dev);
 void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
 int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                       int nent, u64 mask, const char *name, struct mlx5_uar *uar);
@@ -1026,6 +1044,10 @@ struct mlx5_profile {
 
 enum {
        MLX5_PCI_DEV_IS_VF              = 1 << 0,
+};
+
+enum {
+       MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
 };
 
 static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
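
The intf_state/intf_state_mutex pair and the pci_status tracking added above are
what the reworked mlx5_main.c code further down uses to make the load and unload
paths idempotent, so the new PCI error and shutdown callbacks can invoke them
safely. A minimal sketch of the locking pattern, condensed from the
mlx5_load_one() hunk below (illustration only, not part of the patch; the
function name here is made up):

    /* Sketch: how mlx5_load_one() uses the new interface-state bits. */
    static int mlx5_load_one_sketch(struct mlx5_core_dev *dev)
    {
            int err = 0;

            mutex_lock(&dev->intf_state_mutex);
            if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state))
                    goto out;       /* already up: repeat callers are a NOP */

            /* ... firmware, EQ and resource-table bring-up elided ... */

            clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
            set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
    out:
            mutex_unlock(&dev->intf_state_mutex);
            return err;
    }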

Modified: head/sys/dev/mlx5/mlx5_core/mlx5_cmd.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_core/mlx5_cmd.c      Thu Mar  8 08:04:32 2018        (r330643)
+++ head/sys/dev/mlx5/mlx5_core/mlx5_cmd.c      Thu Mar  8 09:47:09 2018        (r330644)
@@ -295,6 +295,158 @@ static void dump_buf(void *buf, int size, int data_onl
                pr_debug("\n");
 }
 
+enum {
+       MLX5_DRIVER_STATUS_ABORTED = 0xfe,
+       MLX5_DRIVER_SYND = 0xbadd00de,
+};
+
+static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
+                                      u32 *synd, u8 *status)
+{
+       *synd = 0;
+       *status = 0;
+
+       switch (op) {
+       case MLX5_CMD_OP_TEARDOWN_HCA:
+       case MLX5_CMD_OP_DISABLE_HCA:
+       case MLX5_CMD_OP_MANAGE_PAGES:
+       case MLX5_CMD_OP_DESTROY_MKEY:
+       case MLX5_CMD_OP_DESTROY_EQ:
+       case MLX5_CMD_OP_DESTROY_CQ:
+       case MLX5_CMD_OP_DESTROY_QP:
+       case MLX5_CMD_OP_DESTROY_PSV:
+       case MLX5_CMD_OP_DESTROY_SRQ:
+       case MLX5_CMD_OP_DESTROY_XRC_SRQ:
+       case MLX5_CMD_OP_DESTROY_DCT:
+       case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
+       case MLX5_CMD_OP_DEALLOC_PD:
+       case MLX5_CMD_OP_DEALLOC_UAR:
+       case MLX5_CMD_OP_DETACH_FROM_MCG:
+       case MLX5_CMD_OP_DEALLOC_XRCD:
+       case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
+       case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
+       case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
+       case MLX5_CMD_OP_DESTROY_TIR:
+       case MLX5_CMD_OP_DESTROY_SQ:
+       case MLX5_CMD_OP_DESTROY_RQ:
+       case MLX5_CMD_OP_DESTROY_RMP:
+       case MLX5_CMD_OP_DESTROY_TIS:
+       case MLX5_CMD_OP_DESTROY_RQT:
+       case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
+       case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
+       case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
+       case MLX5_CMD_OP_2ERR_QP:
+       case MLX5_CMD_OP_2RST_QP:
+       case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
+       case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+       case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+       case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
+               return MLX5_CMD_STAT_OK;
+
+       case MLX5_CMD_OP_QUERY_HCA_CAP:
+       case MLX5_CMD_OP_QUERY_ADAPTER:
+       case MLX5_CMD_OP_INIT_HCA:
+       case MLX5_CMD_OP_ENABLE_HCA:
+       case MLX5_CMD_OP_QUERY_PAGES:
+       case MLX5_CMD_OP_SET_HCA_CAP:
+       case MLX5_CMD_OP_QUERY_ISSI:
+       case MLX5_CMD_OP_SET_ISSI:
+       case MLX5_CMD_OP_CREATE_MKEY:
+       case MLX5_CMD_OP_QUERY_MKEY:
+       case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
+       case MLX5_CMD_OP_PAGE_FAULT_RESUME:
+       case MLX5_CMD_OP_CREATE_EQ:
+       case MLX5_CMD_OP_QUERY_EQ:
+       case MLX5_CMD_OP_GEN_EQE:
+       case MLX5_CMD_OP_CREATE_CQ:
+       case MLX5_CMD_OP_QUERY_CQ:
+       case MLX5_CMD_OP_MODIFY_CQ:
+       case MLX5_CMD_OP_CREATE_QP:
+       case MLX5_CMD_OP_RST2INIT_QP:
+       case MLX5_CMD_OP_INIT2RTR_QP:
+       case MLX5_CMD_OP_RTR2RTS_QP:
+       case MLX5_CMD_OP_RTS2RTS_QP:
+       case MLX5_CMD_OP_SQERR2RTS_QP:
+       case MLX5_CMD_OP_QUERY_QP:
+       case MLX5_CMD_OP_SQD_RTS_QP:
+       case MLX5_CMD_OP_INIT2INIT_QP:
+       case MLX5_CMD_OP_CREATE_PSV:
+       case MLX5_CMD_OP_CREATE_SRQ:
+       case MLX5_CMD_OP_QUERY_SRQ:
+       case MLX5_CMD_OP_ARM_RQ:
+       case MLX5_CMD_OP_CREATE_XRC_SRQ:
+       case MLX5_CMD_OP_QUERY_XRC_SRQ:
+       case MLX5_CMD_OP_ARM_XRC_SRQ:
+       case MLX5_CMD_OP_CREATE_DCT:
+       case MLX5_CMD_OP_DRAIN_DCT:
+       case MLX5_CMD_OP_QUERY_DCT:
+       case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
+       case MLX5_CMD_OP_QUERY_VPORT_STATE:
+       case MLX5_CMD_OP_MODIFY_VPORT_STATE:
+       case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
+       case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
+       case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
+       case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
+       case MLX5_CMD_OP_SET_ROCE_ADDRESS:
+       case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+       case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
+       case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
+       case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
+       case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
+       case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+       case MLX5_CMD_OP_QUERY_Q_COUNTER:
+       case MLX5_CMD_OP_ALLOC_PD:
+       case MLX5_CMD_OP_ALLOC_UAR:
+       case MLX5_CMD_OP_CONFIG_INT_MODERATION:
+       case MLX5_CMD_OP_ACCESS_REG:
+       case MLX5_CMD_OP_ATTACH_TO_MCG:
+       case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
+       case MLX5_CMD_OP_MAD_IFC:
+       case MLX5_CMD_OP_QUERY_MAD_DEMUX:
+       case MLX5_CMD_OP_SET_MAD_DEMUX:
+       case MLX5_CMD_OP_NOP:
+       case MLX5_CMD_OP_ALLOC_XRCD:
+       case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
+       case MLX5_CMD_OP_QUERY_CONG_STATUS:
+       case MLX5_CMD_OP_MODIFY_CONG_STATUS:
+       case MLX5_CMD_OP_QUERY_CONG_PARAMS:
+       case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
+       case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
+       case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+       case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+       case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+       case MLX5_CMD_OP_CREATE_TIR:
+       case MLX5_CMD_OP_MODIFY_TIR:
+       case MLX5_CMD_OP_QUERY_TIR:
+       case MLX5_CMD_OP_CREATE_SQ:
+       case MLX5_CMD_OP_MODIFY_SQ:
+       case MLX5_CMD_OP_QUERY_SQ:
+       case MLX5_CMD_OP_CREATE_RQ:
+       case MLX5_CMD_OP_MODIFY_RQ:
+       case MLX5_CMD_OP_QUERY_RQ:
+       case MLX5_CMD_OP_CREATE_RMP:
+       case MLX5_CMD_OP_MODIFY_RMP:
+       case MLX5_CMD_OP_QUERY_RMP:
+       case MLX5_CMD_OP_CREATE_TIS:
+       case MLX5_CMD_OP_MODIFY_TIS:
+       case MLX5_CMD_OP_QUERY_TIS:
+       case MLX5_CMD_OP_CREATE_RQT:
+       case MLX5_CMD_OP_MODIFY_RQT:
+       case MLX5_CMD_OP_QUERY_RQT:
+       case MLX5_CMD_OP_CREATE_FLOW_TABLE:
+       case MLX5_CMD_OP_QUERY_FLOW_TABLE:
+       case MLX5_CMD_OP_CREATE_FLOW_GROUP:
+       case MLX5_CMD_OP_QUERY_FLOW_GROUP:
+       case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+               *status = MLX5_DRIVER_STATUS_ABORTED;
+               *synd = MLX5_DRIVER_SYND;
+               return -EIO;
+       default:
+               mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
+               return -EINVAL;
+       }
+}
+
 const char *mlx5_command_str(int command)
 {
         #define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd
@@ -743,6 +895,7 @@ static void complete_command(struct mlx5_cmd_work_ent 
                mlx5_free_cmd_msg(dev, ent->out);
                free_msg(dev, ent->in);
 
+               err = err ? err : ent->status;
                free_cmd(ent);
                callback(err, context);
        } else {
@@ -861,6 +1014,16 @@ static int wait_func(struct mlx5_core_dev *dev, struct
        return err;
 }
 
+static __be32 *get_synd_ptr(struct mlx5_outbox_hdr *out)
+{
+       return &out->syndrome;
+}
+
+static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
+{
+       return &out->status;
+}
+
 /*  Notes:
  *    1. Callback functions may not sleep
  *    2. page queue commands do not support asynchrous completion
@@ -1102,6 +1265,10 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, 
                        else
                                ent->ret = 0;
                        ent->status = ent->lay->status_own >> 1;
+                       if (vector & MLX5_TRIGGERED_CMD_COMP)
+                               ent->status = MLX5_DRIVER_STATUS_ABORTED;
+                       else
+                               ent->status = ent->lay->status_own >> 1;
 
                        mlx5_core_dbg(dev,
                                      "FW command ret 0x%x, status %s(0x%x)\n",
@@ -1115,33 +1282,6 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, 
 }
 EXPORT_SYMBOL(mlx5_cmd_comp_handler);
 
-void mlx5_trigger_cmd_completions(struct mlx5_core_dev *dev)
-{
-       unsigned long vector;
-       int i = 0;
-       unsigned long flags;
-       synchronize_irq(dev->priv.eq_table.cmd_eq.irqn);
-       spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
-       vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
-       spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
-
-       if (!vector)
-               return;
-
-       for (i = 0; i < (1 << dev->cmd.log_sz); i++) {
-               struct mlx5_cmd_work_ent *ent = dev->cmd.ent_arr[i];
-
-               if (!test_bit(i, &vector))
-                       continue;
-
-               while (ent->busy)
-                       usleep_range(1000, 1100);
-               free_ent(&dev->cmd, i);
-               complete_command(ent);
-       }
-}
-EXPORT_SYMBOL(mlx5_trigger_cmd_completions);
-
 static int status_to_err(u8 status)
 {
        return status ? -1 : 0; /* TBD more meaningful codes */
@@ -1175,6 +1315,11 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core
        return msg;
 }
 
+static u16 opcode_from_in(struct mlx5_inbox_hdr *in)
+{
+       return be16_to_cpu(in->opcode);
+}
+
 static int is_manage_pages(struct mlx5_inbox_hdr *in)
 {
        return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
@@ -1191,7 +1336,16 @@ static int cmd_exec_helper(struct mlx5_core_dev *dev,
        const gfp_t gfp = GFP_KERNEL;
        int err;
        u8 status = 0;
+       u32 drv_synd;
 
+       if (pci_channel_offline(dev->pdev) ||
+           dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+               err = mlx5_internal_err_ret_value(dev, opcode_from_in(in), &drv_synd, &status);
+               *get_synd_ptr(out) = cpu_to_be32(drv_synd);
+               *get_status_ptr(out) = status;
+               return err;
+       }
+
        pages_queue = is_manage_pages(in);
 
        inb = alloc_msg(dev, in_size, gfp);
@@ -1377,6 +1531,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
        int err;
        int i;
 
+       memset(cmd, 0, sizeof(*cmd));
        cmd_if_rev = cmdif_rev_get(dev);
        if (cmd_if_rev != CMD_IF_REV) {
                device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Driver cmdif rev(%d) differs from firmware's(%d)\n", CMD_IF_REV, cmd_if_rev);

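The net effect of the mlx5_cmd.c changes above: once the PCI channel is offline
or the device has entered MLX5_DEVICE_STATE_INTERNAL_ERROR, command execution
never touches the hardware. A rough sketch of the early-out added to
cmd_exec_helper(), with the opcode classification spelled out in comments
(illustration only, not part of the patch; err, status and drv_synd are locals
of cmd_exec_helper()):

    if (pci_channel_offline(dev->pdev) ||
        dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
            /*
             * Teardown-type opcodes (DESTROY_*, DEALLOC_*, TEARDOWN_HCA,
             * 2ERR_QP/2RST_QP, ...) return MLX5_CMD_STAT_OK so cleanup can
             * keep going; create/query-type opcodes fail with -EIO, status
             * MLX5_DRIVER_STATUS_ABORTED and syndrome MLX5_DRIVER_SYND.
             */
            err = mlx5_internal_err_ret_value(dev, opcode_from_in(in),
                                              &drv_synd, &status);
            *get_synd_ptr(out) = cpu_to_be32(drv_synd);
            *get_status_ptr(out) = status;
            return err;
    }
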
Modified: head/sys/dev/mlx5/mlx5_core/mlx5_core.h
==============================================================================
--- head/sys/dev/mlx5/mlx5_core/mlx5_core.h     Thu Mar  8 08:04:32 2018        (r330643)
+++ head/sys/dev/mlx5/mlx5_core/mlx5_core.h     Thu Mar  8 09:47:09 2018        (r330644)
@@ -70,6 +70,10 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
 int mlx5_query_board_id(struct mlx5_core_dev *dev);
 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
 int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
+void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
+                    unsigned long param);
+void mlx5_enter_error_state(struct mlx5_core_dev *dev);
+void mlx5_disable_device(struct mlx5_core_dev *dev);
 
 void mlx5e_init(void);
 void mlx5e_cleanup(void);

Modified: head/sys/dev/mlx5/mlx5_core/mlx5_cq.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_core/mlx5_cq.c       Thu Mar  8 08:04:32 2018        (r330643)
+++ head/sys/dev/mlx5/mlx5_core/mlx5_cq.c       Thu Mar  8 09:47:09 2018        (r330644)
@@ -294,6 +294,7 @@ int mlx5_init_cq_table(struct mlx5_core_dev *dev)
        int err;
        int x;
 
+       memset(table, 0, sizeof(*table));
        spin_lock_init(&table->lock);
        for (x = 0; x != MLX5_CQ_LINEAR_ARRAY_SIZE; x++)
                spin_lock_init(&table->linear_array[x].lock);

Modified: head/sys/dev/mlx5/mlx5_core/mlx5_eq.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_core/mlx5_eq.c       Thu Mar  8 08:04:32 2018        (r330643)
+++ head/sys/dev/mlx5/mlx5_core/mlx5_eq.c       Thu Mar  8 09:47:09 2018        (r330644)
@@ -398,6 +398,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, stru
        int inlen;
 
        eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
+       eq->cons_index = 0;
        err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
                             &eq->buf);
        if (err)

Modified: head/sys/dev/mlx5/mlx5_core/mlx5_health.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_core/mlx5_health.c   Thu Mar  8 08:04:32 2018        (r330643)
+++ head/sys/dev/mlx5/mlx5_core/mlx5_health.c   Thu Mar  8 09:47:09 2018        (r330644)
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/vmalloc.h>
+#include <linux/hardirq.h>
 #include <dev/mlx5/driver.h>
 #include <dev/mlx5/mlx5_ifc.h>
 #include "mlx5_core.h"
@@ -36,31 +37,113 @@
 #define        MLX5_HEALTH_POLL_INTERVAL       (2 * HZ)
 #define        MAX_MISSES                      3
 
-static DEFINE_SPINLOCK(health_lock);
-static LIST_HEAD(health_list);
-static struct work_struct health_work;
+enum {
+       MLX5_NIC_IFC_FULL               = 0,
+       MLX5_NIC_IFC_DISABLED           = 1,
+       MLX5_NIC_IFC_NO_DRAM_NIC        = 2
+};
 
+static u8 get_nic_interface(struct mlx5_core_dev *dev)
+{
+       return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
+}
+
+static void mlx5_trigger_cmd_completions(struct mlx5_core_dev *dev)
+{
+       unsigned long flags;
+       u64 vector;
+
+       /* wait for pending handlers to complete */
+       synchronize_irq(dev->priv.msix_arr[MLX5_EQ_VEC_CMD].vector);
+       spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
+       vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
+       if (!vector)
+               goto no_trig;
+
+       vector |= MLX5_TRIGGERED_CMD_COMP;
+       spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+
+       mlx5_core_dbg(dev, "vector 0x%lx\n", vector);
+       mlx5_cmd_comp_handler(dev, vector);
+       return;
+
+no_trig:
+       spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+}
+
+static int in_fatal(struct mlx5_core_dev *dev)
+{
+       struct mlx5_core_health *health = &dev->priv.health;
+       struct mlx5_health_buffer __iomem *h = health->health;
+
+       if (get_nic_interface(dev) == MLX5_NIC_IFC_DISABLED)
+               return 1;
+
+       if (ioread32be(&h->fw_ver) == 0xffffffff)
+               return 1;
+
+       return 0;
+}
+
+void mlx5_enter_error_state(struct mlx5_core_dev *dev)
+{
+       if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+               return;
+
+       mlx5_core_err(dev, "start\n");
+       if (pci_channel_offline(dev->pdev) || in_fatal(dev))
+               dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+
+       mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
+       mlx5_core_err(dev, "end\n");
+}
+
+static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
+{
+       u8 nic_interface = get_nic_interface(dev);
+
+       switch (nic_interface) {
+       case MLX5_NIC_IFC_FULL:
+               mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n");
+               break;
+
+       case MLX5_NIC_IFC_DISABLED:
+               mlx5_core_warn(dev, "starting teardown\n");
+               break;
+
+       case MLX5_NIC_IFC_NO_DRAM_NIC:
+               mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n");
+               break;
+       default:
+               mlx5_core_warn(dev, "Expected to see disabled NIC but it is has invalid value %d\n",
+                              nic_interface);
+       }
+
+       mlx5_disable_device(dev);
+}
+
 static void health_care(struct work_struct *work)
 {
-       struct mlx5_core_health *health, *n;
+       struct mlx5_core_health *health;
        struct mlx5_core_dev *dev;
        struct mlx5_priv *priv;
-       LIST_HEAD(tlist);
 
-       spin_lock_irq(&health_lock);
-       list_splice_init(&health_list, &tlist);
+       health = container_of(work, struct mlx5_core_health, work);
+       priv = container_of(health, struct mlx5_priv, health);
+       dev = container_of(priv, struct mlx5_core_dev, priv);
+       mlx5_core_warn(dev, "handling bad device here\n");
+       mlx5_handle_bad_state(dev);
+}
 
-       spin_unlock_irq(&health_lock);
+static int get_next_poll_jiffies(void)
+{
+       unsigned long next;
 
-       list_for_each_entry_safe(health, n, &tlist, list) {
-               priv = container_of(health, struct mlx5_priv, health);
-               dev = container_of(priv, struct mlx5_core_dev, priv);
-               mlx5_core_warn(dev, "handling bad device here\n");
-               /* nothing yet */
-               spin_lock_irq(&health_lock);
-               list_del_init(&health->list);
-               spin_unlock_irq(&health_lock);
-       }
+       get_random_bytes(&next, sizeof(next));
+       next %= HZ;
+       next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
+
+       return next;
 }
 
 static const char *hsynd_str(u8 synd)
@@ -70,6 +153,8 @@ static const char *hsynd_str(u8 synd)
                return "firmware internal error";
        case MLX5_HEALTH_SYNDR_IRISC_ERR:
                return "irisc not responding";
+       case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR:
+               return "unrecoverable hardware error";
        case MLX5_HEALTH_SYNDR_CRC_ERR:
                return "firmware CRC error";
        case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
@@ -80,51 +165,59 @@ static const char *hsynd_str(u8 synd)
                return "async EQ buffer overrun";
        case MLX5_HEALTH_SYNDR_EQ_ERR:
                return "EQ error";
+       case MLX5_HEALTH_SYNDR_EQ_INV:
+               return "Invalid EQ referenced";
        case MLX5_HEALTH_SYNDR_FFSER_ERR:
                return "FFSER error";
+       case MLX5_HEALTH_SYNDR_HIGH_TEMP:
+               return "High temprature";
        default:
                return "unrecognized error";
        }
 }
 
-static u16 read_be16(__be16 __iomem *p)
-{
-       return swab16(readl((__force u16 __iomem *) p));
-}
-
-static u32 read_be32(__be32 __iomem *p)
-{
-       return swab32(readl((__force u32 __iomem *) p));
-}
-
 static void print_health_info(struct mlx5_core_dev *dev)
 {
        struct mlx5_core_health *health = &dev->priv.health;
        struct mlx5_health_buffer __iomem *h = health->health;
+       char fw_str[18];
+       u32 fw;
        int i;
 
+       /* If the syndrom is 0, the device is OK and no need to print buffer */
+       if (!ioread8(&h->synd))
+               return;
+
        for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
-               printf("mlx5_core: INFO: ""assert_var[%d] 0x%08x\n", i, read_be32(h->assert_var + i));
+               printf("mlx5_core: INFO: ""assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i));
 
-       printf("mlx5_core: INFO: ""assert_exit_ptr 0x%08x\n", read_be32(&h->assert_exit_ptr));
-       printf("mlx5_core: INFO: ""assert_callra 0x%08x\n", read_be32(&h->assert_callra));
-       printf("mlx5_core: INFO: ""fw_ver 0x%08x\n", read_be32(&h->fw_ver));
-       printf("mlx5_core: INFO: ""hw_id 0x%08x\n", read_be32(&h->hw_id));
-       printf("mlx5_core: INFO: ""irisc_index %d\n", readb(&h->irisc_index));
-       printf("mlx5_core: INFO: ""synd 0x%x: %s\n", readb(&h->synd), hsynd_str(readb(&h->synd)));
-       printf("mlx5_core: INFO: ""ext_sync 0x%04x\n", read_be16(&h->ext_sync));
+       printf("mlx5_core: INFO: ""assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
+       printf("mlx5_core: INFO: ""assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
+       snprintf(fw_str, sizeof(fw_str), "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
+       printf("mlx5_core: INFO: ""fw_ver %s\n", fw_str);
+       printf("mlx5_core: INFO: ""hw_id 0x%08x\n", ioread32be(&h->hw_id));
+       printf("mlx5_core: INFO: ""irisc_index %d\n", ioread8(&h->irisc_index));
+       printf("mlx5_core: INFO: ""synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
+       printf("mlx5_core: INFO: ""ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
+       fw = ioread32be(&h->fw_ver);
+       printf("mlx5_core: INFO: ""raw fw_ver 0x%08x\n", fw);
 }
 
 static void poll_health(unsigned long data)
 {
        struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
        struct mlx5_core_health *health = &dev->priv.health;
-       int next;
        u32 count;
 
        if (dev->state != MLX5_DEVICE_STATE_UP)
                return;
 
+       if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+               mlx5_trigger_cmd_completions(dev);
+               mod_timer(&health->timer, get_next_poll_jiffies());
+               return;
+       }
+
        count = ioread32be(health->health_counter);
        if (count == health->prev)
                ++health->miss_counter;
@@ -133,28 +226,25 @@ static void poll_health(unsigned long data)
 
        health->prev = count;
        if (health->miss_counter == MAX_MISSES) {
-               mlx5_core_err(dev, "device's health compromised\n");
+               mlx5_core_err(dev, "device's health compromised - reached miss count\n");
                print_health_info(dev);
-               spin_lock_irq(&health_lock);
-               list_add_tail(&health->list, &health_list);
-               spin_unlock_irq(&health_lock);
-
-               if (!queue_work(mlx5_core_wq, &health_work))
-                       mlx5_core_warn(dev, "failed to queue health work\n");
        } else {
-               get_random_bytes(&next, sizeof(next));
-               next %= HZ;
-               next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
-               mod_timer(&health->timer, next);
+               mod_timer(&health->timer, get_next_poll_jiffies());
        }
+
+       if (in_fatal(dev) && !health->sick) {
+               health->sick = true;
+               print_health_info(dev);
+               queue_work(health->wq, &health->work);
+       }
 }
 
 void mlx5_start_health_poll(struct mlx5_core_dev *dev)
 {
        struct mlx5_core_health *health = &dev->priv.health;
 
-       INIT_LIST_HEAD(&health->list);
        init_timer(&health->timer);
+       health->sick = 0;
        health->health = &dev->iseg->health;
        health->health_counter = &dev->iseg->health_counter;
 
@@ -168,19 +258,35 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
        struct mlx5_core_health *health = &dev->priv.health;
 
        del_timer_sync(&health->timer);
-
-       spin_lock_irq(&health_lock);
-       if (!list_empty(&health->list))
-               list_del_init(&health->list);
-       spin_unlock_irq(&health_lock);
 }
 
-void mlx5_health_cleanup(void)
+void mlx5_health_cleanup(struct mlx5_core_dev *dev)
 {
+       struct mlx5_core_health *health = &dev->priv.health;
+
+       destroy_workqueue(health->wq);
 }
 
-void  __init mlx5_health_init(void)
+#define HEALTH_NAME "mlx5_health"
+int mlx5_health_init(struct mlx5_core_dev *dev)
 {
+       struct mlx5_core_health *health;
+       char *name;
+       int len;
 
-       INIT_WORK(&health_work, health_care);
+       health = &dev->priv.health;
+       len = strlen(HEALTH_NAME) + strlen(dev_name(&dev->pdev->dev));
+       name = kmalloc(len + 1, GFP_KERNEL);
+       if (!name)
+               return -ENOMEM;
+
+       snprintf(name, len, "%s:%s", HEALTH_NAME, dev_name(&dev->pdev->dev));
+       health->wq = create_singlethread_workqueue(name);
+       kfree(name);
+       if (!health->wq)
+               return -ENOMEM;
+
+       INIT_WORK(&health->work, health_care);
+
+       return 0;
 }
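
With this rework, health monitoring is fully per device: each mlx5_core_dev gets
its own "mlx5_health:<device name>" workqueue, and poll_health() escalates a
stuck or fatally wounded device through in_fatal() -> print_health_info() ->
health_care() -> mlx5_handle_bad_state() -> mlx5_disable_device(). A minimal
sketch of the setup/teardown ordering the new per-device API implies (the helper
names below are hypothetical, illustration only):

    /* Hypothetical helpers showing the per-device health API ordering. */
    static int example_health_setup(struct mlx5_core_dev *dev)
    {
            int err;

            err = mlx5_health_init(dev);    /* allocate per-device workqueue */
            if (err)
                    return err;
            mlx5_start_health_poll(dev);    /* arm the ~2 second poll timer */
            return 0;
    }

    static void example_health_teardown(struct mlx5_core_dev *dev)
    {
            mlx5_stop_health_poll(dev);     /* del_timer_sync() the poll timer */
            mlx5_health_cleanup(dev);       /* destroy_workqueue() */
    }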

Modified: head/sys/dev/mlx5/mlx5_core/mlx5_main.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_core/mlx5_main.c     Thu Mar  8 08:04:32 2018        (r330643)
+++ head/sys/dev/mlx5/mlx5_core/mlx5_main.c     Thu Mar  8 09:47:09 2018        (r330644)
@@ -63,7 +63,6 @@ MODULE_PARM_DESC(prof_sel, "profile selector. Valid ra
 
 #define NUMA_NO_NODE       -1
 
-struct workqueue_struct *mlx5_core_wq;
 static LIST_HEAD(intf_list);
 static LIST_HEAD(dev_list);
 static DEFINE_MUTEX(intf_mutex);
@@ -186,6 +185,34 @@ static int set_dma_caps(struct pci_dev *pdev)
        return err;
 }
 
+static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
+{
+       struct pci_dev *pdev = dev->pdev;
+       int err = 0;
+
+       mutex_lock(&dev->pci_status_mutex);
+       if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
+               err = pci_enable_device(pdev);
+               if (!err)
+                       dev->pci_status = MLX5_PCI_STATUS_ENABLED;
+       }
+       mutex_unlock(&dev->pci_status_mutex);
+
+       return err;
+}
+
+static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
+{
+       struct pci_dev *pdev = dev->pdev;
+
+       mutex_lock(&dev->pci_status_mutex);
+       if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
+               pci_disable_device(pdev);
+               dev->pci_status = MLX5_PCI_STATUS_DISABLED;
+       }
+       mutex_unlock(&dev->pci_status_mutex);
+}
+
 static int request_bar(struct pci_dev *pdev)
 {
        int err = 0;
@@ -680,12 +707,128 @@ static int wait_fw_init(struct mlx5_core_dev *dev, u32
        return err;
 }
 
-static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
+static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
 {
+       struct mlx5_device_context *dev_ctx;
+       struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+       dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
+       if (!dev_ctx)
+               return;
+
+       dev_ctx->intf    = intf;
+       CURVNET_SET_QUIET(vnet0);
+       dev_ctx->context = intf->add(dev);
+       CURVNET_RESTORE();
+
+       if (dev_ctx->context) {
+               spin_lock_irq(&priv->ctx_lock);
+               list_add_tail(&dev_ctx->list, &priv->ctx_list);
+               spin_unlock_irq(&priv->ctx_lock);
+       } else {
+               kfree(dev_ctx);
+       }
+}
+
+static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+       struct mlx5_device_context *dev_ctx;
+       struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+       list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+               if (dev_ctx->intf == intf) {
+                       spin_lock_irq(&priv->ctx_lock);
+                       list_del(&dev_ctx->list);
+                       spin_unlock_irq(&priv->ctx_lock);
+
+                       intf->remove(dev, dev_ctx->context);
+                       kfree(dev_ctx);
+                       return;
+               }
+}
+
+static int mlx5_register_device(struct mlx5_core_dev *dev)
+{
        struct mlx5_priv *priv = &dev->priv;
-       int err;
+       struct mlx5_interface *intf;
 
-       dev->pdev = pdev;
+       mutex_lock(&intf_mutex);
+       list_add_tail(&priv->dev_list, &dev_list);
+       list_for_each_entry(intf, &intf_list, list)
+               mlx5_add_device(intf, priv);
+       mutex_unlock(&intf_mutex);
+
+       return 0;
+}
+
+static void mlx5_unregister_device(struct mlx5_core_dev *dev)
+{
+       struct mlx5_priv *priv = &dev->priv;
+       struct mlx5_interface *intf;
+
+       mutex_lock(&intf_mutex);
+       list_for_each_entry(intf, &intf_list, list)
+               mlx5_remove_device(intf, priv);
+       list_del(&priv->dev_list);
+       mutex_unlock(&intf_mutex);
+}
+
+int mlx5_register_interface(struct mlx5_interface *intf)
+{
+       struct mlx5_priv *priv;
+
+       if (!intf->add || !intf->remove)
+               return -EINVAL;
+
+       mutex_lock(&intf_mutex);
+       list_add_tail(&intf->list, &intf_list);
+       list_for_each_entry(priv, &dev_list, dev_list)
+               mlx5_add_device(intf, priv);
+       mutex_unlock(&intf_mutex);
+
+       return 0;
+}
+EXPORT_SYMBOL(mlx5_register_interface);
+
+void mlx5_unregister_interface(struct mlx5_interface *intf)
+{
+       struct mlx5_priv *priv;
+
+       mutex_lock(&intf_mutex);
+       list_for_each_entry(priv, &dev_list, dev_list)
+               mlx5_remove_device(intf, priv);
+       list_del(&intf->list);
+       mutex_unlock(&intf_mutex);
+}
+EXPORT_SYMBOL(mlx5_unregister_interface);
+
+void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
+{
+       struct mlx5_priv *priv = &mdev->priv;
+       struct mlx5_device_context *dev_ctx;
+       unsigned long flags;
+       void *result = NULL;
+
+       spin_lock_irqsave(&priv->ctx_lock, flags);
+
+       list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
+               if ((dev_ctx->intf->protocol == protocol) &&
+                   dev_ctx->intf->get_dev) {
+                       result = dev_ctx->intf->get_dev(dev_ctx->context);
+                       break;
+               }
+
+       spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+       return result;
+}
+EXPORT_SYMBOL(mlx5_get_protocol_dev);
+
+static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+{
+       struct pci_dev *pdev = dev->pdev;
+       int err = 0;
+
        pci_set_drvdata(dev->pdev, dev);
        strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
        priv->name[MLX5_MAX_NAME_LEN - 1] = 0;
@@ -696,7 +839,7 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, st
 
        priv->numa_node = NUMA_NO_NODE;
 
-       err = pci_enable_device(pdev);
+       err = mlx5_pci_enable_device(dev);
        if (err) {
                device_printf((&pdev->dev)->bsddev, "ERR: ""Cannot enable PCI device, aborting\n");
                goto err_dbg;
@@ -723,6 +866,38 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, st
                device_printf((&pdev->dev)->bsddev, "ERR: ""Failed mapping initialization segment, aborting\n");
                goto err_clr_master;
        }
+
+        return 0;
+
+err_clr_master:
+       pci_clear_master(dev->pdev);
+       release_bar(dev->pdev);
+err_disable:
+       mlx5_pci_disable_device(dev);
+err_dbg:
+       return err;
+}
+
+static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+{
+       iounmap(dev->iseg);
+       pci_clear_master(dev->pdev);
+       release_bar(dev->pdev);
+       mlx5_pci_disable_device(dev);
+}
+
+static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+{
+       struct pci_dev *pdev = dev->pdev;
+       int err;
+
+       mutex_lock(&dev->intf_state_mutex);
+       if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
+               dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
+                        __func__);
+               goto out;
+       }
+
        device_printf((&pdev->dev)->bsddev, "INFO: ""firmware version: %d.%d.%d\n", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
 
        /*
@@ -734,7 +909,7 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, st
        err = mlx5_cmd_init(dev);
        if (err) {
                device_printf((&pdev->dev)->bsddev, "ERR: ""Failed initializing command interface, aborting\n");
-               goto err_unmap;
+               goto out_err;
        }
 
        err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
@@ -859,8 +1034,21 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, st
                goto err_init_tables;
        }
 
+       err = mlx5_register_device(dev);
+       if (err) {
+               dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
+               goto err_reg_dev;
+       }
+
+       clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
+       set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+
+out:
+       mutex_unlock(&dev->intf_state_mutex);
        return 0;
 
+err_reg_dev:
+       mlx5_cleanup_fs(dev);
 err_init_tables:
        mlx5_cleanup_mr_table(dev);
        mlx5_cleanup_srq_table(dev);
@@ -884,7 +1072,7 @@ err_stop_poll:
        mlx5_stop_health_poll(dev);
        if (mlx5_cmd_teardown_hca(dev)) {
                device_printf((&dev->pdev->dev)->bsddev, "ERR: ""tear_down_hca failed, skip cleanup\n");
-               return err;
+               goto out_err;
        }
 
 reclaim_boot_pages:
@@ -898,28 +1086,29 @@ err_disable_hca:
 
 err_pagealloc_cleanup:
        mlx5_pagealloc_cleanup(dev);
+
 err_cmd_cleanup:
        mlx5_cmd_cleanup(dev);
 
-err_unmap:
-       iounmap(dev->iseg);
-
-err_clr_master:
-       pci_clear_master(dev->pdev);
-       release_bar(dev->pdev);
-
-err_disable:
-       pci_disable_device(dev->pdev);
-
-err_dbg:
+out_err:
        dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+       mutex_unlock(&dev->intf_state_mutex);
+
        return err;
 }
 
-static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
+static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 {

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***