* i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+static iavf_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
- i40e_status ret_code;
+ iavf_status ret_code;
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
i40e_mem_atq_ring,
* i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+static iavf_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
- i40e_status ret_code;
+ iavf_status ret_code;
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
i40e_mem_arq_ring,
* i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
+static iavf_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
- i40e_status ret_code;
struct i40e_aq_desc *desc;
struct i40e_dma_mem *bi;
+ iavf_status ret_code;
int i;
/* We'll be allocating the buffer info memory first, then we can
/* buffer_info structures do not need alignment */
ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
- (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
+ (hw->aq.num_arq_entries *
+ sizeof(struct i40e_dma_mem)));
if (ret_code)
goto alloc_arq_bufs;
hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
* i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
+static iavf_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
- i40e_status ret_code;
struct i40e_dma_mem *bi;
+ iavf_status ret_code;
int i;
/* No mapped memory needed yet, just the buffer info structures */
ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
- (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_dma_mem)));
if (ret_code)
goto alloc_asq_bufs;
hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
*
* Configure base address and length registers for the transmit queue
**/
-static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
+static iavf_status i40e_config_asq_regs(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ iavf_status ret_code = 0;
u32 reg = 0;
/* Clear Head and Tail */
*
* Configure base address and length registers for the receive (event) queue
**/
-static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
+static iavf_status i40e_config_arq_regs(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ iavf_status ret_code = 0;
u32 reg = 0;
/* Clear Head and Tail */
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
-static i40e_status i40e_init_asq(struct i40e_hw *hw)
+static iavf_status i40e_init_asq(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ iavf_status ret_code = 0;
if (hw->aq.asq.count > 0) {
/* queue already initialized */
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
-static i40e_status i40e_init_arq(struct i40e_hw *hw)
+static iavf_status i40e_init_arq(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ iavf_status ret_code = 0;
if (hw->aq.arq.count > 0) {
/* queue already initialized */
*
* The main shutdown routine for the Admin Send Queue
**/
-static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
+static iavf_status i40e_shutdown_asq(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ iavf_status ret_code = 0;
mutex_lock(&hw->aq.asq_mutex);
*
* The main shutdown routine for the Admin Receive Queue
**/
-static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
+static iavf_status i40e_shutdown_arq(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ iavf_status ret_code = 0;
mutex_lock(&hw->aq.arq_mutex);
* - hw->aq.arq_buf_size
* - hw->aq.asq_buf_size
**/
-i40e_status iavf_init_adminq(struct i40e_hw *hw)
+iavf_status iavf_init_adminq(struct i40e_hw *hw)
{
- i40e_status ret_code;
+ iavf_status ret_code;
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
* iavf_shutdown_adminq - shutdown routine for the Admin Queue
* @hw: pointer to the hardware structure
**/
-i40e_status iavf_shutdown_adminq(struct i40e_hw *hw)
+iavf_status iavf_shutdown_adminq(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ iavf_status ret_code = 0;
if (iavf_check_asq_alive(hw))
iavf_aq_queue_shutdown(hw, true);
**/
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
- struct i40e_adminq_ring *asq = &(hw->aq.asq);
+ struct i40e_adminq_ring *asq = &hw->aq.asq;
struct i40e_asq_cmd_details *details;
u16 ntc = asq->next_to_clean;
struct i40e_aq_desc desc_cb;
* timing reliability than DD bit
*/
return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
-
}
/**
* This is the main send command driver routine for the Admin Queue send
* queue. It runs the queue, cleans the queue, etc.
**/
-i40e_status iavf_asq_send_command(struct i40e_hw *hw,
+iavf_status iavf_asq_send_command(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details)
{
- i40e_status status = 0;
+ iavf_status status = 0;
struct i40e_dma_mem *dma_buff = NULL;
struct i40e_asq_cmd_details *details;
struct i40e_aq_desc *desc_on_ring;
*desc_on_ring = *desc;
/* if buff is not NULL assume indirect command */
- if (buff != NULL) {
- dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+ if (buff) {
+ dma_buff = &hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use];
/* copy the user buff into the respective DMA buff */
memcpy(dma_buff->va, buff, buff_size);
desc_on_ring->datalen = cpu_to_le16(buff_size);
/* if ready, copy the desc back to temp */
if (iavf_asq_done(hw)) {
*desc = *desc_on_ring;
- if (buff != NULL)
+ if (buff)
memcpy(buff, dma_buff->va, buff_size);
retval = le16_to_cpu(desc->retval);
if (retval != 0) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: desc and buffer writeback:\n");
- iavf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff,
- buff_size);
+ iavf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
/* save writeback aq if requested */
if (details->wb_desc)
*
* Fill the desc with default values
**/
-void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
- u16 opcode)
+void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode)
{
/* zero out the desc */
memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
* the contents through 'e'. It can also return how many events are
* left to process through 'pending'
**/
-i40e_status iavf_clean_arq_element(struct i40e_hw *hw,
+iavf_status iavf_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
u16 *pending)
{
- i40e_status ret_code = 0;
u16 ntc = hw->aq.arq.next_to_clean;
struct i40e_aq_desc *desc;
+ iavf_status ret_code = 0;
struct i40e_dma_mem *bi;
u16 desc_idx;
u16 datalen;
e->desc = *desc;
datalen = le16_to_cpu(desc->datalen);
e->msg_len = min(datalen, e->buf_len);
- if (e->msg_buf != NULL && (e->msg_len != 0))
+ if (e->msg_buf && (e->msg_len != 0))
memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
e->msg_len);
clean_arq_element_out:
/* Set pending if needed, unlock and return */
- if (pending != NULL)
+ if (pending)
*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
};
/* prototype for functions used for dynamic memory allocation */
-i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
- struct i40e_dma_mem *mem,
- enum i40e_memory_type type,
- u64 size, u32 alignment);
-i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
- struct i40e_dma_mem *mem);
-i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
- struct i40e_virt_mem *mem,
- u32 size);
-i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
- struct i40e_virt_mem *mem);
+iavf_status i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
+ enum i40e_memory_type type,
+ u64 size, u32 alignment);
+iavf_status i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem);
+iavf_status i40e_allocate_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem, u32 size);
+iavf_status i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem);
#endif /* _I40E_ALLOC_H_ */
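A minimal sketch (not part of the patch) of a caller using the DMA helpers declared above with the renamed iavf_status return type; the descriptor sizing, the I40E_ADMINQ_DESC_ALIGNMENT value, and the wrapper names are assumptions for illustration only.

static iavf_status example_alloc_asq_ring(struct i40e_hw *hw)
{
	/* sizing and alignment here are assumed, not taken from the patch */
	return i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
				     i40e_mem_atq_ring,
				     hw->aq.num_asq_entries *
				     sizeof(struct i40e_aq_desc),
				     I40E_ADMINQ_DESC_ALIGNMENT);
}

static void example_free_asq_ring(struct i40e_hw *hw)
{
	/* the matching free helper takes the same i40e_dma_mem handle */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}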
* This function sets the mac type of the adapter based on the
* vendor ID and device ID stored in the hw structure.
**/
-i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+iavf_status i40e_set_mac_type(struct i40e_hw *hw)
{
- i40e_status status = 0;
+ iavf_status status = 0;
if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
switch (hw->device_id) {
status = I40E_ERR_DEVICE_NOT_SUPPORTED;
}
- hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
- hw->mac.type, status);
+ hw_dbg(hw, "found mac: %d, returns: %d\n", hw->mac.type, status);
return status;
}
* @hw: pointer to the HW structure
* @stat_err: the status error code to convert
**/
-const char *iavf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+const char *iavf_stat_str(struct i40e_hw *hw, iavf_status stat_err)
{
switch (stat_err) {
case 0:
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
u8 *buf = (u8 *)buffer;
- if ((!(mask & hw->debug_mask)) || (desc == NULL))
+ if ((!(mask & hw->debug_mask)) || !desc)
return;
i40e_debug(hw, mask,
le32_to_cpu(aq_desc->params.external.addr_high),
le32_to_cpu(aq_desc->params.external.addr_low));
- if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ if (buffer && aq_desc->datalen) {
u16 len = le16_to_cpu(aq_desc->datalen);
i40e_debug(hw, mask, "AQ CMD Buffer:\n");
* Tell the Firmware that we're shutting down the AdminQ and whether
* or not the driver is unloading as well.
**/
-i40e_status iavf_aq_queue_shutdown(struct i40e_hw *hw,
- bool unloading)
+iavf_status iavf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading)
{
struct i40e_aq_desc desc;
struct i40e_aqc_queue_shutdown *cmd =
(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
- i40e_status status;
+ iavf_status status;
- iavf_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_queue_shutdown);
+ iavf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
if (unloading)
cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
*
* Internal function to get or set RSS look up table
**/
-static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+static iavf_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
u16 vsi_id, bool pf_lut,
u8 *lut, u16 lut_size,
bool set)
{
- i40e_status status;
+ iavf_status status;
struct i40e_aq_desc desc;
struct i40e_aqc_get_set_rss_lut *cmd_resp =
(struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
*
* get the RSS lookup table, PF or VSI type
**/
-i40e_status iavf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+iavf_status iavf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
bool pf_lut, u8 *lut, u16 lut_size)
{
return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
*
* set the RSS lookup table, PF or VSI type
**/
-i40e_status iavf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+iavf_status iavf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
bool pf_lut, u8 *lut, u16 lut_size)
{
return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
*
* get the RSS key per VSI
**/
-static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_aqc_get_set_rss_key_data *key,
- bool set)
+static
+iavf_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key,
+ bool set)
{
- i40e_status status;
+ iavf_status status;
struct i40e_aq_desc desc;
struct i40e_aqc_get_set_rss_key *cmd_resp =
(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
* @key: pointer to key info struct
*
**/
-i40e_status iavf_aq_get_rss_key(struct i40e_hw *hw, u16 vsi_id,
+iavf_status iavf_aq_get_rss_key(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_get_set_rss_key_data *key)
{
return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
*
* set the RSS key per VSI
**/
-i40e_status iavf_aq_set_rss_key(struct i40e_hw *hw, u16 vsi_id,
+iavf_status iavf_aq_set_rss_key(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_get_set_rss_key_data *key)
{
return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
* is sent asynchronously, i.e. iavf_asq_send_command() does not wait for
* completion before returning.
**/
-i40e_status iavf_aq_send_msg_to_pf(struct i40e_hw *hw,
+iavf_status iavf_aq_send_msg_to_pf(struct i40e_hw *hw,
enum virtchnl_ops v_opcode,
- i40e_status v_retval, u8 *msg, u16 msglen,
+ iavf_status v_retval, u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
struct i40e_asq_cmd_details details;
- i40e_status status;
+ struct i40e_aq_desc desc;
+ iavf_status status;
iavf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
* as none will be forthcoming. Immediately after calling this function,
* the admin queue should be shut down and (optionally) reinitialized.
**/
-i40e_status iavf_vf_reset(struct i40e_hw *hw)
+iavf_status iavf_vf_reset(struct i40e_hw *hw)
{
return iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
0, NULL, 0, NULL);
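A minimal sketch (not part of the patch) of the sequence the comment above iavf_vf_reset() prescribes: request the reset, shut the admin queue down right away, then optionally bring it back up. The wrapper name is an illustrative assumption and the return values of the first two calls are ignored for brevity; the called routines are the ones declared in this series.

static iavf_status example_reset_sequence(struct i40e_hw *hw)
{
	/* ask the PF to reset this VF; no reply is forthcoming */
	iavf_vf_reset(hw);

	/* per the comment above, shut the AQ down immediately ... */
	iavf_shutdown_adminq(hw);

	/* ... and (optionally) reinitialize it */
	return iavf_init_adminq(hw);
}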
extern void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
__attribute__ ((format(gnu_printf, 3, 4)));
-typedef enum i40e_status_code i40e_status;
+typedef enum i40e_status_code iavf_status;
#endif /* _I40E_OSDEP_H_ */
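A minimal sketch (not part of the patch) showing the renamed typedef from a caller's perspective; the wrapper and the -EIO mapping are illustrative assumptions, while iavf_init_adminq(), iavf_stat_str() and hw_dbg() are the helpers used elsewhere in this series.

static int example_start_adminq(struct i40e_hw *hw)
{
	/* iavf_status is still enum i40e_status_code underneath */
	iavf_status status = iavf_init_adminq(hw);

	if (status) {
		hw_dbg(hw, "init adminq failed: %s\n",
		       iavf_stat_str(hw, status));
		return -EIO;	/* errno mapping assumed for illustration */
	}
	return 0;
}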
*/
/* adminq functions */
-i40e_status iavf_init_adminq(struct i40e_hw *hw);
-i40e_status iavf_shutdown_adminq(struct i40e_hw *hw);
+iavf_status iavf_init_adminq(struct i40e_hw *hw);
+iavf_status iavf_shutdown_adminq(struct i40e_hw *hw);
void i40e_adminq_init_ring_data(struct i40e_hw *hw);
-i40e_status iavf_clean_arq_element(struct i40e_hw *hw,
+iavf_status iavf_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
u16 *events_pending);
-i40e_status iavf_asq_send_command(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
+iavf_status iavf_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
void i40e_idle_aq(struct i40e_hw *hw);
void iavf_resume_aq(struct i40e_hw *hw);
bool iavf_check_asq_alive(struct i40e_hw *hw);
-i40e_status iavf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+iavf_status iavf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
const char *iavf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
-const char *iavf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
+const char *iavf_stat_str(struct i40e_hw *hw, iavf_status stat_err);
-i40e_status iavf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+iavf_status iavf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
-i40e_status iavf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+iavf_status iavf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
-i40e_status iavf_aq_get_rss_key(struct i40e_hw *hw, u16 seid,
+iavf_status iavf_aq_get_rss_key(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_get_set_rss_key_data *key);
-i40e_status iavf_aq_set_rss_key(struct i40e_hw *hw, u16 seid,
+iavf_status iavf_aq_set_rss_key(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_get_set_rss_key_data *key);
-i40e_status i40e_set_mac_type(struct i40e_hw *hw);
+iavf_status i40e_set_mac_type(struct i40e_hw *hw);
extern struct i40e_rx_ptype_decoded iavf_ptype_lookup[];
/* i40e_common for VF drivers*/
void iavf_vf_parse_hw_config(struct i40e_hw *hw,
struct virtchnl_vf_resource *msg);
-i40e_status iavf_vf_reset(struct i40e_hw *hw);
-i40e_status iavf_aq_send_msg_to_pf(struct i40e_hw *hw,
+iavf_status iavf_vf_reset(struct i40e_hw *hw);
+iavf_status iavf_aq_send_msg_to_pf(struct i40e_hw *hw,
enum virtchnl_ops v_opcode,
- i40e_status v_retval, u8 *msg, u16 msglen,
+ iavf_status v_retval, u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
#endif /* _I40E_PROTOTYPE_H_ */
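Because iavf_aq_send_msg_to_pf() is asynchronous, the PF's reply has to be collected later from the receive queue. A minimal sketch (not part of the patch) of that poll step, mirroring the driver's own receive loop; the helper name is an illustrative assumption and the caller is expected to have filled in event->buf_len and event->msg_buf.

static iavf_status example_poll_pf_reply(struct i40e_hw *hw,
					 struct i40e_arq_event_info *event,
					 enum virtchnl_ops *op)
{
	iavf_status ret;
	u16 pending;

	ret = iavf_clean_arq_element(hw, event, &pending);
	if (ret)
		return ret;	/* no event to process or error cleaning ARQ */

	/* opcode and PF return code travel in the descriptor cookies */
	*op = (enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);
	return (iavf_status)le32_to_cpu(event->desc.cookie_low);
}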
void iavf_disable_vlan_stripping(struct iavf_adapter *adapter);
void iavf_virtchnl_completion(struct iavf_adapter *adapter,
enum virtchnl_ops v_opcode,
- i40e_status v_retval, u8 *msg, u16 msglen);
+ iavf_status v_retval, u8 *msg, u16 msglen);
int iavf_config_rss(struct iavf_adapter *adapter);
int iavf_lan_add_device(struct iavf_adapter *adapter);
int iavf_lan_del_device(struct iavf_adapter *adapter);
static int iavf_client_release_qvlist(struct i40e_info *ldev)
{
struct iavf_adapter *adapter = ldev->vf;
- i40e_status err;
+ iavf_status err;
if (adapter->aq_required)
return -EAGAIN;
u8 *msg, u16 len)
{
struct iavf_adapter *adapter = ldev->vf;
- i40e_status err;
+ iavf_status err;
if (adapter->aq_required)
return -EAGAIN;
struct virtchnl_iwarp_qvlist_info *v_qvlist_info;
struct iavf_adapter *adapter = ldev->vf;
struct i40e_qv_info *qv_info;
- i40e_status err;
+ iavf_status err;
u32 v_idx, i;
u32 msg_size;
(ec->rx_coalesce_usecs > I40E_MAX_ITR)) {
netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
return -EINVAL;
- }
-
- else
- if (ec->tx_coalesce_usecs == 0) {
+ } else if (ec->tx_coalesce_usecs == 0) {
if (ec->use_adaptive_tx_coalesce)
netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
} else if ((ec->tx_coalesce_usecs < I40E_MIN_ITR) ||
if (!indir)
return 0;
- if (key) {
+ if (key)
memcpy(adapter->rss_key, key, adapter->rss_key_size);
- }
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
for (i = 0; i < adapter->rss_lut_size; i++)
* @size: size of memory requested
* @alignment: what to align the allocation to
**/
-i40e_status iavf_allocate_dma_mem_d(struct i40e_hw *hw,
+iavf_status iavf_allocate_dma_mem_d(struct i40e_hw *hw,
struct i40e_dma_mem *mem,
u64 size, u32 alignment)
{
* @hw: pointer to the HW structure
* @mem: ptr to mem struct to free
**/
-i40e_status iavf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+iavf_status iavf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
* @mem: ptr to mem struct to fill out
* @size: size of memory requested
**/
-i40e_status iavf_allocate_virt_mem_d(struct i40e_hw *hw,
+iavf_status iavf_allocate_virt_mem_d(struct i40e_hw *hw,
struct i40e_virt_mem *mem, u32 size)
{
if (!mem)
* @hw: pointer to the HW structure
* @mem: ptr to mem struct to free
**/
-i40e_status iavf_free_virt_mem_d(struct i40e_hw *hw,
+iavf_status iavf_free_virt_mem_d(struct i40e_hw *hw,
struct i40e_virt_mem *mem)
{
if (!mem)
for (vector = 0; vector < q_vectors; vector++) {
struct i40e_q_vector *q_vector = &adapter->q_vectors[vector];
+
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
if (q_vector->tx.ring && q_vector->rx.ring) {
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx];
+
if (q_idx < napi_vectors)
netif_napi_del(&q_vector->napi);
}
struct i40e_hw *hw = &adapter->hw;
struct i40e_arq_event_info event;
enum virtchnl_ops v_op;
- i40e_status ret, v_ret;
+ iavf_status ret, v_ret;
u32 val, oldval;
u16 pending;
do {
ret = iavf_clean_arq_element(hw, &event, &pending);
v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
- v_ret = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+ v_ret = (iavf_status)le32_to_cpu(event.desc.cookie_low);
if (ret || !v_op)
break; /* No event to process or error cleaning ARQ */
enum virtchnl_ops op, u8 *msg, u16 len)
{
struct i40e_hw *hw = &adapter->hw;
- i40e_status err;
+ iavf_status err;
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
return 0; /* nothing to see here, move along */
struct i40e_hw *hw = &adapter->hw;
struct i40e_arq_event_info event;
enum virtchnl_ops op;
- i40e_status err;
+ iavf_status err;
event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
}
- err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+ err = (iavf_status)le32_to_cpu(event.desc.cookie_low);
if (err)
goto out_alloc;
adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG;
if (PF_IS_V11(adapter))
- return iavf_send_pf_msg(adapter,
- VIRTCHNL_OP_GET_VF_RESOURCES,
- (u8 *)&caps, sizeof(caps));
+ return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
+ (u8 *)&caps, sizeof(caps));
else
- return iavf_send_pf_msg(adapter,
- VIRTCHNL_OP_GET_VF_RESOURCES,
- NULL, 0);
+ return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES,
+ NULL, 0);
}
/**
struct i40e_hw *hw = &adapter->hw;
struct i40e_arq_event_info event;
enum virtchnl_ops op;
- i40e_status err;
+ iavf_status err;
u16 len;
len = sizeof(struct virtchnl_vf_resource) +
break;
}
- err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
+ err = (iavf_status)le32_to_cpu(event.desc.cookie_low);
memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));
/* some PFs send more queues than we should have so validate that
spin_unlock_bh(&adapter->mac_vlan_list_lock);
- iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR,
- (u8 *)veal, len);
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len);
kfree(veal);
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
- iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR,
- (u8 *)veal, len);
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len);
kfree(veal);
}
adapter->current_op = VIRTCHNL_OP_GET_STATS;
vqs.vsi_id = adapter->vsi_res->vsi_id;
/* queue maps are ignored for this message - only the vsi is used */
- if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS,
- (u8 *)&vqs, sizeof(vqs)))
+ if (iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs,
+ sizeof(vqs)))
/* if the request failed, don't lock out others */
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}
* This function handles the reply messages.
**/
void iavf_virtchnl_completion(struct iavf_adapter *adapter,
- enum virtchnl_ops v_opcode,
- i40e_status v_retval,
+ enum virtchnl_ops v_opcode, iavf_status v_retval,
u8 *msg, u16 msglen)
{
struct net_device *netdev = adapter->netdev;
struct virtchnl_pf_event *vpe =
(struct virtchnl_pf_event *)msg;
bool link_up = vpe->event_data.link_event.link_status;
+
switch (vpe->event) {
case VIRTCHNL_EVENT_LINK_CHANGE:
adapter->link_speed =
break;
default:
dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
- v_retval,
- iavf_stat_str(&adapter->hw, v_retval),
+ v_retval, iavf_stat_str(&adapter->hw, v_retval),
v_opcode);
}
}
break;
case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
+
if (msglen == sizeof(*vrh))
adapter->hena = vrh->hena;
else
case VIRTCHNL_OP_REQUEST_QUEUES: {
struct virtchnl_vf_res_request *vfres =
(struct virtchnl_vf_res_request *)msg;
+
if (vfres->num_queue_pairs != adapter->num_req_queues) {
dev_info(&adapter->pdev->dev,
"Requested %d queues, PF can support %d\n",