
Merge branches 'cma', 'cxgb3', 'cxgb4', 'ehca', 'iser', 'mad', 'nes', 'qib', 'srp' and 'srpt' into for-next
rolandd committed Mar 19, 2012
11 parents 42872c7 + 186834b + db4106c + 91018f8 + bd50f89 + 89e984e + 0b30704 + 8dd87fb + 520b3ee + 683b159 + a776ce7 commit f0e88ae
Showing 18 changed files with 280 additions and 193 deletions.
21 changes: 21 additions & 0 deletions drivers/infiniband/core/mad.c
@@ -1842,6 +1842,24 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
         }
 }
 
+static bool generate_unmatched_resp(struct ib_mad_private *recv,
+                                    struct ib_mad_private *response)
+{
+        if (recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_GET ||
+            recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_SET) {
+                memcpy(response, recv, sizeof *response);
+                response->header.recv_wc.wc = &response->header.wc;
+                response->header.recv_wc.recv_buf.mad = &response->mad.mad;
+                response->header.recv_wc.recv_buf.grh = &response->grh;
+                response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
+                response->mad.mad.mad_hdr.status =
+                        cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
+
+                return true;
+        } else {
+                return false;
+        }
+}
 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
                                      struct ib_wc *wc)
 {
@@ -1963,6 +1981,9 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
                  * or via recv_handler in ib_mad_complete_recv()
                  */
                 recv = NULL;
+        } else if (generate_unmatched_resp(recv, response)) {
+                agent_send_response(&response->mad.mad, &recv->grh, wc,
+                                    port_priv->device, port_num, qp_info->qp->qp_num);
         }
 
 out:
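The helper added above answers an otherwise-unmatched GET or SET MAD with a GET_RESP carrying an error status instead of silently dropping it: copy the request, flip the method to the response form, set the status. A minimal userspace sketch of the same copy-and-flip pattern; the two-field header and the status value are illustrative stand-ins for the real ib_mad.h definitions:

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    enum { METHOD_GET = 0x01, METHOD_SET = 0x02, METHOD_GET_RESP = 0x81 };
    #define STATUS_UNSUP_METHOD_ATTRIB 0x000c   /* illustrative value */

    struct mad_hdr {                /* simplified header, not the real layout */
        uint8_t  method;
        uint16_t status;            /* big-endian on the wire */
    };

    static bool generate_unmatched_resp(const struct mad_hdr *recv,
                                        struct mad_hdr *response)
    {
        if (recv->method == METHOD_GET || recv->method == METHOD_SET) {
            memcpy(response, recv, sizeof *response);  /* echo the request */
            response->method = METHOD_GET_RESP;        /* flip to a reply */
            response->status = htons(STATUS_UNSUP_METHOD_ATTRIB);
            return true;                               /* caller sends it */
        }
        return false;                                  /* still dropped */
    }

    int main(void)
    {
        struct mad_hdr req = { .method = METHOD_GET }, resp;

        if (generate_unmatched_resp(&req, &resp))
            printf("reply: method 0x%02x, status 0x%04x\n",
                   resp.method, ntohs(resp.status));
        return 0;
    }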
37 changes: 18 additions & 19 deletions drivers/infiniband/core/ucma.c
@@ -449,24 +449,6 @@ static void ucma_cleanup_multicast(struct ucma_context *ctx)
         mutex_unlock(&mut);
 }
 
-static void ucma_cleanup_events(struct ucma_context *ctx)
-{
-        struct ucma_event *uevent, *tmp;
-
-        list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
-                if (uevent->ctx != ctx)
-                        continue;
-
-                list_del(&uevent->list);
-
-                /* clear incoming connections. */
-                if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
-                        rdma_destroy_id(uevent->cm_id);
-
-                kfree(uevent);
-        }
-}
-
 static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
 {
         struct ucma_event *uevent, *tmp;
@@ -480,9 +462,16 @@ static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
         }
 }
 
+/*
+ * We cannot hold file->mut when calling rdma_destroy_id() or we can
+ * deadlock.  We also acquire file->mut in ucma_event_handler(), and
+ * rdma_destroy_id() will wait until all callbacks have completed.
+ */
 static int ucma_free_ctx(struct ucma_context *ctx)
 {
         int events_reported;
+        struct ucma_event *uevent, *tmp;
+        LIST_HEAD(list);
 
         /* No new events will be generated after destroying the id. */
         rdma_destroy_id(ctx->cm_id);
@@ -491,10 +480,20 @@ static int ucma_free_ctx(struct ucma_context *ctx)
 
         /* Cleanup events not yet reported to the user. */
         mutex_lock(&ctx->file->mut);
-        ucma_cleanup_events(ctx);
+        list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
+                if (uevent->ctx == ctx)
+                        list_move_tail(&uevent->list, &list);
+        }
         list_del(&ctx->list);
         mutex_unlock(&ctx->file->mut);
 
+        list_for_each_entry_safe(uevent, tmp, &list, list) {
+                list_del(&uevent->list);
+                if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
+                        rdma_destroy_id(uevent->cm_id);
+                kfree(uevent);
+        }
+
         events_reported = ctx->events_reported;
         kfree(ctx);
         return events_reported;
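The new comment names the bug this rework fixes: rdma_destroy_id() blocks until event callbacks complete, and those callbacks take file->mut, so destroying ids while holding file->mut is an AB-BA deadlock. The fix moves the doomed events onto a private list under the lock and destroys them only after dropping it. A hedged pthreads sketch of that two-phase pattern, with a hand-rolled singly linked list standing in for the kernel's list helpers:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct uevent {
        struct uevent *next;
        int ctx_id;                 /* which context generated this event */
    };

    static pthread_mutex_t file_mut = PTHREAD_MUTEX_INITIALIZER;
    static struct uevent *event_list;

    /* Stand-in for rdma_destroy_id(): may block until callbacks that
     * themselves take file_mut have finished, so it must never be
     * called with file_mut held. */
    static void destroy_id(struct uevent *ev)
    {
        free(ev);
    }

    static void free_ctx_events(int ctx_id)
    {
        struct uevent **pp = &event_list, *ev, *victims = NULL;

        /* Phase 1: unlink matching events onto a private list, under the lock. */
        pthread_mutex_lock(&file_mut);
        while ((ev = *pp) != NULL) {
            if (ev->ctx_id == ctx_id) {
                *pp = ev->next;
                ev->next = victims;
                victims = ev;
            } else {
                pp = &ev->next;
            }
        }
        pthread_mutex_unlock(&file_mut);

        /* Phase 2: the lock is dropped, so blocking in destroy_id() is safe. */
        while ((ev = victims) != NULL) {
            victims = ev->next;
            destroy_id(ev);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 4; i++) {
            struct uevent *ev = calloc(1, sizeof(*ev));
            ev->ctx_id = i % 2;
            ev->next = event_list;
            event_list = ev;
        }
        free_ctx_events(0);         /* destroys two events, leaves two */
        printf("remaining head ctx: %d\n", event_list->ctx_id);
        return 0;
    }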
40 changes: 20 additions & 20 deletions drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -803,7 +803,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
  * Assumes qhp lock is held.
  */
 static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
-                       struct iwch_cq *schp, unsigned long *flag)
+                       struct iwch_cq *schp)
 {
         int count;
         int flushed;
@@ -812,44 +812,44 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
         PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
         /* take a ref on the qhp since we must release the lock */
         atomic_inc(&qhp->refcnt);
-        spin_unlock_irqrestore(&qhp->lock, *flag);
+        spin_unlock(&qhp->lock);
 
         /* locking hierarchy: cq lock first, then qp lock. */
-        spin_lock_irqsave(&rchp->lock, *flag);
+        spin_lock(&rchp->lock);
         spin_lock(&qhp->lock);
         cxio_flush_hw_cq(&rchp->cq);
         cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
         flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
         spin_unlock(&qhp->lock);
-        spin_unlock_irqrestore(&rchp->lock, *flag);
+        spin_unlock(&rchp->lock);
         if (flushed) {
-                spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
+                spin_lock(&rchp->comp_handler_lock);
                 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
-                spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
+                spin_unlock(&rchp->comp_handler_lock);
         }
 
         /* locking hierarchy: cq lock first, then qp lock. */
-        spin_lock_irqsave(&schp->lock, *flag);
+        spin_lock(&schp->lock);
         spin_lock(&qhp->lock);
         cxio_flush_hw_cq(&schp->cq);
         cxio_count_scqes(&schp->cq, &qhp->wq, &count);
         flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
         spin_unlock(&qhp->lock);
-        spin_unlock_irqrestore(&schp->lock, *flag);
+        spin_unlock(&schp->lock);
         if (flushed) {
-                spin_lock_irqsave(&schp->comp_handler_lock, *flag);
+                spin_lock(&schp->comp_handler_lock);
                 (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
-                spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
+                spin_unlock(&schp->comp_handler_lock);
         }
 
         /* deref */
         if (atomic_dec_and_test(&qhp->refcnt))
                 wake_up(&qhp->wait);
 
-        spin_lock_irqsave(&qhp->lock, *flag);
+        spin_lock(&qhp->lock);
 }
 
-static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
+static void flush_qp(struct iwch_qp *qhp)
 {
         struct iwch_cq *rchp, *schp;
 
@@ -859,19 +859,19 @@ static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
         if (qhp->ibqp.uobject) {
                 cxio_set_wq_in_error(&qhp->wq);
                 cxio_set_cq_in_error(&rchp->cq);
-                spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
+                spin_lock(&rchp->comp_handler_lock);
                 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
-                spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
+                spin_unlock(&rchp->comp_handler_lock);
                 if (schp != rchp) {
                         cxio_set_cq_in_error(&schp->cq);
-                        spin_lock_irqsave(&schp->comp_handler_lock, *flag);
+                        spin_lock(&schp->comp_handler_lock);
                         (*schp->ibcq.comp_handler)(&schp->ibcq,
                                                    schp->ibcq.cq_context);
-                        spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
+                        spin_unlock(&schp->comp_handler_lock);
                 }
                 return;
         }
-        __flush_qp(qhp, rchp, schp, flag);
+        __flush_qp(qhp, rchp, schp);
 }
 
 
@@ -1030,7 +1030,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
                         break;
                 case IWCH_QP_STATE_ERROR:
                         qhp->attr.state = IWCH_QP_STATE_ERROR;
-                        flush_qp(qhp, &flag);
+                        flush_qp(qhp);
                         break;
                 default:
                         ret = -EINVAL;
@@ -1078,7 +1078,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
                 }
                 switch (attrs->next_state) {
                 case IWCH_QP_STATE_IDLE:
-                        flush_qp(qhp, &flag);
+                        flush_qp(qhp);
                         qhp->attr.state = IWCH_QP_STATE_IDLE;
                         qhp->attr.llp_stream_handle = NULL;
                         put_ep(&qhp->ep->com);
@@ -1132,7 +1132,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
         free=1;
         wake_up(&qhp->wait);
         BUG_ON(!ep);
-        flush_qp(qhp, &flag);
+        flush_qp(qhp);
 out:
         spin_unlock_irqrestore(&qhp->lock, flag);
 
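flush_qp() is only reached from iwch_modify_qp(), which already holds qhp->lock via spin_lock_irqsave() (visible at the spin_unlock_irqrestore() above), so interrupts are already off and the inner locks can be plain spin_lock()s; threading the caller's flags through *flag was unnecessary. What survives is the locking dance itself: __flush_qp() drops the qp lock (holding a reference so the qp cannot vanish) and retakes locks in the documented cq-then-qp order. A rough pthreads sketch of that dance, with illustrative names:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct cq { pthread_mutex_t lock; };
    struct qp {
        pthread_mutex_t lock;       /* held by the caller on entry */
        atomic_int refcnt;
    };

    /* Locking hierarchy: cq lock first, then qp lock. */
    static void flush_one(struct qp *qhp, struct cq *chp, const char *which)
    {
        pthread_mutex_lock(&chp->lock);
        pthread_mutex_lock(&qhp->lock);
        printf("flushing %s queue\n", which);   /* cxio_flush_* work goes here */
        pthread_mutex_unlock(&qhp->lock);
        pthread_mutex_unlock(&chp->lock);
    }

    static void __flush_qp(struct qp *qhp, struct cq *rchp, struct cq *schp)
    {
        /* Take a ref, then drop the qp lock so the hierarchy can be honored. */
        atomic_fetch_add(&qhp->refcnt, 1);
        pthread_mutex_unlock(&qhp->lock);

        flush_one(qhp, rchp, "recv");
        flush_one(qhp, schp, "send");

        atomic_fetch_sub(&qhp->refcnt, 1);
        pthread_mutex_lock(&qhp->lock);         /* caller still expects it held */
    }

    int main(void)
    {
        struct qp qhp = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct cq rchp = { PTHREAD_MUTEX_INITIALIZER };
        struct cq schp = { PTHREAD_MUTEX_INITIALIZER };

        pthread_mutex_lock(&qhp.lock);          /* as in iwch_modify_qp() */
        __flush_qp(&qhp, &rchp, &schp);
        pthread_mutex_unlock(&qhp.lock);
        return 0;
    }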
2 changes: 1 addition & 1 deletion drivers/infiniband/hw/cxgb4/cm.c
@@ -1114,7 +1114,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
          * generated when moving QP to RTS state.
          * A TERM message will be sent after QP has moved to RTS state
          */
-        if ((ep->mpa_attr.version == 2) &&
+        if ((ep->mpa_attr.version == 2) && peer2peer &&
             (ep->mpa_attr.p2p_type != p2p_type)) {
                 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
                 rtr_mismatch = 1;
3 changes: 2 additions & 1 deletion drivers/infiniband/hw/ehca/ehca_irq.c
@@ -786,7 +786,8 @@ static struct task_struct *create_comp_task(struct ehca_comp_pool *pool,
         spin_lock_init(&cct->task_lock);
         INIT_LIST_HEAD(&cct->cq_list);
         init_waitqueue_head(&cct->wait_queue);
-        cct->task = kthread_create(comp_task, cct, "ehca_comp/%d", cpu);
+        cct->task = kthread_create_on_node(comp_task, cct, cpu_to_node(cpu),
+                                           "ehca_comp/%d", cpu);
 
         return cct->task;
 }
2 changes: 1 addition & 1 deletion drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -112,7 +112,7 @@ static u32 ehca_encode_hwpage_size(u32 pgsize)
 
 static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
 {
-        return 1UL << ilog2(shca->hca_cap_mr_pgsize);
+        return rounddown_pow_of_two(shca->hca_cap_mr_pgsize);
 }
 
 static struct ehca_mr *ehca_mr_new(void)
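For nonzero input, 1UL << ilog2(x) and rounddown_pow_of_two(x) compute the same thing: the largest power of two not exceeding x, i.e. the top set bit. For a capability mask of supported page sizes, that is the largest supported page. The replacement just states the intent directly. A quick userspace check, modeling ilog2() with a GCC/Clang builtin and using an invented capability mask:

    #include <assert.h>
    #include <stdio.h>

    static unsigned long ilog2_ul(unsigned long x)  /* floor(log2(x)), x != 0 */
    {
        return 8 * sizeof(x) - 1 - (unsigned long)__builtin_clzl(x);
    }

    static unsigned long rounddown_pow_of_two(unsigned long x)
    {
        return 1UL << ilog2_ul(x);
    }

    int main(void)
    {
        /* hypothetical hca_cap_mr_pgsize: bits for 4K, 64K and 16M pages */
        unsigned long cap = (1UL << 12) | (1UL << 16) | (1UL << 24);

        assert(rounddown_pow_of_two(cap) == 1UL << ilog2_ul(cap));
        printf("max hw page size: 0x%lx\n", rounddown_pow_of_two(cap));
        return 0;
    }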
39 changes: 22 additions & 17 deletions drivers/infiniband/hw/nes/nes_cm.c
@@ -338,18 +338,21 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
         case IETF_MPA_V2: {
                 u16 ird_size;
                 u16 ord_size;
+                u16 rtr_ctrl_ird;
+                u16 rtr_ctrl_ord;
 
                 mpa_v2_frame = (struct ietf_mpa_v2 *)buffer;
                 mpa_hdr_len += IETF_RTR_MSG_SIZE;
                 cm_node->mpa_frame_size -= IETF_RTR_MSG_SIZE;
                 rtr_msg = &mpa_v2_frame->rtr_msg;
 
                 /* parse rtr message */
-                rtr_msg->ctrl_ird = ntohs(rtr_msg->ctrl_ird);
-                rtr_msg->ctrl_ord = ntohs(rtr_msg->ctrl_ord);
-                ird_size = rtr_msg->ctrl_ird & IETF_NO_IRD_ORD;
-                ord_size = rtr_msg->ctrl_ord & IETF_NO_IRD_ORD;
+                rtr_ctrl_ird = ntohs(rtr_msg->ctrl_ird);
+                rtr_ctrl_ord = ntohs(rtr_msg->ctrl_ord);
+                ird_size = rtr_ctrl_ird & IETF_NO_IRD_ORD;
+                ord_size = rtr_ctrl_ord & IETF_NO_IRD_ORD;
 
-                if (!(rtr_msg->ctrl_ird & IETF_PEER_TO_PEER)) {
+                if (!(rtr_ctrl_ird & IETF_PEER_TO_PEER)) {
                         /* send reset */
                         return -EINVAL;
                 }
@@ -370,9 +373,9 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
                         }
                 }
 
-                if (rtr_msg->ctrl_ord & IETF_RDMA0_READ) {
+                if (rtr_ctrl_ord & IETF_RDMA0_READ) {
                         cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
-                } else if (rtr_msg->ctrl_ord & IETF_RDMA0_WRITE) {
+                } else if (rtr_ctrl_ord & IETF_RDMA0_WRITE) {
                         cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
                 } else {        /* Not supported RDMA0 operation */
                         return -EINVAL;
@@ -543,38 +546,40 @@ static void build_mpa_v2(struct nes_cm_node *cm_node,
 {
         struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr;
         struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
+        u16 ctrl_ird;
+        u16 ctrl_ord;
 
         /* initialize the upper 5 bytes of the frame */
         build_mpa_v1(cm_node, start_addr, mpa_key);
         mpa_frame->flags |= IETF_MPA_V2_FLAG; /* set a bit to indicate MPA V2 */
         mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE);
 
         /* initialize RTR msg */
-        rtr_msg->ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
+        ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
                         IETF_NO_IRD_ORD : cm_node->ird_size;
-        rtr_msg->ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
+        ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
                         IETF_NO_IRD_ORD : cm_node->ord_size;
 
-        rtr_msg->ctrl_ird |= IETF_PEER_TO_PEER;
-        rtr_msg->ctrl_ird |= IETF_FLPDU_ZERO_LEN;
+        ctrl_ird |= IETF_PEER_TO_PEER;
+        ctrl_ird |= IETF_FLPDU_ZERO_LEN;
 
         switch (mpa_key) {
         case MPA_KEY_REQUEST:
-                rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE;
-                rtr_msg->ctrl_ord |= IETF_RDMA0_READ;
+                ctrl_ord |= IETF_RDMA0_WRITE;
+                ctrl_ord |= IETF_RDMA0_READ;
                 break;
         case MPA_KEY_REPLY:
                 switch (cm_node->send_rdma0_op) {
                 case SEND_RDMA_WRITE_ZERO:
-                        rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE;
+                        ctrl_ord |= IETF_RDMA0_WRITE;
                         break;
                 case SEND_RDMA_READ_ZERO:
-                        rtr_msg->ctrl_ord |= IETF_RDMA0_READ;
+                        ctrl_ord |= IETF_RDMA0_READ;
                         break;
                 }
         }
-        rtr_msg->ctrl_ird = htons(rtr_msg->ctrl_ird);
-        rtr_msg->ctrl_ord = htons(rtr_msg->ctrl_ord);
+        rtr_msg->ctrl_ird = htons(ctrl_ird);
+        rtr_msg->ctrl_ord = htons(ctrl_ord);
 }
 
 /**
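Both nes hunks apply the same endianness fix: the old code did flag arithmetic directly on the big-endian ctrl_ird/ctrl_ord wire fields (and in parse_mpa even byte-swapped them in place), mixing byte orders in a way sparse flags. The rework keeps all arithmetic in host-order u16 locals and crosses the wire boundary exactly once, ntohs() on input and htons() on output. A small userspace sketch of the pattern, with illustrative flag values rather than the real IETF_* constants:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PEER_TO_PEER 0x8000     /* illustrative values */
    #define RDMA0_READ   0x4000

    struct rtr_msg {                /* wire format: big-endian fields */
        uint16_t ctrl_ird;
        uint16_t ctrl_ord;
    };

    static void build(struct rtr_msg *rtr, uint16_t ird, uint16_t ord)
    {
        uint16_t ctrl_ird = ird | PEER_TO_PEER; /* host order throughout... */
        uint16_t ctrl_ord = ord | RDMA0_READ;

        rtr->ctrl_ird = htons(ctrl_ird);        /* ...converted exactly once */
        rtr->ctrl_ord = htons(ctrl_ord);
    }

    static int parse(const struct rtr_msg *rtr)
    {
        uint16_t ctrl_ird = ntohs(rtr->ctrl_ird); /* copy out; never swap *rtr */

        return (ctrl_ird & PEER_TO_PEER) != 0;
    }

    int main(void)
    {
        struct rtr_msg rtr;

        build(&rtr, 4, 4);
        printf("peer-to-peer: %s\n", parse(&rtr) ? "yes" : "no");
        return 0;
    }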
10 changes: 9 additions & 1 deletion drivers/infiniband/hw/qib/qib.h
@@ -427,6 +427,14 @@ struct qib_verbs_txreq {
 /* how often we check for packet activity for "power on hours (in seconds) */
 #define ACTIVITY_TIMER 5
 
+#define MAX_NAME_SIZE 64
+struct qib_msix_entry {
+        struct msix_entry msix;
+        void *arg;
+        char name[MAX_NAME_SIZE];
+        cpumask_var_t mask;
+};
+
 /* Below is an opaque struct. Each chip (device) can maintain
  * private data needed for its operation, but not germane to the
  * rest of the driver. For convenience, we define another that
@@ -1355,7 +1363,7 @@ int qib_pcie_init(struct pci_dev *, const struct pci_device_id *);
 int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
                     const struct pci_device_id *);
 void qib_pcie_ddcleanup(struct qib_devdata *);
-int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct msix_entry *);
+int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct qib_msix_entry *);
 int qib_reinit_intr(struct qib_devdata *);
 void qib_enable_intx(struct pci_dev *);
 void qib_nomsi(struct qib_devdata *);
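The new qib_msix_entry wraps the PCI core's msix_entry with the per-vector state the driver needs: a handler argument, a name buffer that must stay valid while the IRQ is requested (request_irq() keeps the name pointer rather than copying the string), and a CPU mask for affinity. A userspace sketch of the wrapper pattern, with msix_entry stubbed, an invented naming scheme, and a plain bitmask standing in for cpumask_var_t:

    #include <stdio.h>
    #include <string.h>

    struct msix_entry {             /* stub of the PCI core's descriptor */
        unsigned int vector;
        unsigned int entry;
    };

    #define MAX_NAME_SIZE 64
    struct qib_msix_entry {
        struct msix_entry msix;     /* filled in when vectors are allocated */
        void *arg;                  /* context passed to this vector's handler */
        char name[MAX_NAME_SIZE];   /* must outlive the request_irq() call */
        unsigned long mask;         /* stand-in for cpumask_var_t */
    };

    int main(void)
    {
        struct qib_msix_entry ent = {
            .msix = { .vector = 42, .entry = 0 },
        };

        snprintf(ent.name, sizeof(ent.name), "qib%u_%u", 0u, ent.msix.entry);
        ent.mask = 1UL << 3;        /* hint: run this vector's handler on CPU 3 */

        printf("%s -> vector %u, affinity mask 0x%lx\n",
               ent.name, ent.msix.vector, ent.mask);
        return 0;
    }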