
Commit

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/ehca: SRQ fixes to enable IPoIB CM
  IB/ehca: Fix Small QP regressions
Linus Torvalds committed Sep 1, 2007
2 parents e89a5a4 + 5ff70ca commit 6db602d
Showing 4 changed files with 45 additions and 25 deletions.
10 changes: 7 additions & 3 deletions drivers/infiniband/hw/ehca/ehca_hca.c
@@ -93,9 +93,13 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
 	props->max_pd = min_t(int, rblock->max_pd, INT_MAX);
 	props->max_ah = min_t(int, rblock->max_ah, INT_MAX);
 	props->max_fmr = min_t(int, rblock->max_mr, INT_MAX);
-	props->max_srq = 0;
-	props->max_srq_wr = 0;
-	props->max_srq_sge = 0;
+
+	if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
+		props->max_srq = props->max_qp;
+		props->max_srq_wr = props->max_qp_wr;
+		props->max_srq_sge = 3;
+	}
+
 	props->max_pkeys = 16;
 	props->local_ca_ack_delay
 		= rblock->local_ca_ack_delay;
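
With these limits advertised, a kernel consumer can discover SRQ support through the usual verbs device query before trying to allocate one. The sketch below is illustrative only and not part of this commit; it assumes the standard in-kernel ib_verbs API, and try_create_srq() and the chosen sizes are hypothetical.

	/*
	 * Illustrative sketch, not from this commit: a ULP (e.g. IPoIB CM)
	 * checks the limits reported by ehca_query_device() before creating
	 * an SRQ.  try_create_srq() and the sizes below are hypothetical.
	 */
	#include <linux/err.h>
	#include <rdma/ib_verbs.h>

	static struct ib_srq *try_create_srq(struct ib_device *dev, struct ib_pd *pd)
	{
		struct ib_device_attr attr;
		struct ib_srq_init_attr srq_attr = { };

		if (ib_query_device(dev, &attr) || !attr.max_srq)
			return ERR_PTR(-ENOSYS);	/* no SRQ support advertised */

		/* stay within the advertised limits */
		srq_attr.attr.max_wr  = min_t(u32, 1024, attr.max_srq_wr);
		srq_attr.attr.max_sge = min_t(u32, 2, attr.max_srq_sge);

		return ib_create_srq(pd, &srq_attr);	/* ERR_PTR() on failure */
	}
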
48 changes: 31 additions & 17 deletions drivers/infiniband/hw/ehca/ehca_irq.c
@@ -175,41 +175,55 @@ int ehca_error_data(struct ehca_shca *shca, void *data,
 
 }
 
-static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
-			      enum ib_event_type event_type, int fatal)
+static void dispatch_qp_event(struct ehca_shca *shca, struct ehca_qp *qp,
+			      enum ib_event_type event_type)
 {
 	struct ib_event event;
-	struct ehca_qp *qp;
-	u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
-
-	read_lock(&ehca_qp_idr_lock);
-	qp = idr_find(&ehca_qp_idr, token);
-	read_unlock(&ehca_qp_idr_lock);
-
-
-	if (!qp)
-		return;
-
-	if (fatal)
-		ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);
 
 	event.device = &shca->ib_device;
+	event.event = event_type;
 
 	if (qp->ext_type == EQPT_SRQ) {
 		if (!qp->ib_srq.event_handler)
 			return;
 
-		event.event = fatal ? IB_EVENT_SRQ_ERR : event_type;
 		event.element.srq = &qp->ib_srq;
 		qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context);
 	} else {
 		if (!qp->ib_qp.event_handler)
 			return;
 
-		event.event = event_type;
 		event.element.qp = &qp->ib_qp;
 		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
 	}
+}
 
+static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
+			      enum ib_event_type event_type, int fatal)
+{
+	struct ehca_qp *qp;
+	u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
+
+	read_lock(&ehca_qp_idr_lock);
+	qp = idr_find(&ehca_qp_idr, token);
+	read_unlock(&ehca_qp_idr_lock);
+
+	if (!qp)
+		return;
+
+	if (fatal)
+		ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);
+
+	dispatch_qp_event(shca, qp, fatal && qp->ext_type == EQPT_SRQ ?
+			  IB_EVENT_SRQ_ERR : event_type);
+
+	/*
+	 * eHCA only processes one WQE at a time for SRQ base QPs,
+	 * so the last WQE has been processed as soon as the QP enters
+	 * error state.
+	 */
+	if (fatal && qp->ext_type == EQPT_SRQBASE)
+		dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED);
+
 	return;
 }
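
dispatch_qp_event() above hands the ib_event to whichever handler the consumer registered for the QP or SRQ. For orientation, here is a minimal consumer-side sketch, not from this commit, of a QP event handler that reacts to the events this driver now generates; the handler name and schedule_qp_teardown() are hypothetical.

	/*
	 * Minimal consumer-side sketch, not from this commit.  This is the
	 * handler installed via ib_qp_init_attr.event_handler; the name and
	 * schedule_qp_teardown() are hypothetical.
	 */
	static void my_qp_event_handler(struct ib_event *event, void *qp_context)
	{
		switch (event->event) {
		case IB_EVENT_QP_LAST_WQE_REACHED:
			/* eHCA has consumed the last SRQ WQE for this QP;
			 * it is now safe to flush and destroy the QP. */
			schedule_qp_teardown(qp_context);
			break;
		case IB_EVENT_QP_FATAL:
			/* log and recover */
			break;
		default:
			break;
		}
	}
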
10 changes: 6 additions & 4 deletions drivers/infiniband/hw/ehca/ehca_qp.c
@@ -600,10 +600,12 @@ static struct ehca_qp *internal_create_qp(
 
 	if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)
 	    && !(context && udata)) { /* no small QP support in userspace ATM */
-		ehca_determine_small_queue(
-			&parms.squeue, max_send_sge, is_llqp);
-		ehca_determine_small_queue(
-			&parms.rqueue, max_recv_sge, is_llqp);
+		if (HAS_SQ(my_qp))
+			ehca_determine_small_queue(
+				&parms.squeue, max_send_sge, is_llqp);
+		if (HAS_RQ(my_qp))
+			ehca_determine_small_queue(
+				&parms.rqueue, max_recv_sge, is_llqp);
 		parms.qp_storage =
 			(parms.squeue.is_small || parms.rqueue.is_small);
 	}
2 changes: 1 addition & 1 deletion drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -172,7 +172,7 @@ static void free_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
 	unsigned long bit;
 	int free_page = 0;
 
-	bit = ((unsigned long)queue->queue_pages[0] & PAGE_MASK)
+	bit = ((unsigned long)queue->queue_pages[0] & ~PAGE_MASK)
 		>> (order + 9);
 
 	mutex_lock(&pd->lock);
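
The one-line fix above matters because PAGE_MASK keeps the page-aligned part of an address, while the bitmap index of a small-queue slot depends on the offset inside its page, i.e. addr & ~PAGE_MASK. A worked example follows, assuming PAGE_SIZE is 4096 and order is 0 (512-byte slots); the address value and the small_queue_bit() helper are made up for illustration.

	/*
	 * Worked example, assuming PAGE_SIZE == 4096 (PAGE_MASK == ~0xfffUL)
	 * and order == 0, i.e. 512-byte slots, eight per page.  The address
	 * is made up for illustration.
	 *
	 *   addr              = 0xf600      (4th slot of its page)
	 *   addr & PAGE_MASK  = 0xf000  ->  >> 9 == 120  (page number leaks in: wrong)
	 *   addr & ~PAGE_MASK = 0x0600  ->  >> 9 == 3    (slot index within the page: right)
	 */
	static unsigned long small_queue_bit(unsigned long addr, int order)
	{
		return (addr & ~PAGE_MASK) >> (order + 9);	/* PAGE_MASK from <asm/page.h> */
	}
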
