Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Signed-off-by: David S. Miller <[email protected]>
davem330 committed Jan 29, 2018
2 parents 868c36d + ba804bb commit 3e3ab9c
Showing 28 changed files with 346 additions and 220 deletions.
18 changes: 11 additions & 7 deletions arch/s390/kvm/kvm-s390.c
@@ -769,7 +769,7 @@ static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)

/*
* Must be called with kvm->srcu held to avoid races on memslots, and with
* kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
* kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
*/
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
@@ -825,7 +825,7 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
}

/*
* Must be called with kvm->lock to avoid races with ourselves and
* Must be called with kvm->slots_lock to avoid races with ourselves and
* kvm_s390_vm_start_migration.
*/
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
@@ -840,6 +840,8 @@ static int kvm_s390_vm_stop_migration(struct kvm *kvm)

if (kvm->arch.use_cmma) {
kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
/* We have to wait for the essa emulation to finish */
synchronize_srcu(&kvm->srcu);
vfree(mgs->pgste_bitmap);
}
kfree(mgs);
@@ -849,22 +851,20 @@ static int kvm_s390_vm_stop_migration(struct kvm *kvm)
static int kvm_s390_vm_set_migration(struct kvm *kvm,
struct kvm_device_attr *attr)
{
int idx, res = -ENXIO;
int res = -ENXIO;

mutex_lock(&kvm->lock);
mutex_lock(&kvm->slots_lock);
switch (attr->attr) {
case KVM_S390_VM_MIGRATION_START:
idx = srcu_read_lock(&kvm->srcu);
res = kvm_s390_vm_start_migration(kvm);
srcu_read_unlock(&kvm->srcu, idx);
break;
case KVM_S390_VM_MIGRATION_STOP:
res = kvm_s390_vm_stop_migration(kvm);
break;
default:
break;
}
mutex_unlock(&kvm->lock);
mutex_unlock(&kvm->slots_lock);

return res;
}
@@ -1754,7 +1754,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&args, argp, sizeof(args)))
break;
mutex_lock(&kvm->slots_lock);
r = kvm_s390_get_cmma_bits(kvm, &args);
mutex_unlock(&kvm->slots_lock);
if (!r) {
r = copy_to_user(argp, &args, sizeof(args));
if (r)
@@ -1768,7 +1770,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&args, argp, sizeof(args)))
break;
mutex_lock(&kvm->slots_lock);
r = kvm_s390_set_cmma_bits(kvm, &args);
mutex_unlock(&kvm->slots_lock);
break;
}
default:
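
Note on the kvm-s390.c hunks above: the migration start/stop paths now serialize on kvm->slots_lock instead of kvm->lock, and kvm_s390_vm_stop_migration() calls synchronize_srcu() so that any in-flight ESSA emulation has finished before the PGSTE bitmap is freed. A minimal illustrative sketch of that SRCU wait-before-free pattern (names here are hypothetical, not taken from this commit):

#include <linux/srcu.h>
#include <linux/vmalloc.h>

static DEFINE_STATIC_SRCU(demo_srcu);

/* Reader side: touches the object only inside an SRCU read-side section. */
static void demo_reader(unsigned long *bitmap)
{
	int idx = srcu_read_lock(&demo_srcu);

	/* ... read or update the bitmap while protected ... */

	srcu_read_unlock(&demo_srcu, idx);
}

/* Updater side: wait for every such reader to finish, then free. */
static void demo_free(unsigned long *bitmap)
{
	synchronize_srcu(&demo_srcu);	/* all readers have dropped out */
	vfree(bitmap);			/* now safe to free */
}
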
46 changes: 31 additions & 15 deletions drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -71,6 +71,10 @@ nvkm_pci_intr(int irq, void *arg)
struct nvkm_pci *pci = arg;
struct nvkm_device *device = pci->subdev.device;
bool handled = false;

if (pci->irq < 0)
return IRQ_HANDLED;

nvkm_mc_intr_unarm(device);
if (pci->msi)
pci->func->msi_rearm(pci);
@@ -84,11 +88,6 @@ nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_pci *pci = nvkm_pci(subdev);

if (pci->irq >= 0) {
free_irq(pci->irq, pci);
pci->irq = -1;
}

if (pci->agp.bridge)
nvkm_agp_fini(pci);

@@ -108,16 +107,27 @@ static int
nvkm_pci_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_pci *pci = nvkm_pci(subdev);
if (pci_is_pcie(pci->pdev))
return nvkm_pcie_oneinit(pci);
struct pci_dev *pdev = pci->pdev;
int ret;

if (pci_is_pcie(pci->pdev)) {
ret = nvkm_pcie_oneinit(pci);
if (ret)
return ret;
}

ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
if (ret)
return ret;

pci->irq = pdev->irq;
return 0;
}

static int
nvkm_pci_init(struct nvkm_subdev *subdev)
{
struct nvkm_pci *pci = nvkm_pci(subdev);
struct pci_dev *pdev = pci->pdev;
int ret;

if (pci->agp.bridge) {
@@ -131,28 +141,34 @@ nvkm_pci_init(struct nvkm_subdev *subdev)
if (pci->func->init)
pci->func->init(pci);

ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
if (ret)
return ret;

pci->irq = pdev->irq;

/* Ensure MSI interrupts are armed, for the case where there are
* already interrupts pending (for whatever reason) at load time.
*/
if (pci->msi)
pci->func->msi_rearm(pci);

return ret;
return 0;
}

static void *
nvkm_pci_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_pci *pci = nvkm_pci(subdev);

nvkm_agp_dtor(pci);

if (pci->irq >= 0) {
/* free_irq() will call the handler, we use pci->irq == -1
* to signal that it's been torn down and should be a noop.
*/
int irq = pci->irq;
pci->irq = -1;
free_irq(irq, pci);
}

if (pci->msi)
pci_disable_msi(pci->pdev);

return nvkm_pci(subdev);
}

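
Note on the nouveau pci/base.c hunks above: the interrupt is now requested once in nvkm_pci_oneinit() and released only in the destructor. Because the line is registered with IRQF_SHARED, free_irq() may invoke the handler one final time, so pci->irq is set to -1 before the call and nvkm_pci_intr() returns early when it sees that. A generic sketch of the same teardown ordering (struct and function names below are made up for illustration):

#include <linux/interrupt.h>

struct demo_dev {
	int irq;			/* -1 once the handler is torn down */
};

static irqreturn_t demo_intr(int irq, void *arg)
{
	struct demo_dev *dev = arg;

	if (dev->irq < 0)		/* torn down: act as a harmless no-op */
		return IRQ_HANDLED;

	/* ... real interrupt handling would go here ... */
	return IRQ_HANDLED;
}

static void demo_teardown(struct demo_dev *dev)
{
	if (dev->irq >= 0) {
		int irq = dev->irq;

		dev->irq = -1;		/* flag first ... */
		free_irq(irq, dev);	/* ... then release; may call demo_intr() */
	}
}
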
33 changes: 27 additions & 6 deletions drivers/gpu/drm/vc4/vc4_gem.c
@@ -146,7 +146,7 @@ vc4_save_hang_state(struct drm_device *dev)
struct vc4_exec_info *exec[2];
struct vc4_bo *bo;
unsigned long irqflags;
unsigned int i, j, unref_list_count, prev_idx;
unsigned int i, j, k, unref_list_count;

kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
if (!kernel_state)
@@ -182,7 +182,7 @@ vc4_save_hang_state(struct drm_device *dev)
return;
}

prev_idx = 0;
k = 0;
for (i = 0; i < 2; i++) {
if (!exec[i])
continue;
@@ -197,20 +197,20 @@ vc4_save_hang_state(struct drm_device *dev)
WARN_ON(!refcount_read(&bo->usecnt));
refcount_inc(&bo->usecnt);
drm_gem_object_get(&exec[i]->bo[j]->base);
kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
kernel_state->bo[k++] = &exec[i]->bo[j]->base;
}

list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
/* No need to retain BOs coming from the ->unref_list
* because they are naturally unpurgeable.
*/
drm_gem_object_get(&bo->base.base);
kernel_state->bo[j + prev_idx] = &bo->base.base;
j++;
kernel_state->bo[k++] = &bo->base.base;
}
prev_idx = j + 1;
}

WARN_ON_ONCE(k != state->bo_count);

if (exec[0])
state->start_bin = exec[0]->ct0ca;
if (exec[1])
@@ -436,6 +436,19 @@ vc4_flush_caches(struct drm_device *dev)
VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

static void
vc4_flush_texture_caches(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);

V3D_WRITE(V3D_L2CACTL,
V3D_L2CACTL_L2CCLR);

V3D_WRITE(V3D_SLCACTL,
VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
}

/* Sets the registers for the next job to actually be executed in
* the hardware.
*
@@ -474,6 +487,14 @@ vc4_submit_next_render_job(struct drm_device *dev)
if (!exec)
return;

/* A previous RCL may have written to one of our textures, and
* our full cache flush at bin time may have occurred before
* that RCL completed. Flush the texture cache now, but not
* the instructions or uniforms (since we don't write those
* from an RCL).
*/
vc4_flush_texture_caches(dev);

submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}

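
Note on the vc4_gem.c hunks above: the hang-state dump used to place BOs at j + prev_idx and then set prev_idx = j + 1, which could skip a slot or index past the allocation when both the bin and render jobs were captured; the fix keeps a single running index k across both loops and warns if it ends up different from state->bo_count. The same pattern in isolation (illustrative sketch, not driver code):

#include <linux/bug.h>

/* Flatten several variable-length source arrays into one destination,
 * advancing a single running index instead of recomputing offsets.
 */
static unsigned int flatten(void **dst, unsigned int dst_len,
			    void **src[], const unsigned int *src_len,
			    unsigned int nsrc)
{
	unsigned int i, j, k = 0;

	for (i = 0; i < nsrc; i++)
		for (j = 0; j < src_len[i]; j++)
			dst[k++] = src[i][j];

	WARN_ON_ONCE(k != dst_len);	/* caller sized dst for exactly this */
	return k;
}
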
3 changes: 1 addition & 2 deletions drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1456,8 +1456,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int e = skb_queue_empty(&priv->cm.skb_queue);

if (skb_dst(skb))
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
skb_dst_update_pmtu(skb, mtu);

skb_queue_tail(&priv->cm.skb_queue, skb);
if (e)
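
Note on the ipoib_cm.c hunk above: the open-coded skb_dst() lookup plus ->update_pmtu() call is replaced by the skb_dst_update_pmtu() helper from include/net/dst.h, which folds the dst checks into one place. The helper has roughly this shape (paraphrased sketch, not copied from this commit):

static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst->ops->update_pmtu)
		dst->ops->update_pmtu(dst, NULL, skb, mtu);
}
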
19 changes: 19 additions & 0 deletions drivers/input/joystick/xpad.c
@@ -229,6 +229,7 @@ static const struct xpad_device {
{ 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
@@ -475,6 +476,22 @@ static const u8 xboxone_hori_init[] = {
0x00, 0x00, 0x00, 0x80, 0x00
};

/*
* This packet is required for some of the PDP pads to start
* sending input reports. One of those pads is (0x0e6f:0x02ab).
*/
static const u8 xboxone_pdp_init1[] = {
0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14
};

/*
* This packet is required for some of the PDP pads to start
* sending input reports. One of those pads is (0x0e6f:0x02ab).
*/
static const u8 xboxone_pdp_init2[] = {
0x06, 0x20, 0x00, 0x02, 0x01, 0x00
};

/*
* A specific rumble packet is required for some PowerA pads to start
* sending input reports. One of those pads is (0x24c6:0x543a).
@@ -505,6 +522,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1),
XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2),
XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
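
Note on the xpad.c hunk above: xboxone_init_packets is walked in order when an Xbox One pad is initialized, and every entry whose vendor/product IDs match the connected device is sent to it (the 0x0000/0x0000 entry acting as a wildcard), which is why the two PDP packets for 0x0e6f:0x02ab are listed back to back. A hedged sketch of that matching loop (the field names are assumptions and xpad_send_init() is a hypothetical helper, not the driver's real code):

static void send_matching_init_packets(struct usb_device *udev, void *pad)
{
	u16 vid = le16_to_cpu(udev->descriptor.idVendor);
	u16 pid = le16_to_cpu(udev->descriptor.idProduct);
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(xboxone_init_packets); i++) {
		const struct xboxone_init_packet *init = &xboxone_init_packets[i];

		if (init->idVendor && init->idVendor != vid)
			continue;	/* 0x0000 matches any vendor */
		if (init->idProduct && init->idProduct != pid)
			continue;	/* 0x0000 matches any product */

		xpad_send_init(pad, init->data, init->len);	/* hypothetical */
	}
}
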