diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c index dc832ddf7033f57a61196f5eba7f96581d01c5f9..06a42b68446fcdeea3b52324888fd5d738cdcc80 100644 --- a/drivers/usb/host/xhci-debugfs.c +++ b/drivers/usb/host/xhci-debugfs.c @@ -692,7 +692,7 @@ void xhci_debugfs_init(struct xhci_hcd *xhci) "command-ring", xhci->debugfs_root); - xhci_debugfs_create_ring_dir(xhci, &xhci->event_ring, + xhci_debugfs_create_ring_dir(xhci, &xhci->interrupters[0]->event_ring, "event-ring", xhci->debugfs_root); diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 94adae8b19f00c709a056f10763158485399b43c..7fb683d06e0be0f34933cf6011e30df284fa86a3 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -1762,9 +1762,10 @@ int xhci_bus_resume(struct usb_hcd *hcd) int slot_id; int sret; u32 next_state; - u32 temp, portsc; + u32 portsc; struct xhci_hub *rhub; struct xhci_port **ports; + bool disabled_irq = false; rhub = xhci_get_rhub(hcd); ports = rhub->ports; @@ -1780,17 +1781,20 @@ int xhci_bus_resume(struct usb_hcd *hcd) return -ESHUTDOWN; } - /* delay the irqs */ - temp = readl(&xhci->op_regs->command); - temp &= ~CMD_EIE; - writel(temp, &xhci->op_regs->command); - /* bus specific resume for ports we suspended at bus_suspend */ - if (hcd->speed >= HCD_USB3) + if (hcd->speed >= HCD_USB3) { next_state = XDEV_U0; - else + } else { next_state = XDEV_RESUME; - + if (bus_state->bus_suspended) { + /* + * prevent port event interrupts from interfering + * with usb2 port resume process + */ + xhci_disable_interrupter(xhci->interrupters[0]); + disabled_irq = true; + } + } port_index = max_ports; while (port_index--) { portsc = readl(ports[port_index]->addr); @@ -1859,11 +1863,9 @@ int xhci_bus_resume(struct usb_hcd *hcd) (void) readl(&xhci->op_regs->command); bus_state->next_statechange = jiffies + msecs_to_jiffies(5); - /* re-enable irqs */ - temp = readl(&xhci->op_regs->command); - temp |= CMD_EIE; - writel(temp, &xhci->op_regs->command); - temp = readl(&xhci->op_regs->command); + /* re-enable interrupter */ + if (disabled_irq) + xhci_enable_interrupter(xhci->interrupters[0]); spin_unlock_irqrestore(&xhci->lock, flags); return 0; diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 5c55bf3299a3746ffbe6f2e25e96655c88c6c378..27a8d0a9647c28740c18fb398cc91cbb40be20b5 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -28,6 +28,7 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, unsigned int cycle_state, unsigned int max_packet, + unsigned int num, gfp_t flags) { struct xhci_segment *seg; @@ -59,6 +60,7 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, for (i = 0; i < TRBS_PER_SEGMENT; i++) seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE); } + seg->num = num; seg->dma = dma; seg->next = NULL; @@ -316,6 +318,7 @@ void xhci_initialize_ring_info(struct xhci_ring *ring, */ ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1; } +EXPORT_SYMBOL_GPL(xhci_initialize_ring_info); /* Allocate segments and link them for a ring */ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, @@ -324,6 +327,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, enum xhci_ring_type type, unsigned int max_packet, gfp_t flags) { struct xhci_segment *prev; + unsigned int num = 0; bool chain_links; /* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */ @@ -331,16 +335,17 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, 
(type == TYPE_ISOC && (xhci->quirks & XHCI_AMD_0x96_HOST))); - prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags); + prev = xhci_segment_alloc(xhci, cycle_state, max_packet, num, flags); if (!prev) return -ENOMEM; - num_segs--; + num++; *first = prev; - while (num_segs > 0) { + while (num < num_segs) { struct xhci_segment *next; - next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags); + next = xhci_segment_alloc(xhci, cycle_state, max_packet, num, + flags); if (!next) { prev = *first; while (prev) { @@ -353,7 +358,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci, xhci_link_segments(prev, next, type, chain_links); prev = next; - num_segs--; + num++; } xhci_link_segments(prev, *first, type, chain_links); *last = prev; @@ -630,8 +635,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, int ret; struct device *dev = xhci_to_hcd(xhci)->self.sysdev; - xhci_dbg(xhci, "Allocating %u streams and %u " - "stream context array entries.\n", + xhci_dbg(xhci, "Allocating %u streams and %u stream context array entries.\n", num_streams, num_stream_ctxs); if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) { xhci_dbg(xhci, "Command ring has no reserved TRBs available\n"); @@ -1833,18 +1837,77 @@ int xhci_alloc_erst(struct xhci_hcd *xhci, return 0; } -void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst) +static void +xhci_remove_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir) +{ + u32 tmp; + + if (!ir) + return; + + /* + * Clean out interrupter registers except ERSTBA. Clearing either the + * low or high 32 bits of ERSTBA immediately causes the controller to + * dereference the partially cleared 64 bit address, causing IOMMU error. + */ + if (ir->ir_set) { + tmp = readl(&ir->ir_set->erst_size); + tmp &= ERST_SIZE_MASK; + writel(tmp, &ir->ir_set->erst_size); + + xhci_write_64(xhci, ERST_EHB, &ir->ir_set->erst_dequeue); + } +} + +static void +xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir) { - size_t size; struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + size_t erst_size; - size = sizeof(struct xhci_erst_entry) * (erst->num_entries); - if (erst->entries) - dma_free_coherent(dev, size, - erst->entries, - erst->erst_dma_addr); - erst->entries = NULL; + if (!ir) + return; + + erst_size = sizeof(struct xhci_erst_entry) * ir->erst.num_entries; + if (ir->erst.entries) + dma_free_coherent(dev, erst_size, + ir->erst.entries, + ir->erst.erst_dma_addr); + ir->erst.entries = NULL; + + /* free interrupter event ring */ + if (ir->event_ring) + xhci_ring_free(xhci, ir->event_ring); + + ir->event_ring = NULL; + + kfree(ir); +} + +void xhci_remove_secondary_interrupter(struct usb_hcd *hcd, struct xhci_interrupter *ir) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + unsigned int intr_num; + + /* interrupter 0 is primary interrupter, don't touch it */ + if (!ir || !ir->intr_num || ir->intr_num >= xhci->max_interrupters) { + xhci_dbg(xhci, "Invalid secondary interrupter, can't remove\n"); + return; + } + + /* fixme, should we check xhci->interrupters[intr_num] == ir */ + /* fixme locking */ + + spin_lock_irq(&xhci->lock); + + intr_num = ir->intr_num; + + xhci_remove_interrupter(xhci, ir); + xhci->interrupters[intr_num] = NULL; + + spin_unlock_irq(&xhci->lock); + + xhci_free_interrupter(xhci, ir); } +EXPORT_SYMBOL_GPL(xhci_remove_secondary_interrupter); void xhci_mem_cleanup(struct xhci_hcd *xhci) { @@ -1853,12 +1916,14 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) cancel_delayed_work_sync(&xhci->cmd_timer); -
xhci_free_erst(xhci, &xhci->erst); - - if (xhci->event_ring) - xhci_ring_free(xhci, xhci->event_ring); - xhci->event_ring = NULL; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring"); + for (i = 0; i < xhci->max_interrupters; i++) { + if (xhci->interrupters[i]) { + xhci_remove_interrupter(xhci, xhci->interrupters[i]); + xhci_free_interrupter(xhci, xhci->interrupters[i]); + xhci->interrupters[i] = NULL; + } + } + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed interrupters"); if (xhci->lpm_command) xhci_free_command(xhci, xhci->lpm_command); @@ -1931,6 +1996,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) for (i = 0; i < xhci->num_port_caps; i++) kfree(xhci->port_caps[i].psi); kfree(xhci->port_caps); + kfree(xhci->interrupters); xhci->num_port_caps = 0; xhci->usb2_rhub.ports = NULL; @@ -1939,6 +2005,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) xhci->rh_bw = NULL; xhci->ext_caps = NULL; xhci->port_caps = NULL; + xhci->interrupters = NULL; xhci->page_size = 0; xhci->page_shift = 0; @@ -1946,186 +2013,22 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) xhci->usb3_rhub.bus_state.bus_suspended = 0; } -static int xhci_test_trb_in_td(struct xhci_hcd *xhci, - struct xhci_segment *input_seg, - union xhci_trb *start_trb, - union xhci_trb *end_trb, - dma_addr_t input_dma, - struct xhci_segment *result_seg, - char *test_name, int test_number) -{ - unsigned long long start_dma; - unsigned long long end_dma; - struct xhci_segment *seg; - - start_dma = xhci_trb_virt_to_dma(input_seg, start_trb); - end_dma = xhci_trb_virt_to_dma(input_seg, end_trb); - - seg = trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, false); - if (seg != result_seg) { - xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n", - test_name, test_number); - xhci_warn(xhci, "Tested TRB math w/ seg %p and " - "input DMA 0x%llx\n", - input_seg, - (unsigned long long) input_dma); - xhci_warn(xhci, "starting TRB %p (0x%llx DMA), " - "ending TRB %p (0x%llx DMA)\n", - start_trb, start_dma, - end_trb, end_dma); - xhci_warn(xhci, "Expected seg %p, got seg %p\n", - result_seg, seg); - trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, - true); - return -1; - } - return 0; -} - -/* TRB math checks for xhci_trb_in_td(), using the command and event rings. 
*/ -static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci) -{ - struct { - dma_addr_t input_dma; - struct xhci_segment *result_seg; - } simple_test_vector [] = { - /* A zeroed DMA field should fail */ - { 0, NULL }, - /* One TRB before the ring start should fail */ - { xhci->event_ring->first_seg->dma - 16, NULL }, - /* One byte before the ring start should fail */ - { xhci->event_ring->first_seg->dma - 1, NULL }, - /* Starting TRB should succeed */ - { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg }, - /* Ending TRB should succeed */ - { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16, - xhci->event_ring->first_seg }, - /* One byte after the ring end should fail */ - { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL }, - /* One TRB after the ring end should fail */ - { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL }, - /* An address of all ones should fail */ - { (dma_addr_t) (~0), NULL }, - }; - struct { - struct xhci_segment *input_seg; - union xhci_trb *start_trb; - union xhci_trb *end_trb; - dma_addr_t input_dma; - struct xhci_segment *result_seg; - } complex_test_vector [] = { - /* Test feeding a valid DMA address from a different ring */ - { .input_seg = xhci->event_ring->first_seg, - .start_trb = xhci->event_ring->first_seg->trbs, - .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], - .input_dma = xhci->cmd_ring->first_seg->dma, - .result_seg = NULL, - }, - /* Test feeding a valid end TRB from a different ring */ - { .input_seg = xhci->event_ring->first_seg, - .start_trb = xhci->event_ring->first_seg->trbs, - .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], - .input_dma = xhci->cmd_ring->first_seg->dma, - .result_seg = NULL, - }, - /* Test feeding a valid start and end TRB from a different ring */ - { .input_seg = xhci->event_ring->first_seg, - .start_trb = xhci->cmd_ring->first_seg->trbs, - .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], - .input_dma = xhci->cmd_ring->first_seg->dma, - .result_seg = NULL, - }, - /* TRB in this ring, but after this TD */ - { .input_seg = xhci->event_ring->first_seg, - .start_trb = &xhci->event_ring->first_seg->trbs[0], - .end_trb = &xhci->event_ring->first_seg->trbs[3], - .input_dma = xhci->event_ring->first_seg->dma + 4*16, - .result_seg = NULL, - }, - /* TRB in this ring, but before this TD */ - { .input_seg = xhci->event_ring->first_seg, - .start_trb = &xhci->event_ring->first_seg->trbs[3], - .end_trb = &xhci->event_ring->first_seg->trbs[6], - .input_dma = xhci->event_ring->first_seg->dma + 2*16, - .result_seg = NULL, - }, - /* TRB in this ring, but after this wrapped TD */ - { .input_seg = xhci->event_ring->first_seg, - .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], - .end_trb = &xhci->event_ring->first_seg->trbs[1], - .input_dma = xhci->event_ring->first_seg->dma + 2*16, - .result_seg = NULL, - }, - /* TRB in this ring, but before this wrapped TD */ - { .input_seg = xhci->event_ring->first_seg, - .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], - .end_trb = &xhci->event_ring->first_seg->trbs[1], - .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16, - .result_seg = NULL, - }, - /* TRB not in this ring, and we have a wrapped TD */ - { .input_seg = xhci->event_ring->first_seg, - .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], - .end_trb = &xhci->event_ring->first_seg->trbs[1], - .input_dma = xhci->cmd_ring->first_seg->dma + 2*16, 
- .result_seg = NULL, - }, - }; - - unsigned int num_tests; - int i, ret; - - num_tests = ARRAY_SIZE(simple_test_vector); - for (i = 0; i < num_tests; i++) { - ret = xhci_test_trb_in_td(xhci, - xhci->event_ring->first_seg, - xhci->event_ring->first_seg->trbs, - &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], - simple_test_vector[i].input_dma, - simple_test_vector[i].result_seg, - "Simple", i); - if (ret < 0) - return ret; - } - - num_tests = ARRAY_SIZE(complex_test_vector); - for (i = 0; i < num_tests; i++) { - ret = xhci_test_trb_in_td(xhci, - complex_test_vector[i].input_seg, - complex_test_vector[i].start_trb, - complex_test_vector[i].end_trb, - complex_test_vector[i].input_dma, - complex_test_vector[i].result_seg, - "Complex", i); - if (ret < 0) - return ret; - } - xhci_dbg(xhci, "TRB math tests passed.\n"); - return 0; -} - -static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) +static void xhci_set_hc_event_deq(struct xhci_hcd *xhci, struct xhci_interrupter *ir) { - u64 temp; dma_addr_t deq; - deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, - xhci->event_ring->dequeue); - if (deq == 0 && !in_interrupt()) - xhci_warn(xhci, "WARN something wrong with SW event ring " - "dequeue ptr.\n"); + deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg, + ir->event_ring->dequeue); + if (!deq) + xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr.\n"); /* Update HC event ring dequeue pointer */ - temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); - temp &= ERST_PTR_MASK; /* Don't clear the EHB bit (which is RW1C) because * there might be more events to service. */ - temp &= ~ERST_EHB; xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Write event ring dequeue pointer, " - "preserving EHB bit"); - xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp, - &xhci->ir_set->erst_dequeue); + "// Write event ring dequeue pointer, preserving EHB bit"); + xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK), + &ir->ir_set->erst_dequeue); } static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, @@ -2156,8 +2059,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, } else if (major_revision <= 0x02) { rhub = &xhci->usb2_rhub; } else { - xhci_warn(xhci, "Ignoring unknown port speed, " - "Ext Cap %p, revision = 0x%x\n", + xhci_warn(xhci, "Ignoring unknown port speed, Ext Cap %p, revision = 0x%x\n", addr, major_revision); /* Ignoring port protocol we can't understand. FIXME */ return; @@ -2172,9 +2074,8 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, port_offset = XHCI_EXT_PORT_OFF(temp); port_count = XHCI_EXT_PORT_COUNT(temp); xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Ext Cap %p, port offset = %u, " - "count = %u, revision = 0x%x", - addr, port_offset, port_count, major_revision); + "Ext Cap %p, port offset = %u, count = %u, revision = 0x%x", + addr, port_offset, port_count, major_revision); /* Port count includes the current port offset */ if (port_offset == 0 || (port_offset + port_count - 1) > num_ports) /* WTF? "Valid values are ‘1’ to MaxPorts" */ @@ -2231,10 +2132,8 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, struct xhci_port *hw_port = &xhci->hw_ports[i]; /* Duplicate entry. Ignore the port if the revisions differ. 
*/ if (hw_port->rhub) { - xhci_warn(xhci, "Duplicate port entry, Ext Cap %p," - " port %u\n", addr, i); - xhci_warn(xhci, "Port was marked as USB %u, " - "duplicated as USB %u\n", + xhci_warn(xhci, "Duplicate port entry, Ext Cap %p, port %u\n", addr, i); + xhci_warn(xhci, "Port was marked as USB %u, duplicated as USB %u\n", hw_port->rhub->maj_rev, major_revision); /* Only adjust the roothub port counts if we haven't * found a similar duplicate. @@ -2391,14 +2290,131 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) return 0; } +static struct xhci_interrupter * +xhci_alloc_interrupter(struct xhci_hcd *xhci, int segs, gfp_t flags) +{ + struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + struct xhci_interrupter *ir; + unsigned int num_segs = segs; + int ret; + + ir = kzalloc_node(sizeof(*ir), flags, dev_to_node(dev)); + if (!ir) + return NULL; + + /* number of ring segments should be greater than 0 */ + if (segs <= 0) + num_segs = min_t(unsigned int, 1 << HCS_ERST_MAX(xhci->hcs_params2), + ERST_MAX_SEGS); + + ir->event_ring = xhci_ring_alloc(xhci, num_segs, 1, TYPE_EVENT, 0, + flags); + if (!ir->event_ring) { + xhci_warn(xhci, "Failed to allocate interrupter event ring\n"); + kfree(ir); + return NULL; + } + + ret = xhci_alloc_erst(xhci, ir->event_ring, &ir->erst, flags); + if (ret) { + xhci_warn(xhci, "Failed to allocate interrupter erst\n"); + xhci_ring_free(xhci, ir->event_ring); + kfree(ir); + return NULL; + } + + return ir; +} + +static int +xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir, + unsigned int intr_num) +{ + u64 erst_base; + u32 erst_size; + + if (intr_num >= xhci->max_interrupters) { + xhci_warn(xhci, "Can't add interrupter %d, max interrupters %d\n", + intr_num, xhci->max_interrupters); + return -EINVAL; + } + + if (xhci->interrupters[intr_num]) { + xhci_warn(xhci, "Interrupter %d already set up\n", intr_num); + return -EINVAL; + } + + xhci->interrupters[intr_num] = ir; + ir->intr_num = intr_num; + ir->ir_set = &xhci->run_regs->ir_set[intr_num]; + + /* set ERST count with the number of entries in the segment table */ + erst_size = readl(&ir->ir_set->erst_size); + erst_size &= ERST_SIZE_MASK; + erst_size |= ir->event_ring->num_segs; + writel(erst_size, &ir->ir_set->erst_size); + + erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base); + erst_base &= ERST_PTR_MASK; + erst_base |= (ir->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); + xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base); + + /* Set the event ring dequeue address of this interrupter */ + xhci_set_hc_event_deq(xhci, ir); + + return 0; +} + +struct xhci_interrupter * +xhci_create_secondary_interrupter(struct usb_hcd *hcd, int num_seg) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct xhci_interrupter *ir; + unsigned int i; + int err = -ENOSPC; + + if (!xhci->interrupters || xhci->max_interrupters <= 1) + return NULL; + + ir = xhci_alloc_interrupter(xhci, num_seg, GFP_KERNEL); + if (!ir) + return NULL; + + spin_lock_irq(&xhci->lock); + + /* Find available secondary interrupter, interrupter 0 is reserved for primary */ + for (i = 1; i < xhci->max_interrupters; i++) { + if (xhci->interrupters[i] == NULL) { + err = xhci_add_interrupter(xhci, ir, i); + break; + } + } + + spin_unlock_irq(&xhci->lock); + + if (err) { + xhci_warn(xhci, "Failed to add secondary interrupter, max interrupters %d\n", + xhci->max_interrupters); + xhci_free_interrupter(xhci, ir); + return NULL; + } + + xhci_dbg(xhci, "Add secondary interrupter %d, max interrupters %d\n", i,
xhci->max_interrupters); + + return ir; +} +EXPORT_SYMBOL_GPL(xhci_create_secondary_interrupter); + int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) { - dma_addr_t dma; + struct xhci_interrupter *ir; struct device *dev = xhci_to_hcd(xhci)->self.sysdev; + dma_addr_t dma; unsigned int val, val2; u64 val_64; u32 page_size, temp; - int i, ret; + int i; INIT_LIST_HEAD(&xhci->cmd_list); @@ -2523,52 +2539,22 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) val = readl(&xhci->cap_regs->db_off); val &= DBOFF_MASK; xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Doorbell array is located at offset 0x%x" - " from cap regs base addr", val); + "// Doorbell array is located at offset 0x%x from cap regs base addr", + val); xhci->dba = (void __iomem *) xhci->cap_regs + val; - /* Set ir_set to interrupt register set 0 */ - xhci->ir_set = &xhci->run_regs->ir_set[0]; - /* - * Event ring setup: Allocate a normal ring, but also setup - * the event ring segment table (ERST). Section 4.9.3. - */ - xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring"); - xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT, - 0, flags); - if (!xhci->event_ring) - goto fail; - if (xhci_check_trb_in_td_math(xhci) < 0) - goto fail; + /* Allocate and set up primary interrupter 0 with an event ring. */ + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Allocating primary event ring"); + xhci->interrupters = kcalloc_node(xhci->max_interrupters, sizeof(*xhci->interrupters), + flags, dev_to_node(dev)); - ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags); - if (ret) + ir = xhci_alloc_interrupter(xhci, 0, flags); + if (!ir) goto fail; - /* set ERST count with the number of entries in the segment table */ - val = readl(&xhci->ir_set->erst_size); - val &= ERST_SIZE_MASK; - val |= ERST_NUM_SEGS; - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Write ERST size = %i to ir_set 0 (some bits preserved)", - val); - writel(val, &xhci->ir_set->erst_size); - - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Set ERST entries to point to event ring."); - /* set the segment table base address */ - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Set ERST base address for ir_set 0 = 0x%llx", - (unsigned long long)xhci->erst.erst_dma_addr); - val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); - val_64 &= ERST_PTR_MASK; - val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); - xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base); - - /* Set the event ring dequeue address */ - xhci_set_hc_event_deq(xhci); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Wrote ERST address to ir_set 0."); + if (xhci_add_interrupter(xhci, ir, 0)) + goto fail; /* * XXX: Might need to set the Interrupter Moderation Register to diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 0a3f26b66b2333c290f613e597b18a900357b987..c990f34b65ca1e22cae451b093060b251549b5c1 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -368,6 +368,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (xhci->quirks & XHCI_RESET_ON_RESUME) xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, "QUIRK: Resetting on resume"); + + if (pdev->vendor == PCI_VENDOR_ID_HUAWEI && + (pdev->device == 0xa23c || pdev->device == 0xa23d)) + xhci->quirks |= XHCI_USB3_NOOP; } #ifdef CONFIG_ACPI diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 583d90744bead7f88b52f28972ac2826a38aa06c..603260541c1340efb93149c7d3f69631152b03db 100644 --- 
a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1489,14 +1489,16 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, cmd_trb = xhci->cmd_ring->dequeue; trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic); - + cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, cmd_trb); /* * Check whether the completion event is for our internal kept * command. */ - if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) { + if (!cmd_dequeue_dma || ((cmd_dma != (u64)cmd_dequeue_dma) && + !((xhci->quirks & XHCI_USB3_NOOP) && (cmd_comp_code == + COMP_COMMAND_RING_STOPPED)))) { xhci_warn(xhci, "ERROR mismatched command completion event\n"); return; @@ -1506,8 +1508,6 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, cancel_delayed_work(&xhci->cmd_timer); - cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); - /* If CMD ring stopped we own the trbs between enqueue and dequeue */ if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) { complete_all(&xhci->cmd_ring_stop_completion); @@ -1529,6 +1529,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, if (cmd_comp_code == COMP_COMMAND_ABORTED) { xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; if (cmd->status == COMP_COMMAND_ABORTED) { + if (xhci->quirks & XHCI_USB3_NOOP) + trb_to_noop(cmd->command_trb, TRB_CMD_NOOP); if (xhci->current_cmd == cmd) xhci->current_cmd = NULL; goto event_handled; @@ -1607,11 +1609,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, } static void handle_vendor_event(struct xhci_hcd *xhci, - union xhci_trb *event) + union xhci_trb *event, u32 trb_type) { - u32 trb_type; - - trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3])); xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type); if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST)) handle_cmd_completion(xhci, &event->event_cmd); @@ -1667,7 +1666,8 @@ static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci) } static void handle_port_status(struct xhci_hcd *xhci, - union xhci_trb *event) + struct xhci_interrupter *ir, + union xhci_trb *event) { struct usb_hcd *hcd; u32 port_id; @@ -1690,7 +1690,7 @@ static void handle_port_status(struct xhci_hcd *xhci, if ((port_id <= 0) || (port_id > max_ports)) { xhci_warn(xhci, "Port change event with invalid port ID %d\n", port_id); - inc_deq(xhci, xhci->event_ring); + inc_deq(xhci, ir->event_ring); return; } @@ -1820,7 +1820,7 @@ static void handle_port_status(struct xhci_hcd *xhci, cleanup: /* Update event ring dequeue pointer before dropping the lock */ - inc_deq(xhci, xhci->event_ring); + inc_deq(xhci, ir->event_ring); /* Don't make the USB core poll the roothub if we got a bad port status * change event. Besides, at that point we can't tell which roothub @@ -2415,7 +2415,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, * At this point, the host controller is probably hosed and should be reset. */ static int handle_tx_event(struct xhci_hcd *xhci, - struct xhci_transfer_event *event) + struct xhci_interrupter *ir, + struct xhci_transfer_event *event) { struct xhci_virt_device *xdev; struct xhci_virt_ep *ep; @@ -2766,7 +2767,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, * processing missed tds. 
*/ if (!handling_skipped_tds) - inc_deq(xhci, xhci->event_ring); + inc_deq(xhci, ir->event_ring); /* * If ep->skip is set, it means there are missed tds on the @@ -2781,8 +2782,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, err_out: xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n", (unsigned long long) xhci_trb_virt_to_dma( - xhci->event_ring->deq_seg, - xhci->event_ring->dequeue), + ir->event_ring->deq_seg, + ir->event_ring->dequeue), lower_32_bits(le64_to_cpu(event->buffer)), upper_32_bits(le64_to_cpu(event->buffer)), le32_to_cpu(event->transfer_len), @@ -2796,56 +2797,56 @@ static int handle_tx_event(struct xhci_hcd *xhci, * Returns >0 for "possibly more events to process" (caller should call again), * otherwise 0 if done. In future, <0 returns should indicate error code. */ -static int xhci_handle_event(struct xhci_hcd *xhci) +static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir) { union xhci_trb *event; int update_ptrs = 1; + u32 trb_type; int ret; /* Event ring hasn't been allocated yet. */ - if (!xhci->event_ring || !xhci->event_ring->dequeue) { - xhci_err(xhci, "ERROR event ring not ready\n"); + if (!ir || !ir->event_ring || !ir->event_ring->dequeue) { + xhci_err(xhci, "ERROR interrupter not ready\n"); return -ENOMEM; } - event = xhci->event_ring->dequeue; + event = ir->event_ring->dequeue; /* Does the HC or OS own the TRB? */ if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) != - xhci->event_ring->cycle_state) + ir->event_ring->cycle_state) return 0; - trace_xhci_handle_event(xhci->event_ring, &event->generic); + trace_xhci_handle_event(ir->event_ring, &event->generic); /* * Barrier between reading the TRB_CYCLE (valid) flag above and any * speculative reads of the event's flags/data below. */ rmb(); + trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags)); /* FIXME: Handle more event types. */ - switch (le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) { - case TRB_TYPE(TRB_COMPLETION): + + switch (trb_type) { + case TRB_COMPLETION: handle_cmd_completion(xhci, &event->event_cmd); break; - case TRB_TYPE(TRB_PORT_STATUS): - handle_port_status(xhci, event); + case TRB_PORT_STATUS: + handle_port_status(xhci, ir, event); update_ptrs = 0; break; - case TRB_TYPE(TRB_TRANSFER): - ret = handle_tx_event(xhci, &event->trans_event); + case TRB_TRANSFER: + ret = handle_tx_event(xhci, ir, &event->trans_event); if (ret >= 0) update_ptrs = 0; break; - case TRB_TYPE(TRB_DEV_NOTE): + case TRB_DEV_NOTE: handle_device_notification(xhci, event); break; default: - if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >= - TRB_TYPE(48)) - handle_vendor_event(xhci, event); + if (trb_type >= TRB_VENDOR_DEFINED_LOW) + handle_vendor_event(xhci, event, trb_type); else - xhci_warn(xhci, "ERROR unknown event type %d\n", - TRB_FIELD_TO_TYPE( - le32_to_cpu(event->event_cmd.flags))); + xhci_warn(xhci, "ERROR unknown event type %d\n", trb_type); } /* Any of the above functions may drop and re-acquire the lock, so check * to make sure a watchdog timer didn't mark the host as non-responsive. @@ -2858,7 +2859,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci) if (update_ptrs) /* Update SW event ring dequeue pointer */ - inc_deq(xhci, xhci->event_ring); + inc_deq(xhci, ir->event_ring); /* Are there more items on the event ring? Caller will call us again to * check. 
@@ -2872,16 +2873,18 @@ static int xhci_handle_event(struct xhci_hcd *xhci) * - To avoid "Event Ring Full Error" condition */ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci, - union xhci_trb *event_ring_deq) + struct xhci_interrupter *ir, + union xhci_trb *event_ring_deq, + bool clear_ehb) { u64 temp_64; dma_addr_t deq; - temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); + temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); /* If necessary, update the HW's version of the event ring deq ptr. */ - if (event_ring_deq != xhci->event_ring->dequeue) { - deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, - xhci->event_ring->dequeue); + if (event_ring_deq != ir->event_ring->dequeue) { + deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg, + ir->event_ring->dequeue); if (deq == 0) xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n"); /* @@ -2893,13 +2896,14 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci, return; /* Update HC event ring dequeue pointer */ - temp_64 &= ERST_PTR_MASK; + temp_64 = ir->event_ring->deq_seg->num & ERST_DESI_MASK; temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK); } /* Clear the event handler busy flag (RW1C) */ - temp_64 |= ERST_EHB; - xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue); + if (clear_ehb) + temp_64 |= ERST_EHB; + xhci_write_64(xhci, temp_64, &ir->ir_set->erst_dequeue); } /* @@ -2911,6 +2915,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); union xhci_trb *event_ring_deq; + struct xhci_interrupter *ir; irqreturn_t ret = IRQ_NONE; unsigned long flags; u64 temp_64; @@ -2944,11 +2949,13 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) status |= STS_EINT; writel(status, &xhci->op_regs->status); + /* This is the handler of the primary interrupter */ + ir = xhci->interrupters[0]; if (!hcd->msi_enabled) { u32 irq_pending; - irq_pending = readl(&xhci->ir_set->irq_pending); + irq_pending = readl(&ir->ir_set->irq_pending); irq_pending |= IMAN_IP; - writel(irq_pending, &xhci->ir_set->irq_pending); + writel(irq_pending, &ir->ir_set->irq_pending); } if (xhci->xhc_state & XHCI_STATE_DYING || @@ -2958,27 +2965,27 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) /* Clear the event handler busy flag (RW1C); * the event ring should be empty. */ - temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); + temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); xhci_write_64(xhci, temp_64 | ERST_EHB, - &xhci->ir_set->erst_dequeue); + &ir->ir_set->erst_dequeue); ret = IRQ_HANDLED; goto out; } - event_ring_deq = xhci->event_ring->dequeue; + event_ring_deq = ir->event_ring->dequeue; /* FIXME this should be a delayed service routine * that clears the EHB. 
*/ - while (xhci_handle_event(xhci) > 0) { + while (xhci_handle_event(xhci, ir) > 0) { if (event_loop++ < TRBS_PER_SEGMENT / 2) continue; - xhci_update_erst_dequeue(xhci, event_ring_deq); - event_ring_deq = xhci->event_ring->dequeue; + xhci_update_erst_dequeue(xhci, ir, event_ring_deq, false); + event_ring_deq = ir->event_ring->dequeue; event_loop = 0; } - xhci_update_erst_dequeue(xhci, event_ring_deq); + xhci_update_erst_dequeue(xhci, ir, event_ring_deq, true); ret = IRQ_HANDLED; out: diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 485e6a47d346f26b1e5c57e2e2a26a1586054ba0..cf7ddeea0f37e4f743dfd3262efd6afac86f107c 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -288,6 +288,32 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci) xhci_info(xhci, "Fault detected\n"); } +int xhci_enable_interrupter(struct xhci_interrupter *ir) +{ + u32 iman; + + if (!ir || !ir->ir_set) + return -EINVAL; + + iman = readl(&ir->ir_set->irq_pending); + writel(ER_IRQ_ENABLE(iman), &ir->ir_set->irq_pending); + + return 0; +} + +int xhci_disable_interrupter(struct xhci_interrupter *ir) +{ + u32 iman; + + if (!ir || !ir->ir_set) + return -EINVAL; + + iman = readl(&ir->ir_set->irq_pending); + writel(ER_IRQ_DISABLE(iman), &ir->ir_set->irq_pending); + + return 0; +} + #ifdef CONFIG_USB_PCI /* * Set up MSI @@ -603,11 +629,29 @@ static int xhci_init(struct usb_hcd *hcd) /*-------------------------------------------------------------------------*/ - static int xhci_run_finished(struct xhci_hcd *xhci) { + struct xhci_interrupter *ir = xhci->interrupters[0]; + unsigned long flags; + u32 temp; + + /* + * Enable interrupts before starting the host (xhci 4.2 and 5.5.2). + * Protect the short window before host is running with a lock + */ + spin_lock_irqsave(&xhci->lock, flags); + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable interrupts"); + temp = readl(&xhci->op_regs->command); + temp |= (CMD_EIE); + writel(temp, &xhci->op_regs->command); + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter"); + xhci_enable_interrupter(ir); + if (xhci_start(xhci)) { xhci_halt(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); return -ENODEV; } xhci->shared_hcd->state = HC_STATE_RUNNING; @@ -618,6 +662,8 @@ static int xhci_run_finished(struct xhci_hcd *xhci) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_run for USB3 roothub"); + + spin_unlock_irqrestore(&xhci->lock, flags); return 0; } @@ -639,7 +685,7 @@ int xhci_run(struct usb_hcd *hcd) u64 temp_64; int ret; struct xhci_hcd *xhci = hcd_to_xhci(hcd); - + struct xhci_interrupter *ir = xhci->interrupters[0]; /* Start the xHCI host controller running only after the USB 2.0 roothub * is setup. 
*/ @@ -654,30 +700,17 @@ int xhci_run(struct usb_hcd *hcd) if (ret) return ret; - temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); + temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); temp_64 &= ~ERST_PTR_MASK; xhci_dbg_trace(xhci, trace_xhci_dbg_init, "ERST deq = 64'h%0lx", (long unsigned int) temp_64); xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Set the interrupt modulation register"); - temp = readl(&xhci->ir_set->irq_control); + temp = readl(&ir->ir_set->irq_control); temp &= ~ER_IRQ_INTERVAL_MASK; temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK; - writel(temp, &xhci->ir_set->irq_control); - - /* Set the HCD state before we enable the irqs */ - temp = readl(&xhci->op_regs->command); - temp |= (CMD_EIE); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Enable interrupts, cmd = 0x%x.", temp); - writel(temp, &xhci->op_regs->command); - - temp = readl(&xhci->ir_set->irq_pending); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Enabling event ring interrupter %p by writing 0x%x to irq_pending", - xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); - writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending); + writel(temp, &ir->ir_set->irq_control); if (xhci->quirks & XHCI_NEC_HOST) { struct xhci_command *command; @@ -715,6 +748,7 @@ static void xhci_stop(struct usb_hcd *hcd) { u32 temp; struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct xhci_interrupter *ir = xhci->interrupters[0]; mutex_lock(&xhci->mutex); @@ -751,8 +785,7 @@ static void xhci_stop(struct usb_hcd *hcd) "// Disabling event ring interrupts"); temp = readl(&xhci->op_regs->status); writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status); - temp = readl(&xhci->ir_set->irq_pending); - writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); + xhci_disable_interrupter(ir); xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory"); xhci_mem_cleanup(xhci); @@ -819,28 +852,51 @@ EXPORT_SYMBOL_GPL(xhci_shutdown); #ifdef CONFIG_PM static void xhci_save_registers(struct xhci_hcd *xhci) { + struct xhci_interrupter *ir; + unsigned int i; + xhci->s3.command = readl(&xhci->op_regs->command); xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification); xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); xhci->s3.config_reg = readl(&xhci->op_regs->config_reg); + + /* save both primary and all secondary interrupters */ + /* fixme, should we lock to prevent race with remove secondary interrupter?
*/ + for (i = 0; i < xhci->max_interrupters; i++) { + ir = xhci->interrupters[i]; + if (!ir) + continue; + + ir->s3_erst_size = readl(&ir->ir_set->erst_size); + ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base); + ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue); + ir->s3_irq_pending = readl(&ir->ir_set->irq_pending); + ir->s3_irq_control = readl(&ir->ir_set->irq_control); + } } static void xhci_restore_registers(struct xhci_hcd *xhci) { + struct xhci_interrupter *ir; + unsigned int i; + writel(xhci->s3.command, &xhci->op_regs->command); writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification); xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr); writel(xhci->s3.config_reg, &xhci->op_regs->config_reg); - writel(xhci->s3.erst_size, &xhci->ir_set->erst_size); - xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base); - xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue); - writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending); - writel(xhci->s3.irq_control, &xhci->ir_set->irq_control); + + /* FIXME should we lock to protect against freeing of interrupters */ + for (i = 0; i < xhci->max_interrupters; i++) { + ir = xhci->interrupters[i]; + if (!ir) + continue; + + writel(ir->s3_erst_size, &ir->ir_set->erst_size); + xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base); + xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue); + writel(ir->s3_irq_pending, &ir->ir_set->irq_pending); + writel(ir->s3_irq_control, &ir->ir_set->irq_control); + } } static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) @@ -1199,8 +1255,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) xhci_dbg(xhci, "// Disabling event ring interrupts\n"); temp = readl(&xhci->op_regs->status); writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status); - temp = readl(&xhci->ir_set->irq_pending); - writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending); + xhci_disable_interrupter(xhci->interrupters[0]); xhci_dbg(xhci, "cleaning up memory\n"); xhci_mem_cleanup(xhci); @@ -1518,11 +1573,9 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag ep_index = xhci_get_endpoint_index(&urb->ep->desc); ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state; - if (!HCD_HW_ACCESSIBLE(hcd)) { - if (!in_interrupt()) - xhci_dbg(xhci, "urb submitted during PCI suspend\n"); + if (!HCD_HW_ACCESSIBLE(hcd)) return -ESHUTDOWN; - } + if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) { xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n"); return -ENODEV; @@ -5329,6 +5382,11 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) if (xhci->hci_version > 0x100) xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2); + /* xhci-plat or xhci-pci might have set max_interrupters already */ + if ((!xhci->max_interrupters) || + xhci->max_interrupters > HCS_MAX_INTRS(xhci->hcs_params1)) + xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1); + xhci->quirks |= quirks; get_quirks(dev, xhci); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 0b59fb9d4bacd43990bd1feddc9138fe0d542cd8..4c7062c067c59ab2d8b856164be8aee7e764deda 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -513,6 +513,9 @@ struct xhci_intr_reg { /* Preserve bits 16:31 of erst_size */ #define ERST_SIZE_MASK (0xffff << 16) +/* erst_base bitmasks */ +#define ERST_BASE_RSVDP (0x3f) + /* erst_dequeue bitmasks */ /* Dequeue ERST Segment Index (DESI) - Segment number (or alias) * where the current 
dequeue pointer lies. This is an optional HW hint. @@ -1418,7 +1421,7 @@ union xhci_trb { /* MFINDEX Wrap Event - microframe counter wrapped */ #define TRB_MFINDEX_WRAP 39 /* TRB IDs 40-47 reserved, 48-63 is vendor-defined */ - +#define TRB_VENDOR_DEFINED_LOW 48 /* Nec vendor-specific command completion event. */ #define TRB_NEC_CMD_COMP 48 /* Get NEC firmware revision. */ @@ -1530,6 +1533,7 @@ struct xhci_segment { union xhci_trb *trbs; /* private to HCD */ struct xhci_segment *next; + unsigned int num; dma_addr_t dma; /* Max packet sized bounce buffer for td-fragmant alignment */ dma_addr_t bounce_dma; @@ -1656,8 +1660,9 @@ struct urb_priv { * Each segment table entry is 4*32bits long. 1K seems like an ok size: * (1K bytes * 8bytes/bit) / (4*32 bits) = 64 segment entries in the table, * meaning 64 ring segments. - * Initial allocated size of the ERST, in number of entries */ -#define ERST_NUM_SEGS 1 + * Reasonable limit for number of Event Ring segments (spec allows 32k) + */ +#define ERST_MAX_SEGS 2 /* Initial allocated size of the ERST, in number of entries */ #define ERST_SIZE 64 /* Initial number of event segment rings allocated */ @@ -1673,11 +1678,6 @@ struct s3_save { u32 dev_nt; u64 dcbaa_ptr; u32 config_reg; - u32 irq_pending; - u32 irq_control; - u32 erst_size; - u64 erst_base; - u64 erst_dequeue; }; /* Use for lpm */ @@ -1704,7 +1704,18 @@ struct xhci_bus_state { struct completion u3exit_done[USB_MAXCHILDREN]; }; - +struct xhci_interrupter { + struct xhci_ring *event_ring; + struct xhci_erst erst; + struct xhci_intr_reg __iomem *ir_set; + unsigned int intr_num; + /* For interrupter registers save and restore over suspend/resume */ + u32 s3_irq_pending; + u32 s3_irq_control; + u32 s3_erst_size; + u64 s3_erst_base; + u64 s3_erst_dequeue; +}; /* * It can take up to 20 ms to transition from RExit to U0 on the * Intel Lynx Point LP xHCI host. 
@@ -1746,8 +1757,6 @@ struct xhci_hcd { struct xhci_op_regs __iomem *op_regs; struct xhci_run_regs __iomem *run_regs; struct xhci_doorbell_array __iomem *dba; - /* Our HCD's current interrupter register set */ - struct xhci_intr_reg __iomem *ir_set; /* Cached register copies of read-only HC data */ __u32 hcs_params1; @@ -1762,7 +1771,7 @@ struct xhci_hcd { u8 sbrn; u16 hci_version; u8 max_slots; - u8 max_interrupters; + u16 max_interrupters; u8 max_ports; u8 isoc_threshold; /* imod_interval in ns (I * 250ns) */ @@ -1781,6 +1790,7 @@ struct xhci_hcd { struct reset_control *reset; /* data structures */ struct xhci_device_context_array *dcbaa; + struct xhci_interrupter **interrupters; struct xhci_ring *cmd_ring; unsigned int cmd_ring_state; #define CMD_RING_STATE_RUNNING (1 << 0) @@ -1791,8 +1801,7 @@ struct xhci_hcd { struct delayed_work cmd_timer; struct completion cmd_ring_stop_completion; struct xhci_command *current_cmd; - struct xhci_ring *event_ring; - struct xhci_erst erst; + /* Scratchpad */ struct xhci_scratchpad *scratchpad; /* Store LPM test failed devices' information */ @@ -1835,6 +1844,7 @@ struct xhci_hcd { #define XHCI_STATE_HALTED (1 << 1) #define XHCI_STATE_REMOVING (1 << 2) unsigned long long quirks; +#define XHCI_USB3_NOOP BIT_ULL(63) #define XHCI_LINK_TRB_QUIRK BIT_ULL(0) #define XHCI_RESET_EP_QUIRK BIT_ULL(1) #define XHCI_NEC_HOST BIT_ULL(2) @@ -2070,6 +2080,10 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, int type, gfp_t flags); void xhci_free_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); +struct xhci_interrupter * +xhci_create_secondary_interrupter(struct usb_hcd *hcd, int num_seg); +void xhci_remove_secondary_interrupter(struct usb_hcd + *hcd, struct xhci_interrupter *ir); /* xHCI host controller glue */ typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *); @@ -2098,6 +2112,8 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_device *hdev, struct usb_tt *tt, gfp_t mem_flags); +int xhci_enable_interrupter(struct xhci_interrupter *ir); +int xhci_disable_interrupter(struct xhci_interrupter *ir); /* xHCI ring, segment, TRB, and TD functions */ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
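
Illustrative usage note (not part of the patch): xhci_create_secondary_interrupter() and xhci_remove_secondary_interrupter() are the two exported entry points a client such as an audio-offload glue driver would call. The sketch below only shows that lifecycle; the xyz_offload structure, xyz_offload_start()/stop() and the idea of handing the event ring address to an offload engine are assumptions for illustration, not interfaces defined by this series.

/* Hypothetical client of the new secondary interrupter API (sketch only). */
#include <linux/usb/hcd.h>
#include "xhci.h"

struct xyz_offload {
	struct usb_hcd *hcd;		/* primary xHCI HCD */
	struct xhci_interrupter *ir;	/* dedicated interrupter for the offload engine */
};

static int xyz_offload_start(struct xyz_offload *od)
{
	/*
	 * Request a secondary interrupter with a 2-segment event ring.
	 * Passing 0 lets xhci pick a default bounded by ERST_MAX_SEGS.
	 */
	od->ir = xhci_create_secondary_interrupter(od->hcd, 2);
	if (!od->ir)
		return -ENOMEM;

	/*
	 * The offload engine owns this interrupter's event ring; how the
	 * interrupter number and ring address are handed to it is platform
	 * specific, so it is only logged here.
	 */
	pr_info("offload: interrupter %u, first ERST segment @ %pad\n",
		od->ir->intr_num, &od->ir->event_ring->first_seg->dma);
	return 0;
}

static void xyz_offload_stop(struct xyz_offload *od)
{
	/*
	 * Clears the interrupter registers, frees the ERST and event ring,
	 * and releases the xhci->interrupters[] slot.
	 */
	xhci_remove_secondary_interrupter(od->hcd, od->ir);
	od->ir = NULL;
}

Note that xhci_irq() still services only interrupters[0]; events queued on a secondary interrupter's ring are left for whoever owns that interrupter to consume, which is the point of the offload use case.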